commit 408b25f1f1cc30eb638c702b2ade2507bdfe8a91
Author: sigonasr2
Date:   Mon Oct 16 17:15:10 2023 -0500

    Add project files.

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..1ff0c42
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,63 @@
+###############################################################################
+# Set default behavior to automatically normalize line endings.
+###############################################################################
+* text=auto
+
+###############################################################################
+# Set default behavior for command prompt diff.
+#
+# This is need for earlier builds of msysgit that does not have it on by
+# default for csharp files.
+# Note: This is only used by command line
+###############################################################################
+#*.cs diff=csharp
+
+###############################################################################
+# Set the merge driver for project and solution files
+#
+# Merging from the command prompt will add diff markers to the files if there
+# are conflicts (Merging from VS is not affected by the settings below, in VS
+# the diff markers are never inserted). Diff markers may cause the following
+# file extensions to fail to load in VS. An alternative would be to treat
+# these files as binary and thus will always conflict and require user
+# intervention with every merge. To do so, just uncomment the entries below
+###############################################################################
+#*.sln merge=binary
+#*.csproj merge=binary
+#*.vbproj merge=binary
+#*.vcxproj merge=binary
+#*.vcproj merge=binary
+#*.dbproj merge=binary
+#*.fsproj merge=binary
+#*.lsproj merge=binary
+#*.wixproj merge=binary
+#*.modelproj merge=binary
+#*.sqlproj merge=binary
+#*.wwaproj merge=binary
+
+###############################################################################
+# behavior for image files
+#
+# image files are treated as binary by default.
+###############################################################################
+#*.jpg binary
+#*.png binary
+#*.gif binary
+
+###############################################################################
+# diff behavior for common document formats
+#
+# Convert binary document formats to text before diffing them. This feature
+# is only available from the command line. Turn it on by uncommenting the
+# entries below.
+###############################################################################
+#*.doc diff=astextplain
+#*.DOC diff=astextplain
+#*.docx diff=astextplain
+#*.DOCX diff=astextplain
+#*.dot diff=astextplain
+#*.DOT diff=astextplain
+#*.pdf diff=astextplain
+#*.PDF diff=astextplain
+#*.rtf diff=astextplain
+#*.RTF diff=astextplain
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..9491a2f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,363 @@
+## Ignore Visual Studio temporary files, build results, and
+## files generated by popular Visual Studio add-ons.
+##
+## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
+
+# User-specific files
+*.rsuser
+*.suo
+*.user
+*.userosscache
+*.sln.docstates
+
+# User-specific files (MonoDevelop/Xamarin Studio)
+*.userprefs
+
+# Mono auto generated files
+mono_crash.*
+
+# Build results
+[Dd]ebug/
+[Dd]ebugPublic/
+[Rr]elease/
+[Rr]eleases/
+x64/
+x86/
+[Ww][Ii][Nn]32/
+[Aa][Rr][Mm]/
+[Aa][Rr][Mm]64/
+bld/
+[Bb]in/
+[Oo]bj/
+[Oo]ut/
+[Ll]og/
+[Ll]ogs/
+
+# Visual Studio 2015/2017 cache/options directory
+.vs/
+# Uncomment if you have tasks that create the project's static files in wwwroot
+#wwwroot/
+
+# Visual Studio 2017 auto generated files
+Generated\ Files/
+
+# MSTest test Results
+[Tt]est[Rr]esult*/
+[Bb]uild[Ll]og.*
+
+# NUnit
+*.VisualState.xml
+TestResult.xml
+nunit-*.xml
+
+# Build Results of an ATL Project
+[Dd]ebugPS/
+[Rr]eleasePS/
+dlldata.c
+
+# Benchmark Results
+BenchmarkDotNet.Artifacts/
+
+# .NET Core
+project.lock.json
+project.fragment.lock.json
+artifacts/
+
+# ASP.NET Scaffolding
+ScaffoldingReadMe.txt
+
+# StyleCop
+StyleCopReport.xml
+
+# Files built by Visual Studio
+*_i.c
+*_p.c
+*_h.h
+*.ilk
+*.meta
+*.obj
+*.iobj
+*.pch
+*.pdb
+*.ipdb
+*.pgc
+*.pgd
+*.rsp
+*.sbr
+*.tlb
+*.tli
+*.tlh
+*.tmp
+*.tmp_proj
+*_wpftmp.csproj
+*.log
+*.vspscc
+*.vssscc
+.builds
+*.pidb
+*.svclog
+*.scc
+
+# Chutzpah Test files
+_Chutzpah*
+
+# Visual C++ cache files
+ipch/
+*.aps
+*.ncb
+*.opendb
+*.opensdf
+*.sdf
+*.cachefile
+*.VC.db
+*.VC.VC.opendb
+
+# Visual Studio profiler
+*.psess
+*.vsp
+*.vspx
+*.sap
+
+# Visual Studio Trace Files
+*.e2e
+
+# TFS 2012 Local Workspace
+$tf/
+
+# Guidance Automation Toolkit
+*.gpState
+
+# ReSharper is a .NET coding add-in
+_ReSharper*/
+*.[Rr]e[Ss]harper
+*.DotSettings.user
+
+# TeamCity is a build add-in
+_TeamCity*
+
+# DotCover is a Code Coverage Tool
+*.dotCover
+
+# AxoCover is a Code Coverage Tool
+.axoCover/*
+!.axoCover/settings.json
+
+# Coverlet is a free, cross platform Code Coverage Tool
+coverage*.json
+coverage*.xml
+coverage*.info
+
+# Visual Studio code coverage results
+*.coverage
+*.coveragexml
+
+# NCrunch
+_NCrunch_*
+.*crunch*.local.xml
+nCrunchTemp_*
+
+# MightyMoose
+*.mm.*
+AutoTest.Net/
+
+# Web workbench (sass)
+.sass-cache/
+
+# Installshield output folder
+[Ee]xpress/
+
+# DocProject is a documentation generator add-in
+DocProject/buildhelp/
+DocProject/Help/*.HxT
+DocProject/Help/*.HxC
+DocProject/Help/*.hhc
+DocProject/Help/*.hhk
+DocProject/Help/*.hhp
+DocProject/Help/Html2
+DocProject/Help/html
+
+# Click-Once directory
+publish/
+
+# Publish Web Output
+*.[Pp]ublish.xml
+*.azurePubxml
+# Note: Comment the next line if you want to checkin your web deploy settings,
+# but database connection strings (with potential passwords) will be unencrypted
+*.pubxml
+*.publishproj
+
+# Microsoft Azure Web App publish settings. Comment the next line if you want to
+# checkin your Azure Web App publish settings, but sensitive information contained
+# in these scripts will be unencrypted
+PublishScripts/
+
+# NuGet Packages
+*.nupkg
+# NuGet Symbol Packages
+*.snupkg
+# The packages folder can be ignored because of Package Restore
+**/[Pp]ackages/*
+# except build/, which is used as an MSBuild target.
+!**/[Pp]ackages/build/
+# Uncomment if necessary however generally it will be regenerated when needed
+#!**/[Pp]ackages/repositories.config
+# NuGet v3's project.json files produces more ignorable files
+*.nuget.props
+*.nuget.targets
+
+# Microsoft Azure Build Output
+csx/
+*.build.csdef
+
+# Microsoft Azure Emulator
+ecf/
+rcf/
+
+# Windows Store app package directories and files
+AppPackages/
+BundleArtifacts/
+Package.StoreAssociation.xml
+_pkginfo.txt
+*.appx
+*.appxbundle
+*.appxupload
+
+# Visual Studio cache files
+# files ending in .cache can be ignored
+*.[Cc]ache
+# but keep track of directories ending in .cache
+!?*.[Cc]ache/
+
+# Others
+ClientBin/
+~$*
+*~
+*.dbmdl
+*.dbproj.schemaview
+*.jfm
+*.pfx
+*.publishsettings
+orleans.codegen.cs
+
+# Including strong name files can present a security risk
+# (https://github.com/github/gitignore/pull/2483#issue-259490424)
+#*.snk
+
+# Since there are multiple workflows, uncomment next line to ignore bower_components
+# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
+#bower_components/
+
+# RIA/Silverlight projects
+Generated_Code/
+
+# Backup & report files from converting an old project file
+# to a newer Visual Studio version. Backup files are not needed,
+# because we have git ;-)
+_UpgradeReport_Files/
+Backup*/
+UpgradeLog*.XML
+UpgradeLog*.htm
+ServiceFabricBackup/
+*.rptproj.bak
+
+# SQL Server files
+*.mdf
+*.ldf
+*.ndf
+
+# Business Intelligence projects
+*.rdl.data
+*.bim.layout
+*.bim_*.settings
+*.rptproj.rsuser
+*- [Bb]ackup.rdl
+*- [Bb]ackup ([0-9]).rdl
+*- [Bb]ackup ([0-9][0-9]).rdl
+
+# Microsoft Fakes
+FakesAssemblies/
+
+# GhostDoc plugin setting file
+*.GhostDoc.xml
+
+# Node.js Tools for Visual Studio
+.ntvs_analysis.dat
+node_modules/
+
+# Visual Studio 6 build log
+*.plg
+
+# Visual Studio 6 workspace options file
+*.opt
+
+# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
+*.vbw
+
+# Visual Studio LightSwitch build output
+**/*.HTMLClient/GeneratedArtifacts
+**/*.DesktopClient/GeneratedArtifacts
+**/*.DesktopClient/ModelManifest.xml
+**/*.Server/GeneratedArtifacts
+**/*.Server/ModelManifest.xml
+_Pvt_Extensions
+
+# Paket dependency manager
+.paket/paket.exe
+paket-files/
+
+# FAKE - F# Make
+.fake/
+
+# CodeRush personal settings
+.cr/personal
+
+# Python Tools for Visual Studio (PTVS)
+__pycache__/
+*.pyc
+
+# Cake - Uncomment if you are using it
+# tools/**
+# !tools/packages.config
+
+# Tabs Studio
+*.tss
+
+# Telerik's JustMock configuration file
+*.jmconfig
+
+# BizTalk build output
+*.btp.cs
+*.btm.cs
+*.odx.cs
+*.xsd.cs
+
+# OpenCover UI analysis results
+OpenCover/
+
+# Azure Stream Analytics local run output
+ASALocalRun/
+
+# MSBuild Binary and Structured Log
+*.binlog
+
+# NVidia Nsight GPU debugger configuration file
+*.nvuser
+
+# MFractors (Xamarin productivity tool) working folder
+.mfractor/
+
+# Local History for Visual Studio
+.localhistory/
+
+# BeatPulse healthcheck temp database
+healthchecksdb
+
+# Backup folder for Package Reference Convert tool in Visual Studio 2017
+MigrationBackup/
+
+# Ionide (cross platform F# VS Code tools) working folder
+.ionide/
+
+# Fody - auto-generated XML schema
+FodyWeavers.xsd
\ No newline at end of file
diff --git a/Example.sln b/Example.sln
new file mode 100644
index 0000000..f5b33c8
--- /dev/null
+++ b/Example.sln
@@ -0,0 +1,31 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio Version 17
+VisualStudioVersion = 17.5.33516.290
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Example", "Example\Example.vcxproj", "{C421608B-8317-4954-89AB-212E3826C3BC}"
+EndProject
+Global
+    GlobalSection(SolutionConfigurationPlatforms) = preSolution
+        Debug|x64 = Debug|x64
+        Debug|x86 = Debug|x86
+        Release|x64 = Release|x64
+        Release|x86 = Release|x86
+    EndGlobalSection
+    GlobalSection(ProjectConfigurationPlatforms) = postSolution
+        {C421608B-8317-4954-89AB-212E3826C3BC}.Debug|x64.ActiveCfg = Debug|x64
+        {C421608B-8317-4954-89AB-212E3826C3BC}.Debug|x64.Build.0 = Debug|x64
+        {C421608B-8317-4954-89AB-212E3826C3BC}.Debug|x86.ActiveCfg = Debug|Win32
+        {C421608B-8317-4954-89AB-212E3826C3BC}.Debug|x86.Build.0 = Debug|Win32
+        {C421608B-8317-4954-89AB-212E3826C3BC}.Release|x64.ActiveCfg = Release|x64
+        {C421608B-8317-4954-89AB-212E3826C3BC}.Release|x64.Build.0 = Release|x64
+        {C421608B-8317-4954-89AB-212E3826C3BC}.Release|x86.ActiveCfg = Release|Win32
+        {C421608B-8317-4954-89AB-212E3826C3BC}.Release|x86.Build.0 = Release|Win32
+    EndGlobalSection
+    GlobalSection(SolutionProperties) = preSolution
+        HideSolutionNode = FALSE
+    EndGlobalSection
+    GlobalSection(ExtensibilityGlobals) = postSolution
+        SolutionGuid = {3981479D-8997-4CB6-BE01-14211B2B833D}
+    EndGlobalSection
+EndGlobal
diff --git a/Example/ArtWork.VGA b/Example/ArtWork.VGA
new file mode 100644
index 0000000..632a89e
--- /dev/null
+++ b/Example/ArtWork.VGA
@@ -0,0 +1 @@
+255,255,255,255,255,255,255,255, ... [ArtWork.VGA: a single line of comma-separated 8-bit color values, almost all 255 with occasional other values such as 176; the full pixel data is omitted here and the hunk is truncated at the end of this section]
55,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255
,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,25
5,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,2
55,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, \ No newline at end of file diff --git a/Example/Example.vcxproj b/Example/Example.vcxproj new file mode 100644 index 0000000..b27d168 --- /dev/null +++ b/Example/Example.vcxproj @@ -0,0 +1,166 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 16.0 + Win32Proj + {c421608b-8317-4954-89ab-212e3826c3bc} + Test + 10.0 + Test + + + + Application + true + v143 + Unicode + + + Application + false + v143 + true + Unicode + + + Application + true + v143 + Unicode + + + Application + false + v143 + true + Unicode + + + + + + + + + + + + + + + + + + + + + C:\Users\sigon\Documents\asio-1.18.0\include;$(IncludePath) + + + + Level3 + true + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + stdcpp17 + C:\Users\sigon\source\repos\Example\Example\include;C:\Users\sigon\source\repos\Example\Example\src;%(AdditionalIncludeDirectories) + + + Console + true + C:\Users\sigon\source\repos\Example\Example\libraries;%(AdditionalLibraryDirectories) + + + + + Level3 + true + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + stdcpp17 + C:\Users\sigon\source\repos\Example\Example\include;%(AdditionalIncludeDirectories) + + + Console + true + true + true + C:\Users\sigon\source\repos\Example\Example\libraries;%(AdditionalLibraryDirectories) + + + + + Level3 + true + _DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + stdcpp17 + C:\Users\sigon\source\repos\Example\Example\include;C:\Users\sigon\source\repos\Example\Example\src;%(AdditionalIncludeDirectories) + + + Console + true + C:\Users\sigon\source\repos\Example\Example\libraries;%(AdditionalLibraryDirectories) + + + + + Level3 + true + true + true + NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + stdcpp17 + C:\Users\sigon\source\repos\Example\Example\include;%(AdditionalIncludeDirectories) + + + Console + true + true + true + C:\Users\sigon\source\repos\Example\Example\libraries;%(AdditionalLibraryDirectories) + + + + + + + + + + + + + + + + + + + + + + + + \ No newline 
at end of file diff --git a/Example/Example.vcxproj.filters b/Example/Example.vcxproj.filters new file mode 100644 index 0000000..2ac1318 --- /dev/null +++ b/Example/Example.vcxproj.filters @@ -0,0 +1,63 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;c++;cppm;ixx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;h++;hm;inl;inc;ipp;xsd + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + + + Source Files + + + Source Files + + + \ No newline at end of file diff --git a/Example/Freshman.ttf b/Example/Freshman.ttf new file mode 100644 index 0000000..a20b7f9 Binary files /dev/null and b/Example/Freshman.ttf differ diff --git a/Example/GILB____.TTF b/Example/GILB____.TTF new file mode 100644 index 0000000..1cad3fd Binary files /dev/null and b/Example/GILB____.TTF differ diff --git a/Example/GasoekOne-Regular.ttf b/Example/GasoekOne-Regular.ttf new file mode 100644 index 0000000..ef38eec Binary files /dev/null and b/Example/GasoekOne-Regular.ttf differ diff --git a/Example/Hero.wav b/Example/Hero.wav new file mode 100644 index 0000000..8f3a46d Binary files /dev/null and b/Example/Hero.wav differ diff --git a/Example/Sigg.wav b/Example/Sigg.wav new file mode 100644 index 0000000..e27deb3 Binary files /dev/null and b/Example/Sigg.wav differ diff --git a/Example/WeatherChannel.wav b/Example/WeatherChannel.wav new file mode 100644 index 0000000..a259209 Binary files /dev/null and b/Example/WeatherChannel.wav differ diff --git a/Example/assets/Monsters.txt b/Example/assets/Monsters.txt new file mode 100644 index 0000000..4ed1ae9 --- /dev/null +++ b/Example/assets/Monsters.txt @@ -0,0 +1,76 @@ +Monsters +{ + GREEN_SLIME = 4, 0.4, TEST + GREEN_SLIME + { + Name = Green Slime + Health = 10 + Attack = 5 + + CollisionDmg = 5 + + MoveSpd = 110 + Size = 80 + + Strategy = RUN_TOWARDS + + ANIMATION[0] = TEST + } + BLUE_SLIME + { + Name = Blue Slime + Health = 30 + Attack = 10 + + CollisionDmg = 0 + + MoveSpd = 80 + Size = 100 + + Strategy = SHOOT_AFAR + } + RED_SLIME + { + Name = Red Slime + Health = 25 + Attack = 10 + + CollisionDmg = 10 + + MoveSpd = 95 + Size = 120 + + Strategy = RUN_TOWARDS + } + YELLOW_SLIME + { + Name = Yellow Slime + Health = 175 + Attack = 10 + + CollisionDmg = 15 + + MoveSpd = 40 + Size = 160 + + Strategy = RUN_TOWARDS + } + FLOWER_TURRET + { + Name = Flower Turret + Health = 40 + Attack = 10 + + CollisionDmg = 0 + + MoveSpd = 0 + Size = 100 + + Strategy = TURRET + } + + + TEST[0]=4 + TEST[1]=6 + TEST[2]=8 +} \ No newline at end of file diff --git a/Example/assets/avatar.bmp b/Example/assets/avatar.bmp new file mode 100644 index 0000000..3515a1a Binary files /dev/null and b/Example/assets/avatar.bmp differ diff --git a/Example/assets/avatar.png b/Example/assets/avatar.png new file mode 100644 index 0000000..f51b966 Binary files /dev/null and b/Example/assets/avatar.png differ diff --git a/Example/assets/boss1.mp3 b/Example/assets/boss1.mp3 new file mode 100644 index 0000000..cf7c6b9 Binary files /dev/null and b/Example/assets/boss1.mp3 differ diff --git a/Example/assets/hit1.mp3 b/Example/assets/hit1.mp3 new file mode 100644 index 0000000..3e5e890 Binary files /dev/null and b/Example/assets/hit1.mp3 differ diff 
--git a/Example/assets/kosuzu.png b/Example/assets/kosuzu.png new file mode 100644 index 0000000..a5313fc Binary files /dev/null and b/Example/assets/kosuzu.png differ diff --git a/Example/cube.mtl b/Example/cube.mtl new file mode 100644 index 0000000..e690002 --- /dev/null +++ b/Example/cube.mtl @@ -0,0 +1,10 @@ +# Exported from Wings 3D 2.2.9 +newmtl default +Ns 19.999999999999996 +d 1.0 +illum 2 +Kd 0.7898538076923077 0.8133333333333334 0.6940444444444445 +Ka 0.0 0.0 0.0 +Ks 0.1689853807692308 0.17133333333333334 0.15940444444444446 +Ke 0.0 0.0 0.0 + diff --git a/Example/dirt.png b/Example/dirt.png new file mode 100644 index 0000000..7951b56 Binary files /dev/null and b/Example/dirt.png differ diff --git a/Example/freetype.dll b/Example/freetype.dll new file mode 100644 index 0000000..d651953 Binary files /dev/null and b/Example/freetype.dll differ diff --git a/Example/include/freetype/config/ftconfig.h b/Example/include/freetype/config/ftconfig.h new file mode 100644 index 0000000..a851516 --- /dev/null +++ b/Example/include/freetype/config/ftconfig.h @@ -0,0 +1,51 @@ +/**************************************************************************** + * + * ftconfig.h + * + * ANSI-specific configuration file (specification only). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + + /************************************************************************** + * + * This header file contains a number of macro definitions that are used by + * the rest of the engine. Most of the macros here are automatically + * determined at compile time, and you should not need to change it to port + * FreeType, except to compile the library with a non-ANSI compiler. + * + * Note however that if some specific modifications are needed, we advise + * you to place a modified copy in your build directory. + * + * The build directory is usually `builds/`, and contains + * system-specific files that are always included first when building the + * library. + * + * This ANSI version should stay in `include/config/`. + * + */ + +#ifndef FTCONFIG_H_ +#define FTCONFIG_H_ + +#include +#include FT_CONFIG_OPTIONS_H +#include FT_CONFIG_STANDARD_LIBRARY_H + +#include +#include +#include + +#endif /* FTCONFIG_H_ */ + + +/* END */ diff --git a/Example/include/freetype/config/ftheader.h b/Example/include/freetype/config/ftheader.h new file mode 100644 index 0000000..e607bce --- /dev/null +++ b/Example/include/freetype/config/ftheader.h @@ -0,0 +1,836 @@ +/**************************************************************************** + * + * ftheader.h + * + * Build macros of the FreeType 2 library. + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. 
+ * + */ + +#ifndef FTHEADER_H_ +#define FTHEADER_H_ + + + /*@***********************************************************************/ + /* */ + /* */ + /* FT_BEGIN_HEADER */ + /* */ + /* */ + /* This macro is used in association with @FT_END_HEADER in header */ + /* files to ensure that the declarations within are properly */ + /* encapsulated in an `extern "C" { .. }` block when included from a */ + /* C++ compiler. */ + /* */ +#ifndef FT_BEGIN_HEADER +# ifdef __cplusplus +# define FT_BEGIN_HEADER extern "C" { +# else +# define FT_BEGIN_HEADER /* nothing */ +# endif +#endif + + + /*@***********************************************************************/ + /* */ + /* */ + /* FT_END_HEADER */ + /* */ + /* */ + /* This macro is used in association with @FT_BEGIN_HEADER in header */ + /* files to ensure that the declarations within are properly */ + /* encapsulated in an `extern "C" { .. }` block when included from a */ + /* C++ compiler. */ + /* */ +#ifndef FT_END_HEADER +# ifdef __cplusplus +# define FT_END_HEADER } +# else +# define FT_END_HEADER /* nothing */ +# endif +#endif + + + /************************************************************************** + * + * Aliases for the FreeType 2 public and configuration files. + * + */ + + /************************************************************************** + * + * @section: + * header_file_macros + * + * @title: + * Header File Macros + * + * @abstract: + * Macro definitions used to `#include` specific header files. + * + * @description: + * In addition to the normal scheme of including header files like + * + * ``` + * #include + * #include + * #include + * ``` + * + * it is possible to used named macros instead. They can be used + * directly in `#include` statements as in + * + * ``` + * #include FT_FREETYPE_H + * #include FT_MULTIPLE_MASTERS_H + * #include FT_GLYPH_H + * ``` + * + * These macros were introduced to overcome the infamous 8.3~naming rule + * required by DOS (and `FT_MULTIPLE_MASTERS_H` is a lot more meaningful + * than `ftmm.h`). + * + */ + + + /* configuration files */ + + /************************************************************************** + * + * @macro: + * FT_CONFIG_CONFIG_H + * + * @description: + * A macro used in `#include` statements to name the file containing + * FreeType~2 configuration data. + * + */ +#ifndef FT_CONFIG_CONFIG_H +#define FT_CONFIG_CONFIG_H +#endif + + + /************************************************************************** + * + * @macro: + * FT_CONFIG_STANDARD_LIBRARY_H + * + * @description: + * A macro used in `#include` statements to name the file containing + * FreeType~2 interface to the standard C library functions. + * + */ +#ifndef FT_CONFIG_STANDARD_LIBRARY_H +#define FT_CONFIG_STANDARD_LIBRARY_H +#endif + + + /************************************************************************** + * + * @macro: + * FT_CONFIG_OPTIONS_H + * + * @description: + * A macro used in `#include` statements to name the file containing + * FreeType~2 project-specific configuration options. + * + */ +#ifndef FT_CONFIG_OPTIONS_H +#define FT_CONFIG_OPTIONS_H +#endif + + + /************************************************************************** + * + * @macro: + * FT_CONFIG_MODULES_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * list of FreeType~2 modules that are statically linked to new library + * instances in @FT_Init_FreeType. 
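For reference, a minimal client sketch of the named-macro include scheme documented here, using only standard FreeType calls (FT_Init_FreeType / FT_Done_FreeType); it is illustrative and not code from this commit:

    #include <ft2build.h>
    #include FT_FREETYPE_H    /* expands to the base FreeType header */

    int  main( void )
    {
      FT_Library  library;

      /* a new library instance registers the modules listed in FT_CONFIG_MODULES_H */
      if ( FT_Init_FreeType( &library ) )
        return 1;

      /* ... use the library ... */

      FT_Done_FreeType( library );
      return 0;
    }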
+ * + */ +#ifndef FT_CONFIG_MODULES_H +#define FT_CONFIG_MODULES_H +#endif + + /* */ + + /* public headers */ + + /************************************************************************** + * + * @macro: + * FT_FREETYPE_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * base FreeType~2 API. + * + */ +#define FT_FREETYPE_H + + + /************************************************************************** + * + * @macro: + * FT_ERRORS_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * list of FreeType~2 error codes (and messages). + * + * It is included by @FT_FREETYPE_H. + * + */ +#define FT_ERRORS_H + + + /************************************************************************** + * + * @macro: + * FT_MODULE_ERRORS_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * list of FreeType~2 module error offsets (and messages). + * + */ +#define FT_MODULE_ERRORS_H + + + /************************************************************************** + * + * @macro: + * FT_SYSTEM_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * FreeType~2 interface to low-level operations (i.e., memory management + * and stream i/o). + * + * It is included by @FT_FREETYPE_H. + * + */ +#define FT_SYSTEM_H + + + /************************************************************************** + * + * @macro: + * FT_IMAGE_H + * + * @description: + * A macro used in `#include` statements to name the file containing type + * definitions related to glyph images (i.e., bitmaps, outlines, + * scan-converter parameters). + * + * It is included by @FT_FREETYPE_H. + * + */ +#define FT_IMAGE_H + + + /************************************************************************** + * + * @macro: + * FT_TYPES_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * basic data types defined by FreeType~2. + * + * It is included by @FT_FREETYPE_H. + * + */ +#define FT_TYPES_H + + + /************************************************************************** + * + * @macro: + * FT_LIST_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * list management API of FreeType~2. + * + * (Most applications will never need to include this file.) + * + */ +#define FT_LIST_H + + + /************************************************************************** + * + * @macro: + * FT_OUTLINE_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * scalable outline management API of FreeType~2. + * + */ +#define FT_OUTLINE_H + + + /************************************************************************** + * + * @macro: + * FT_SIZES_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * API which manages multiple @FT_Size objects per face. + * + */ +#define FT_SIZES_H + + + /************************************************************************** + * + * @macro: + * FT_MODULE_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * module management API of FreeType~2. 
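A hedged sketch of the module-management API named by FT_MODULE_H, setting a TrueType driver property as described under FT_DRIVER_H; the property name follows the standard FreeType documentation and the value 35 is only illustrative:

    #include <ft2build.h>
    #include FT_FREETYPE_H
    #include FT_MODULE_H    /* FT_Property_Set */

    static void  set_tt_interpreter( FT_Library  library )
    {
      FT_UInt  interpreter_version = 35;    /* illustrative value */

      /* tune the TrueType driver's bytecode interpreter */
      FT_Property_Set( library, "truetype", "interpreter-version",
                       &interpreter_version );
    }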
+ * + */ +#define FT_MODULE_H + + + /************************************************************************** + * + * @macro: + * FT_RENDER_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * renderer module management API of FreeType~2. + * + */ +#define FT_RENDER_H + + + /************************************************************************** + * + * @macro: + * FT_DRIVER_H + * + * @description: + * A macro used in `#include` statements to name the file containing + * structures and macros related to the driver modules. + * + */ +#define FT_DRIVER_H + + + /************************************************************************** + * + * @macro: + * FT_AUTOHINTER_H + * + * @description: + * A macro used in `#include` statements to name the file containing + * structures and macros related to the auto-hinting module. + * + * Deprecated since version~2.9; use @FT_DRIVER_H instead. + * + */ +#define FT_AUTOHINTER_H FT_DRIVER_H + + + /************************************************************************** + * + * @macro: + * FT_CFF_DRIVER_H + * + * @description: + * A macro used in `#include` statements to name the file containing + * structures and macros related to the CFF driver module. + * + * Deprecated since version~2.9; use @FT_DRIVER_H instead. + * + */ +#define FT_CFF_DRIVER_H FT_DRIVER_H + + + /************************************************************************** + * + * @macro: + * FT_TRUETYPE_DRIVER_H + * + * @description: + * A macro used in `#include` statements to name the file containing + * structures and macros related to the TrueType driver module. + * + * Deprecated since version~2.9; use @FT_DRIVER_H instead. + * + */ +#define FT_TRUETYPE_DRIVER_H FT_DRIVER_H + + + /************************************************************************** + * + * @macro: + * FT_PCF_DRIVER_H + * + * @description: + * A macro used in `#include` statements to name the file containing + * structures and macros related to the PCF driver module. + * + * Deprecated since version~2.9; use @FT_DRIVER_H instead. + * + */ +#define FT_PCF_DRIVER_H FT_DRIVER_H + + + /************************************************************************** + * + * @macro: + * FT_TYPE1_TABLES_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * types and API specific to the Type~1 format. + * + */ +#define FT_TYPE1_TABLES_H + + + /************************************************************************** + * + * @macro: + * FT_TRUETYPE_IDS_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * enumeration values which identify name strings, languages, encodings, + * etc. This file really contains a _large_ set of constant macro + * definitions, taken from the TrueType and OpenType specifications. + * + */ +#define FT_TRUETYPE_IDS_H + + + /************************************************************************** + * + * @macro: + * FT_TRUETYPE_TABLES_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * types and API specific to the TrueType (as well as OpenType) format. 
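A short sketch of the SFNT-table API named by FT_TRUETYPE_TABLES_H; standard FreeType calls, assuming an already-opened face:

    #include <ft2build.h>
    #include FT_FREETYPE_H
    #include FT_TRUETYPE_TABLES_H

    static void  inspect_os2( FT_Face  face )
    {
      /* returns NULL if the face carries no OS/2 table (e.g. Type 1 fonts) */
      TT_OS2*  os2 = (TT_OS2*)FT_Get_Sfnt_Table( face, FT_SFNT_OS2 );

      if ( os2 )
      {
        FT_UShort  weight = os2->usWeightClass;  /* 400 = regular, 700 = bold */
        (void)weight;
      }
    }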
+ * + */ +#define FT_TRUETYPE_TABLES_H + + + /************************************************************************** + * + * @macro: + * FT_TRUETYPE_TAGS_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * definitions of TrueType four-byte 'tags' which identify blocks in + * SFNT-based font formats (i.e., TrueType and OpenType). + * + */ +#define FT_TRUETYPE_TAGS_H + + + /************************************************************************** + * + * @macro: + * FT_BDF_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * definitions of an API which accesses BDF-specific strings from a face. + * + */ +#define FT_BDF_H + + + /************************************************************************** + * + * @macro: + * FT_CID_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * definitions of an API which access CID font information from a face. + * + */ +#define FT_CID_H + + + /************************************************************************** + * + * @macro: + * FT_GZIP_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * definitions of an API which supports gzip-compressed files. + * + */ +#define FT_GZIP_H + + + /************************************************************************** + * + * @macro: + * FT_LZW_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * definitions of an API which supports LZW-compressed files. + * + */ +#define FT_LZW_H + + + /************************************************************************** + * + * @macro: + * FT_BZIP2_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * definitions of an API which supports bzip2-compressed files. + * + */ +#define FT_BZIP2_H + + + /************************************************************************** + * + * @macro: + * FT_WINFONTS_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * definitions of an API which supports Windows FNT files. + * + */ +#define FT_WINFONTS_H + + + /************************************************************************** + * + * @macro: + * FT_GLYPH_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * API of the optional glyph management component. + * + */ +#define FT_GLYPH_H + + + /************************************************************************** + * + * @macro: + * FT_BITMAP_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * API of the optional bitmap conversion component. + * + */ +#define FT_BITMAP_H + + + /************************************************************************** + * + * @macro: + * FT_BBOX_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * API of the optional exact bounding box computation routines. + * + */ +#define FT_BBOX_H + + + /************************************************************************** + * + * @macro: + * FT_CACHE_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * API of the optional FreeType~2 cache sub-system. 
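A minimal sketch combining the base and glyph/bitmap APIs named above to rasterize a single character; Freshman.ttf is one of the fonts added elsewhere in this commit, while the character 'A' and the 48-pixel size are only illustrative:

    #include <ft2build.h>
    #include FT_FREETYPE_H

    static int  render_capital_a( FT_Library  library )
    {
      FT_Face   face;
      FT_Error  error;

      error = FT_New_Face( library, "Freshman.ttf", 0, &face );
      if ( error == FT_Err_Unknown_File_Format )
        return -1;    /* readable file, but not a supported font format */
      else if ( error )
        return -1;    /* file could not be opened or read, or is broken */

      /* request a 48-pixel em, then render 'A' into face->glyph->bitmap */
      FT_Set_Pixel_Sizes( face, 0, 48 );
      error = FT_Load_Char( face, 'A', FT_LOAD_RENDER );

      FT_Done_Face( face );
      return error ? -1 : 0;
    }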
+ * + */ +#define FT_CACHE_H + + + /************************************************************************** + * + * @macro: + * FT_MAC_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * Macintosh-specific FreeType~2 API. The latter is used to access fonts + * embedded in resource forks. + * + * This header file must be explicitly included by client applications + * compiled on the Mac (note that the base API still works though). + * + */ +#define FT_MAC_H + + + /************************************************************************** + * + * @macro: + * FT_MULTIPLE_MASTERS_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * optional multiple-masters management API of FreeType~2. + * + */ +#define FT_MULTIPLE_MASTERS_H + + + /************************************************************************** + * + * @macro: + * FT_SFNT_NAMES_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * optional FreeType~2 API which accesses embedded 'name' strings in + * SFNT-based font formats (i.e., TrueType and OpenType). + * + */ +#define FT_SFNT_NAMES_H + + + /************************************************************************** + * + * @macro: + * FT_OPENTYPE_VALIDATE_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * optional FreeType~2 API which validates OpenType tables ('BASE', + * 'GDEF', 'GPOS', 'GSUB', 'JSTF'). + * + */ +#define FT_OPENTYPE_VALIDATE_H + + + /************************************************************************** + * + * @macro: + * FT_GX_VALIDATE_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * optional FreeType~2 API which validates TrueTypeGX/AAT tables ('feat', + * 'mort', 'morx', 'bsln', 'just', 'kern', 'opbd', 'trak', 'prop'). + * + */ +#define FT_GX_VALIDATE_H + + + /************************************************************************** + * + * @macro: + * FT_PFR_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * FreeType~2 API which accesses PFR-specific data. + * + */ +#define FT_PFR_H + + + /************************************************************************** + * + * @macro: + * FT_STROKER_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * FreeType~2 API which provides functions to stroke outline paths. + */ +#define FT_STROKER_H + + + /************************************************************************** + * + * @macro: + * FT_SYNTHESIS_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * FreeType~2 API which performs artificial obliquing and emboldening. + */ +#define FT_SYNTHESIS_H + + + /************************************************************************** + * + * @macro: + * FT_FONT_FORMATS_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * FreeType~2 API which provides functions specific to font formats. 
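The font-format query named by FT_FONT_FORMATS_H reduces to a single call; a minimal sketch, assuming an opened face:

    #include <ft2build.h>
    #include FT_FREETYPE_H
    #include FT_FONT_FORMATS_H

    static const char*  font_format_of( FT_Face  face )
    {
      /* returns a static string such as "TrueType" or "CFF" */
      return FT_Get_Font_Format( face );
    }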
+ */ +#define FT_FONT_FORMATS_H + + /* deprecated */ +#define FT_XFREE86_H FT_FONT_FORMATS_H + + + /************************************************************************** + * + * @macro: + * FT_TRIGONOMETRY_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * FreeType~2 API which performs trigonometric computations (e.g., + * cosines and arc tangents). + */ +#define FT_TRIGONOMETRY_H + + + /************************************************************************** + * + * @macro: + * FT_LCD_FILTER_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * FreeType~2 API which performs color filtering for subpixel rendering. + */ +#define FT_LCD_FILTER_H + + + /************************************************************************** + * + * @macro: + * FT_INCREMENTAL_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * FreeType~2 API which performs incremental glyph loading. + */ +#define FT_INCREMENTAL_H + + + /************************************************************************** + * + * @macro: + * FT_GASP_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * FreeType~2 API which returns entries from the TrueType GASP table. + */ +#define FT_GASP_H + + + /************************************************************************** + * + * @macro: + * FT_ADVANCES_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * FreeType~2 API which returns individual and ranged glyph advances. + */ +#define FT_ADVANCES_H + + + /************************************************************************** + * + * @macro: + * FT_COLOR_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * FreeType~2 API which handles the OpenType 'CPAL' table. + */ +#define FT_COLOR_H + + + /************************************************************************** + * + * @macro: + * FT_OTSVG_H + * + * @description: + * A macro used in `#include` statements to name the file containing the + * FreeType~2 API which handles the OpenType 'SVG~' glyphs. + */ +#define FT_OTSVG_H + + + /* */ + + /* These header files don't need to be included by the user. */ +#define FT_ERROR_DEFINITIONS_H +#define FT_PARAMETER_TAGS_H + + /* Deprecated macros. */ +#define FT_UNPATENTED_HINTING_H +#define FT_TRUETYPE_UNPATENTED_H + + /* `FT_CACHE_H` is the only header file needed for the cache subsystem. */ +#define FT_CACHE_IMAGE_H FT_CACHE_H +#define FT_CACHE_SMALL_BITMAPS_H FT_CACHE_H +#define FT_CACHE_CHARMAP_H FT_CACHE_H + + /* The internals of the cache sub-system are no longer exposed. We */ + /* default to `FT_CACHE_H` at the moment just in case, but we know */ + /* of no rogue client that uses them. */ + /* */ +#define FT_CACHE_MANAGER_H FT_CACHE_H +#define FT_CACHE_INTERNAL_MRU_H FT_CACHE_H +#define FT_CACHE_INTERNAL_MANAGER_H FT_CACHE_H +#define FT_CACHE_INTERNAL_CACHE_H FT_CACHE_H +#define FT_CACHE_INTERNAL_GLYPH_H FT_CACHE_H +#define FT_CACHE_INTERNAL_IMAGE_H FT_CACHE_H +#define FT_CACHE_INTERNAL_SBITS_H FT_CACHE_H + +/* TODO(david): Move this section below to a different header */ +#ifdef FT2_BUILD_LIBRARY +#if defined( _MSC_VER ) /* Visual C++ (and Intel C++) */ + + /* We disable the warning `conditional expression is constant' here */ + /* in order to compile cleanly with the maximum level of warnings. 
*/ + /* In particular, the warning complains about stuff like `while(0)' */ + /* which is very useful in macro definitions. There is no benefit */ + /* in having it enabled. */ +#pragma warning( disable : 4127 ) + +#endif /* _MSC_VER */ +#endif /* FT2_BUILD_LIBRARY */ + +#endif /* FTHEADER_H_ */ + + +/* END */ diff --git a/Example/include/freetype/config/ftmodule.h b/Example/include/freetype/config/ftmodule.h new file mode 100644 index 0000000..b315bab --- /dev/null +++ b/Example/include/freetype/config/ftmodule.h @@ -0,0 +1,33 @@ +/* + * This file registers the FreeType modules compiled into the library. + * + * If you use GNU make, this file IS NOT USED! Instead, it is created in + * the objects directory (normally `/objs/`) based on information + * from `/modules.cfg`. + * + * Please read `docs/INSTALL.ANY` and `docs/CUSTOMIZE` how to compile + * FreeType without GNU make. + * + */ + +FT_USE_MODULE( FT_Module_Class, autofit_module_class ) +FT_USE_MODULE( FT_Driver_ClassRec, tt_driver_class ) +FT_USE_MODULE( FT_Driver_ClassRec, t1_driver_class ) +FT_USE_MODULE( FT_Driver_ClassRec, cff_driver_class ) +FT_USE_MODULE( FT_Driver_ClassRec, t1cid_driver_class ) +FT_USE_MODULE( FT_Driver_ClassRec, pfr_driver_class ) +FT_USE_MODULE( FT_Driver_ClassRec, t42_driver_class ) +FT_USE_MODULE( FT_Driver_ClassRec, winfnt_driver_class ) +FT_USE_MODULE( FT_Driver_ClassRec, pcf_driver_class ) +FT_USE_MODULE( FT_Driver_ClassRec, bdf_driver_class ) +FT_USE_MODULE( FT_Module_Class, psaux_module_class ) +FT_USE_MODULE( FT_Module_Class, psnames_module_class ) +FT_USE_MODULE( FT_Module_Class, pshinter_module_class ) +FT_USE_MODULE( FT_Module_Class, sfnt_module_class ) +FT_USE_MODULE( FT_Renderer_Class, ft_smooth_renderer_class ) +FT_USE_MODULE( FT_Renderer_Class, ft_raster1_renderer_class ) +FT_USE_MODULE( FT_Renderer_Class, ft_sdf_renderer_class ) +FT_USE_MODULE( FT_Renderer_Class, ft_bitmap_sdf_renderer_class ) +FT_USE_MODULE( FT_Renderer_Class, ft_svg_renderer_class ) + +/* EOF */ diff --git a/Example/include/freetype/config/ftoption.h b/Example/include/freetype/config/ftoption.h new file mode 100644 index 0000000..1976b33 --- /dev/null +++ b/Example/include/freetype/config/ftoption.h @@ -0,0 +1,1014 @@ +/**************************************************************************** + * + * ftoption.h + * + * User-selectable configuration macros (specification only). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTOPTION_H_ +#define FTOPTION_H_ + + +#include + + +FT_BEGIN_HEADER + + /************************************************************************** + * + * USER-SELECTABLE CONFIGURATION MACROS + * + * This file contains the default configuration macro definitions for a + * standard build of the FreeType library. There are three ways to use + * this file to build project-specific versions of the library: + * + * - You can modify this file by hand, but this is not recommended in + * cases where you would like to build several versions of the library + * from a single source directory. 
+ * + * - You can put a copy of this file in your build directory, more + * precisely in `$BUILD/freetype/config/ftoption.h`, where `$BUILD` is + * the name of a directory that is included _before_ the FreeType include + * path during compilation. + * + * The default FreeType Makefiles use the build directory + * `builds/` by default, but you can easily change that for your + * own projects. + * + * - Copy the file to `$BUILD/ft2build.h` and modify it + * slightly to pre-define the macro `FT_CONFIG_OPTIONS_H` used to locate + * this file during the build. For example, + * + * ``` + * #define FT_CONFIG_OPTIONS_H + * #include + * ``` + * + * will use `$BUILD/myftoptions.h` instead of this file for macro + * definitions. + * + * Note also that you can similarly pre-define the macro + * `FT_CONFIG_MODULES_H` used to locate the file listing of the modules + * that are statically linked to the library at compile time. By + * default, this file is ``. + * + * We highly recommend using the third method whenever possible. + * + */ + + + /*************************************************************************/ + /*************************************************************************/ + /**** ****/ + /**** G E N E R A L F R E E T Y P E 2 C O N F I G U R A T I O N ****/ + /**** ****/ + /*************************************************************************/ + /*************************************************************************/ + + + /*#************************************************************************ + * + * If you enable this configuration option, FreeType recognizes an + * environment variable called `FREETYPE_PROPERTIES`, which can be used to + * control the various font drivers and modules. The controllable + * properties are listed in the section @properties. + * + * You have to undefine this configuration option on platforms that lack + * the concept of environment variables (and thus don't have the `getenv` + * function), for example Windows CE. + * + * `FREETYPE_PROPERTIES` has the following syntax form (broken here into + * multiple lines for better readability). + * + * ``` + * + * ':' + * '=' + * + * ':' + * '=' + * ... + * ``` + * + * Example: + * + * ``` + * FREETYPE_PROPERTIES=truetype:interpreter-version=35 \ + * cff:no-stem-darkening=1 + * ``` + * + */ +#define FT_CONFIG_OPTION_ENVIRONMENT_PROPERTIES + + + /************************************************************************** + * + * Uncomment the line below if you want to activate LCD rendering + * technology similar to ClearType in this build of the library. This + * technology triples the resolution in the direction color subpixels. To + * mitigate color fringes inherent to this technology, you also need to + * explicitly set up LCD filtering. + * + * When this macro is not defined, FreeType offers alternative LCD + * rendering technology that produces excellent output. + */ +/* #define FT_CONFIG_OPTION_SUBPIXEL_RENDERING */ + + + /************************************************************************** + * + * Many compilers provide a non-ANSI 64-bit data type that can be used by + * FreeType to speed up some computations. However, this will create some + * problems when compiling the library in strict ANSI mode. + * + * For this reason, the use of 64-bit integers is normally disabled when + * the `__STDC__` macro is defined. You can however disable this by + * defining the macro `FT_CONFIG_OPTION_FORCE_INT64` here. 
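Spelled out, the third (recommended) method described above is a one-line pre-definition at the top of the copied ft2build.h; `myftoptions.h` is the example name used in the surrounding text, and the rest of the copied header stays as shipped:

    /* $BUILD/ft2build.h  (a copy of the original, edited at the top) */
    #define FT_CONFIG_OPTIONS_H  <myftoptions.h>
    /* ... the remainder of the copied ft2build.h is left unchanged ... */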
+ * + * For most compilers, this will only create compilation warnings when + * building the library. + * + * ObNote: The compiler-specific 64-bit integers are detected in the + * file `ftconfig.h` either statically or through the `configure` + * script on supported platforms. + */ +#undef FT_CONFIG_OPTION_FORCE_INT64 + + + /************************************************************************** + * + * If this macro is defined, do not try to use an assembler version of + * performance-critical functions (e.g., @FT_MulFix). You should only do + * that to verify that the assembler function works properly, or to execute + * benchmark tests of the various implementations. + */ +/* #define FT_CONFIG_OPTION_NO_ASSEMBLER */ + + + /************************************************************************** + * + * If this macro is defined, try to use an inlined assembler version of the + * @FT_MulFix function, which is a 'hotspot' when loading and hinting + * glyphs, and which should be executed as fast as possible. + * + * Note that if your compiler or CPU is not supported, this will default to + * the standard and portable implementation found in `ftcalc.c`. + */ +#define FT_CONFIG_OPTION_INLINE_MULFIX + + + /************************************************************************** + * + * LZW-compressed file support. + * + * FreeType now handles font files that have been compressed with the + * `compress` program. This is mostly used to parse many of the PCF + * files that come with various X11 distributions. The implementation + * uses NetBSD's `zopen` to partially uncompress the file on the fly (see + * `src/lzw/ftgzip.c`). + * + * Define this macro if you want to enable this 'feature'. + */ +#define FT_CONFIG_OPTION_USE_LZW + + + /************************************************************************** + * + * Gzip-compressed file support. + * + * FreeType now handles font files that have been compressed with the + * `gzip` program. This is mostly used to parse many of the PCF files + * that come with XFree86. The implementation uses 'zlib' to partially + * uncompress the file on the fly (see `src/gzip/ftgzip.c`). + * + * Define this macro if you want to enable this 'feature'. See also the + * macro `FT_CONFIG_OPTION_SYSTEM_ZLIB` below. + */ +#define FT_CONFIG_OPTION_USE_ZLIB + + + /************************************************************************** + * + * ZLib library selection + * + * This macro is only used when `FT_CONFIG_OPTION_USE_ZLIB` is defined. + * It allows FreeType's 'ftgzip' component to link to the system's + * installation of the ZLib library. This is useful on systems like + * Unix or VMS where it generally is already available. + * + * If you let it undefined, the component will use its own copy of the + * zlib sources instead. These have been modified to be included + * directly within the component and **not** export external function + * names. This allows you to link any program with FreeType _and_ ZLib + * without linking conflicts. + * + * Do not `#undef` this macro here since the build system might define + * it for certain configurations only. + * + * If you use a build system like cmake or the `configure` script, + * options set by those programs have precedence, overwriting the value + * here with the configured one. + * + * If you use the GNU make build system directly (that is, without the + * `configure` script) and you define this macro, you also have to pass + * `SYSTEM_ZLIB=yes` as an argument to make. 
+ */ +/* #define FT_CONFIG_OPTION_SYSTEM_ZLIB */ + + + /************************************************************************** + * + * Bzip2-compressed file support. + * + * FreeType now handles font files that have been compressed with the + * `bzip2` program. This is mostly used to parse many of the PCF files + * that come with XFree86. The implementation uses `libbz2` to partially + * uncompress the file on the fly (see `src/bzip2/ftbzip2.c`). Contrary + * to gzip, bzip2 currently is not included and need to use the system + * available bzip2 implementation. + * + * Define this macro if you want to enable this 'feature'. + * + * If you use a build system like cmake or the `configure` script, + * options set by those programs have precedence, overwriting the value + * here with the configured one. + */ +/* #define FT_CONFIG_OPTION_USE_BZIP2 */ + + + /************************************************************************** + * + * Define to disable the use of file stream functions and types, `FILE`, + * `fopen`, etc. Enables the use of smaller system libraries on embedded + * systems that have multiple system libraries, some with or without file + * stream support, in the cases where file stream support is not necessary + * such as memory loading of font files. + */ +/* #define FT_CONFIG_OPTION_DISABLE_STREAM_SUPPORT */ + + + /************************************************************************** + * + * PNG bitmap support. + * + * FreeType now handles loading color bitmap glyphs in the PNG format. + * This requires help from the external libpng library. Uncompressed + * color bitmaps do not need any external libraries and will be supported + * regardless of this configuration. + * + * Define this macro if you want to enable this 'feature'. + * + * If you use a build system like cmake or the `configure` script, + * options set by those programs have precedence, overwriting the value + * here with the configured one. + */ +/* #define FT_CONFIG_OPTION_USE_PNG */ + + + /************************************************************************** + * + * HarfBuzz support. + * + * FreeType uses the HarfBuzz library to improve auto-hinting of OpenType + * fonts. If available, many glyphs not directly addressable by a font's + * character map will be hinted also. + * + * Define this macro if you want to enable this 'feature'. + * + * If you use a build system like cmake or the `configure` script, + * options set by those programs have precedence, overwriting the value + * here with the configured one. + */ +/* #define FT_CONFIG_OPTION_USE_HARFBUZZ */ + + + /************************************************************************** + * + * Brotli support. + * + * FreeType uses the Brotli library to provide support for decompressing + * WOFF2 streams. + * + * Define this macro if you want to enable this 'feature'. + * + * If you use a build system like cmake or the `configure` script, + * options set by those programs have precedence, overwriting the value + * here with the configured one. + */ +/* #define FT_CONFIG_OPTION_USE_BROTLI */ + + + /************************************************************************** + * + * Glyph Postscript Names handling + * + * By default, FreeType 2 is compiled with the 'psnames' module. This + * module is in charge of converting a glyph name string into a Unicode + * value, or return a Macintosh standard glyph name for the use with the + * TrueType 'post' table. 
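 *
 * From the client's point of view, glyph names are reached through the
 * public API rather than through 'psnames' directly; a minimal sketch,
 * assuming a valid `face` and glyph index `gindex` (error handling
 * omitted):
 *
 * ```
 * char     name[128];
 * FT_UInt  index;
 *
 * FT_Get_Glyph_Name( face, gindex, name, sizeof ( name ) );
 * index = FT_Get_Name_Index( face, name );
 * ```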
+ * + * Undefine this macro if you do not want 'psnames' compiled in your + * build of FreeType. This has the following effects: + * + * - The TrueType driver will provide its own set of glyph names, if you + * build it to support postscript names in the TrueType 'post' table, + * but will not synthesize a missing Unicode charmap. + * + * - The Type~1 driver will not be able to synthesize a Unicode charmap + * out of the glyphs found in the fonts. + * + * You would normally undefine this configuration macro when building a + * version of FreeType that doesn't contain a Type~1 or CFF driver. + */ +#define FT_CONFIG_OPTION_POSTSCRIPT_NAMES + + + /************************************************************************** + * + * Postscript Names to Unicode Values support + * + * By default, FreeType~2 is built with the 'psnames' module compiled in. + * Among other things, the module is used to convert a glyph name into a + * Unicode value. This is especially useful in order to synthesize on + * the fly a Unicode charmap from the CFF/Type~1 driver through a big + * table named the 'Adobe Glyph List' (AGL). + * + * Undefine this macro if you do not want the Adobe Glyph List compiled + * in your 'psnames' module. The Type~1 driver will not be able to + * synthesize a Unicode charmap out of the glyphs found in the fonts. + */ +#define FT_CONFIG_OPTION_ADOBE_GLYPH_LIST + + + /************************************************************************** + * + * Support for Mac fonts + * + * Define this macro if you want support for outline fonts in Mac format + * (mac dfont, mac resource, macbinary containing a mac resource) on + * non-Mac platforms. + * + * Note that the 'FOND' resource isn't checked. + */ +#define FT_CONFIG_OPTION_MAC_FONTS + + + /************************************************************************** + * + * Guessing methods to access embedded resource forks + * + * Enable extra Mac fonts support on non-Mac platforms (e.g., GNU/Linux). + * + * Resource forks which include fonts data are stored sometimes in + * locations which users or developers don't expected. In some cases, + * resource forks start with some offset from the head of a file. In + * other cases, the actual resource fork is stored in file different from + * what the user specifies. If this option is activated, FreeType tries + * to guess whether such offsets or different file names must be used. + * + * Note that normal, direct access of resource forks is controlled via + * the `FT_CONFIG_OPTION_MAC_FONTS` option. + */ +#ifdef FT_CONFIG_OPTION_MAC_FONTS +#define FT_CONFIG_OPTION_GUESSING_EMBEDDED_RFORK +#endif + + + /************************************************************************** + * + * Allow the use of `FT_Incremental_Interface` to load typefaces that + * contain no glyph data, but supply it via a callback function. This is + * required by clients supporting document formats which supply font data + * incrementally as the document is parsed, such as the Ghostscript + * interpreter for the PostScript language. + */ +#define FT_CONFIG_OPTION_INCREMENTAL + + + /************************************************************************** + * + * The size in bytes of the render pool used by the scan-line converter to + * do all of its work. + */ +#define FT_RENDER_POOL_SIZE 16384L + + + /************************************************************************** + * + * FT_MAX_MODULES + * + * The maximum number of modules that can be registered in a single + * FreeType library object. 32~is the default. 
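 *
 * For reference, a module is registered with `FT_Add_Module` (declared
 * in `ftmodapi.h`), and each successful call counts toward this limit.
 * A minimal sketch, with `my_module_class` standing in for a module
 * class defined elsewhere:
 *
 * ```
 * FT_Error  error = FT_Add_Module( library, &my_module_class );
 * ```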
+ */ +#define FT_MAX_MODULES 32 + + + /************************************************************************** + * + * Debug level + * + * FreeType can be compiled in debug or trace mode. In debug mode, + * errors are reported through the 'ftdebug' component. In trace mode, + * additional messages are sent to the standard output during execution. + * + * Define `FT_DEBUG_LEVEL_ERROR` to build the library in debug mode. + * Define `FT_DEBUG_LEVEL_TRACE` to build it in trace mode. + * + * Don't define any of these macros to compile in 'release' mode! + * + * Do not `#undef` these macros here since the build system might define + * them for certain configurations only. + */ +/* #define FT_DEBUG_LEVEL_ERROR */ +/* #define FT_DEBUG_LEVEL_TRACE */ + + + /************************************************************************** + * + * Logging + * + * Compiling FreeType in debug or trace mode makes FreeType write error + * and trace log messages to `stderr`. Enabling this macro + * automatically forces the `FT_DEBUG_LEVEL_ERROR` and + * `FT_DEBUG_LEVEL_TRACE` macros and allows FreeType to write error and + * trace log messages to a file instead of `stderr`. For writing logs + * to a file, FreeType uses an the external `dlg` library (the source + * code is in `src/dlg`). + * + * This option needs a C99 compiler. + */ +/* #define FT_DEBUG_LOGGING */ + + + /************************************************************************** + * + * Autofitter debugging + * + * If `FT_DEBUG_AUTOFIT` is defined, FreeType provides some means to + * control the autofitter behaviour for debugging purposes with global + * boolean variables (consequently, you should **never** enable this + * while compiling in 'release' mode): + * + * ``` + * af_debug_disable_horz_hints_ + * af_debug_disable_vert_hints_ + * af_debug_disable_blue_hints_ + * ``` + * + * Additionally, the following functions provide dumps of various + * internal autofit structures to stdout (using `printf`): + * + * ``` + * af_glyph_hints_dump_points + * af_glyph_hints_dump_segments + * af_glyph_hints_dump_edges + * af_glyph_hints_get_num_segments + * af_glyph_hints_get_segment_offset + * ``` + * + * As an argument, they use another global variable: + * + * ``` + * af_debug_hints_ + * ``` + * + * Please have a look at the `ftgrid` demo program to see how those + * variables and macros should be used. + * + * Do not `#undef` these macros here since the build system might define + * them for certain configurations only. + */ +/* #define FT_DEBUG_AUTOFIT */ + + + /************************************************************************** + * + * Memory Debugging + * + * FreeType now comes with an integrated memory debugger that is capable + * of detecting simple errors like memory leaks or double deletes. To + * compile it within your build of the library, you should define + * `FT_DEBUG_MEMORY` here. + * + * Note that the memory debugger is only activated at runtime when when + * the _environment_ variable `FT2_DEBUG_MEMORY` is defined also! + * + * Do not `#undef` this macro here since the build system might define it + * for certain configurations only. + */ +/* #define FT_DEBUG_MEMORY */ + + + /************************************************************************** + * + * Module errors + * + * If this macro is set (which is _not_ the default), the higher byte of + * an error code gives the module in which the error has occurred, while + * the lower byte is the real error code. 
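 *
 * With this layout, the two parts can be separated again with the
 * `FT_ERROR_BASE` and `FT_ERROR_MODULE` macros from `fterrors.h`; a
 * minimal sketch (error handling of the load call omitted):
 *
 * ```
 * FT_Error  error = FT_Load_Glyph( face, gindex, FT_LOAD_DEFAULT );
 *
 * int  base_error = FT_ERROR_BASE( error );
 * int  module     = FT_ERROR_MODULE( error );
 * ```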
+ * + * Setting this macro makes sense for debugging purposes only, since it + * would break source compatibility of certain programs that use + * FreeType~2. + * + * More details can be found in the files `ftmoderr.h` and `fterrors.h`. + */ +#undef FT_CONFIG_OPTION_USE_MODULE_ERRORS + + + /************************************************************************** + * + * OpenType SVG Glyph Support + * + * Setting this macro enables support for OpenType SVG glyphs. By + * default, FreeType can only fetch SVG documents. However, it can also + * render them if external rendering hook functions are plugged in at + * runtime. + * + * More details on the hooks can be found in file `otsvg.h`. + */ +#define FT_CONFIG_OPTION_SVG + + + /************************************************************************** + * + * Error Strings + * + * If this macro is set, `FT_Error_String` will return meaningful + * descriptions. This is not enabled by default to reduce the overall + * size of FreeType. + * + * More details can be found in the file `fterrors.h`. + */ +/* #define FT_CONFIG_OPTION_ERROR_STRINGS */ + + + /*************************************************************************/ + /*************************************************************************/ + /**** ****/ + /**** S F N T D R I V E R C O N F I G U R A T I O N ****/ + /**** ****/ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * Define `TT_CONFIG_OPTION_EMBEDDED_BITMAPS` if you want to support + * embedded bitmaps in all formats using the 'sfnt' module (namely + * TrueType~& OpenType). + */ +#define TT_CONFIG_OPTION_EMBEDDED_BITMAPS + + + /************************************************************************** + * + * Define `TT_CONFIG_OPTION_COLOR_LAYERS` if you want to support colored + * outlines (from the 'COLR'/'CPAL' tables) in all formats using the 'sfnt' + * module (namely TrueType~& OpenType). + */ +#define TT_CONFIG_OPTION_COLOR_LAYERS + + + /************************************************************************** + * + * Define `TT_CONFIG_OPTION_POSTSCRIPT_NAMES` if you want to be able to + * load and enumerate Postscript names of glyphs in a TrueType or OpenType + * file. + * + * Note that if you do not compile the 'psnames' module by undefining the + * above `FT_CONFIG_OPTION_POSTSCRIPT_NAMES` macro, the 'sfnt' module will + * contain additional code to read the PostScript name table from a font. + * + * (By default, the module uses 'psnames' to extract glyph names.) + */ +#define TT_CONFIG_OPTION_POSTSCRIPT_NAMES + + + /************************************************************************** + * + * Define `TT_CONFIG_OPTION_SFNT_NAMES` if your applications need to access + * the internal name table in a SFNT-based format like TrueType or + * OpenType. The name table contains various strings used to describe the + * font, like family name, copyright, version, etc. It does not contain + * any glyph name though. + * + * Accessing SFNT names is done through the functions declared in + * `ftsnames.h`. + */ +#define TT_CONFIG_OPTION_SFNT_NAMES + + + /************************************************************************** + * + * TrueType CMap support + * + * Here you can fine-tune which TrueType CMap table format shall be + * supported. 
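 *
 * Whichever formats are enabled below, client code accesses the
 * character map through the normal high-level API; a minimal sketch,
 * assuming a valid `face` (error handling omitted):
 *
 * ```
 * FT_UInt  gindex;
 *
 * FT_Select_Charmap( face, FT_ENCODING_UNICODE );
 * gindex = FT_Get_Char_Index( face, 0x20AC );
 * ```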
+ */ +#define TT_CONFIG_CMAP_FORMAT_0 +#define TT_CONFIG_CMAP_FORMAT_2 +#define TT_CONFIG_CMAP_FORMAT_4 +#define TT_CONFIG_CMAP_FORMAT_6 +#define TT_CONFIG_CMAP_FORMAT_8 +#define TT_CONFIG_CMAP_FORMAT_10 +#define TT_CONFIG_CMAP_FORMAT_12 +#define TT_CONFIG_CMAP_FORMAT_13 +#define TT_CONFIG_CMAP_FORMAT_14 + + + /*************************************************************************/ + /*************************************************************************/ + /**** ****/ + /**** T R U E T Y P E D R I V E R C O N F I G U R A T I O N ****/ + /**** ****/ + /*************************************************************************/ + /*************************************************************************/ + + /************************************************************************** + * + * Define `TT_CONFIG_OPTION_BYTECODE_INTERPRETER` if you want to compile a + * bytecode interpreter in the TrueType driver. + * + * By undefining this, you will only compile the code necessary to load + * TrueType glyphs without hinting. + * + * Do not `#undef` this macro here, since the build system might define it + * for certain configurations only. + */ +#define TT_CONFIG_OPTION_BYTECODE_INTERPRETER + + + /************************************************************************** + * + * Define `TT_CONFIG_OPTION_SUBPIXEL_HINTING` if you want to compile + * subpixel hinting support into the TrueType driver. This modifies the + * TrueType hinting mechanism when anything but `FT_RENDER_MODE_MONO` is + * requested. + * + * In particular, it modifies the bytecode interpreter to interpret (or + * not) instructions in a certain way so that all TrueType fonts look like + * they do in a Windows ClearType (DirectWrite) environment. See [1] for a + * technical overview on what this means. See `ttinterp.h` for more + * details on this option. + * + * The new default mode focuses on applying a minimal set of rules to all + * fonts indiscriminately so that modern and web fonts render well while + * legacy fonts render okay. The corresponding interpreter version is v40. + * The so-called Infinality mode (v38) is no longer available in FreeType. + * + * By undefining these, you get rendering behavior like on Windows without + * ClearType, i.e., Windows XP without ClearType enabled and Win9x + * (interpreter version v35). Or not, depending on how much hinting blood + * and testing tears the font designer put into a given font. If you + * define one or both subpixel hinting options, you can switch between + * between v35 and the ones you define (using `FT_Property_Set`). + * + * This option requires `TT_CONFIG_OPTION_BYTECODE_INTERPRETER` to be + * defined. + * + * [1] + * https://www.microsoft.com/typography/cleartype/truetypecleartype.aspx + */ +#define TT_CONFIG_OPTION_SUBPIXEL_HINTING + + + /************************************************************************** + * + * Define `TT_CONFIG_OPTION_COMPONENT_OFFSET_SCALED` to compile the + * TrueType glyph loader to use Apple's definition of how to handle + * component offsets in composite glyphs. + * + * Apple and MS disagree on the default behavior of component offsets in + * composites. Apple says that they should be scaled by the scaling + * factors in the transformation matrix (roughly, it's more complex) while + * MS says they should not. OpenType defines two bits in the composite + * flags array which can be used to disambiguate, but old fonts will not + * have them. 
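 *
 * (As an aside to the `TT_CONFIG_OPTION_SUBPIXEL_HINTING` note above:
 * the run-time switch it mentions goes through `FT_Property_Set`; a
 * minimal sketch, assuming an initialized `library` and a build that
 * still offers interpreter version~35:
 *
 * ```
 * FT_UInt  interpreter_version = TT_INTERPRETER_VERSION_35;
 *
 * FT_Property_Set( library, "truetype",
 *                  "interpreter-version", &interpreter_version );
 * ```
 *
 * `TT_INTERPRETER_VERSION_35` and its siblings are declared in
 * `ftdriver.h`.)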
+ * + * https://www.microsoft.com/typography/otspec/glyf.htm + * https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6glyf.html + */ +#undef TT_CONFIG_OPTION_COMPONENT_OFFSET_SCALED + + + /************************************************************************** + * + * Define `TT_CONFIG_OPTION_GX_VAR_SUPPORT` if you want to include support + * for Apple's distortable font technology ('fvar', 'gvar', 'cvar', and + * 'avar' tables). Tagged 'Font Variations', this is now part of OpenType + * also. This has many similarities to Type~1 Multiple Masters support. + */ +#define TT_CONFIG_OPTION_GX_VAR_SUPPORT + + + /************************************************************************** + * + * Define `TT_CONFIG_OPTION_NO_BORING_EXPANSION` if you want to exclude + * support for 'boring' OpenType specification expansions. + * + * https://github.com/harfbuzz/boring-expansion-spec + * + * Right now, the following features are covered: + * + * - 'avar' version 2.0 + * + * Most likely, this is a temporary configuration option to be removed in + * the near future, since it is assumed that eventually those features are + * added to the OpenType standard. + */ +/* #define TT_CONFIG_OPTION_NO_BORING_EXPANSION */ + + + /************************************************************************** + * + * Define `TT_CONFIG_OPTION_BDF` if you want to include support for an + * embedded 'BDF~' table within SFNT-based bitmap formats. + */ +#define TT_CONFIG_OPTION_BDF + + + /************************************************************************** + * + * Option `TT_CONFIG_OPTION_MAX_RUNNABLE_OPCODES` controls the maximum + * number of bytecode instructions executed for a single run of the + * bytecode interpreter, needed to prevent infinite loops. You don't want + * to change this except for very special situations (e.g., making a + * library fuzzer spend less time to handle broken fonts). + * + * It is not expected that this value is ever modified by a configuring + * script; instead, it gets surrounded with `#ifndef ... #endif` so that + * the value can be set as a preprocessor option on the compiler's command + * line. + */ +#ifndef TT_CONFIG_OPTION_MAX_RUNNABLE_OPCODES +#define TT_CONFIG_OPTION_MAX_RUNNABLE_OPCODES 1000000L +#endif + + + /*************************************************************************/ + /*************************************************************************/ + /**** ****/ + /**** T Y P E 1 D R I V E R C O N F I G U R A T I O N ****/ + /**** ****/ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * `T1_MAX_DICT_DEPTH` is the maximum depth of nest dictionaries and arrays + * in the Type~1 stream (see `t1load.c`). A minimum of~4 is required. + */ +#define T1_MAX_DICT_DEPTH 5 + + + /************************************************************************** + * + * `T1_MAX_SUBRS_CALLS` details the maximum number of nested sub-routine + * calls during glyph loading. + */ +#define T1_MAX_SUBRS_CALLS 16 + + + /************************************************************************** + * + * `T1_MAX_CHARSTRING_OPERANDS` is the charstring stack's capacity. A + * minimum of~16 is required. + * + * The Chinese font 'MingTiEG-Medium' (covering the CNS 11643 character + * set) needs 256. 
+ */ +#define T1_MAX_CHARSTRINGS_OPERANDS 256 + + + /************************************************************************** + * + * Define this configuration macro if you want to prevent the compilation + * of the 't1afm' module, which is in charge of reading Type~1 AFM files + * into an existing face. Note that if set, the Type~1 driver will be + * unable to produce kerning distances. + */ +#undef T1_CONFIG_OPTION_NO_AFM + + + /************************************************************************** + * + * Define this configuration macro if you want to prevent the compilation + * of the Multiple Masters font support in the Type~1 driver. + */ +#undef T1_CONFIG_OPTION_NO_MM_SUPPORT + + + /************************************************************************** + * + * `T1_CONFIG_OPTION_OLD_ENGINE` controls whether the pre-Adobe Type~1 + * engine gets compiled into FreeType. If defined, it is possible to + * switch between the two engines using the `hinting-engine` property of + * the 'type1' driver module. + */ +/* #define T1_CONFIG_OPTION_OLD_ENGINE */ + + + /*************************************************************************/ + /*************************************************************************/ + /**** ****/ + /**** C F F D R I V E R C O N F I G U R A T I O N ****/ + /**** ****/ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * Using `CFF_CONFIG_OPTION_DARKENING_PARAMETER_{X,Y}{1,2,3,4}` it is + * possible to set up the default values of the four control points that + * define the stem darkening behaviour of the (new) CFF engine. For more + * details please read the documentation of the `darkening-parameters` + * property (file `ftdriver.h`), which allows the control at run-time. + * + * Do **not** undefine these macros! + */ +#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_X1 500 +#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y1 400 + +#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_X2 1000 +#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y2 275 + +#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_X3 1667 +#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y3 275 + +#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_X4 2333 +#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y4 0 + + + /************************************************************************** + * + * `CFF_CONFIG_OPTION_OLD_ENGINE` controls whether the pre-Adobe CFF engine + * gets compiled into FreeType. If defined, it is possible to switch + * between the two engines using the `hinting-engine` property of the 'cff' + * driver module. + */ +/* #define CFF_CONFIG_OPTION_OLD_ENGINE */ + + + /*************************************************************************/ + /*************************************************************************/ + /**** ****/ + /**** P C F D R I V E R C O N F I G U R A T I O N ****/ + /**** ****/ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * There are many PCF fonts just called 'Fixed' which look completely + * different, and which have nothing to do with each other. 
When selecting + * 'Fixed' in KDE or Gnome one gets results that appear rather random, the + * style changes often if one changes the size and one cannot select some + * fonts at all. This option makes the 'pcf' module prepend the foundry + * name (plus a space) to the family name. + * + * We also check whether we have 'wide' characters; all put together, we + * get family names like 'Sony Fixed' or 'Misc Fixed Wide'. + * + * If this option is activated, it can be controlled with the + * `no-long-family-names` property of the 'pcf' driver module. + */ +/* #define PCF_CONFIG_OPTION_LONG_FAMILY_NAMES */ + + + /*************************************************************************/ + /*************************************************************************/ + /**** ****/ + /**** A U T O F I T M O D U L E C O N F I G U R A T I O N ****/ + /**** ****/ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * Compile 'autofit' module with CJK (Chinese, Japanese, Korean) script + * support. + */ +#define AF_CONFIG_OPTION_CJK + + + /************************************************************************** + * + * Compile 'autofit' module with fallback Indic script support, covering + * some scripts that the 'latin' submodule of the 'autofit' module doesn't + * (yet) handle. Currently, this needs option `AF_CONFIG_OPTION_CJK`. + */ +#ifdef AF_CONFIG_OPTION_CJK +#define AF_CONFIG_OPTION_INDIC +#endif + + + /************************************************************************** + * + * Use TrueType-like size metrics for 'light' auto-hinting. + * + * It is strongly recommended to avoid this option, which exists only to + * help some legacy applications retain its appearance and behaviour with + * respect to auto-hinted TrueType fonts. + * + * The very reason this option exists at all are GNU/Linux distributions + * like Fedora that did not un-patch the following change (which was + * present in FreeType between versions 2.4.6 and 2.7.1, inclusive). + * + * ``` + * 2011-07-16 Steven Chu + * + * [truetype] Fix metrics on size request for scalable fonts. + * ``` + * + * This problematic commit is now reverted (more or less). + */ +/* #define AF_CONFIG_OPTION_TT_SIZE_METRICS */ + + /* */ + + + /* + * This macro is obsolete. Support has been removed in FreeType version + * 2.5. + */ +/* #define FT_CONFIG_OPTION_OLD_INTERNALS */ + + + /* + * The next two macros are defined if native TrueType hinting is + * requested by the definitions above. Don't change this. + */ +#ifdef TT_CONFIG_OPTION_BYTECODE_INTERPRETER +#define TT_USE_BYTECODE_INTERPRETER +#ifdef TT_CONFIG_OPTION_SUBPIXEL_HINTING +#define TT_SUPPORT_SUBPIXEL_HINTING_MINIMAL +#endif +#endif + + + /* + * The TT_SUPPORT_COLRV1 macro is defined to indicate to clients that this + * version of FreeType has support for 'COLR' v1 API. This definition is + * useful to FreeType clients that want to build in support for 'COLR' v1 + * depending on a tip-of-tree checkout before it is officially released in + * FreeType, and while the feature cannot yet be tested against using + * version macros. Don't change this macro. This may be removed once the + * feature is in a FreeType release version and version macros can be used + * to test for availability. 
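 *
 * A client build might therefore guard its 'COLR' v1 code paths as in
 * the following sketch (after including the usual FreeType headers):
 *
 * ```
 * #ifdef TT_SUPPORT_COLRV1
 *   ... calls into the 'COLR' v1 API ...
 * #endif
 * ```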
+ */ +#ifdef TT_CONFIG_OPTION_COLOR_LAYERS +#define TT_SUPPORT_COLRV1 +#endif + + + /* + * Check CFF darkening parameters. The checks are the same as in function + * `cff_property_set` in file `cffdrivr.c`. + */ +#if CFF_CONFIG_OPTION_DARKENING_PARAMETER_X1 < 0 || \ + CFF_CONFIG_OPTION_DARKENING_PARAMETER_X2 < 0 || \ + CFF_CONFIG_OPTION_DARKENING_PARAMETER_X3 < 0 || \ + CFF_CONFIG_OPTION_DARKENING_PARAMETER_X4 < 0 || \ + \ + CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y1 < 0 || \ + CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y2 < 0 || \ + CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y3 < 0 || \ + CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y4 < 0 || \ + \ + CFF_CONFIG_OPTION_DARKENING_PARAMETER_X1 > \ + CFF_CONFIG_OPTION_DARKENING_PARAMETER_X2 || \ + CFF_CONFIG_OPTION_DARKENING_PARAMETER_X2 > \ + CFF_CONFIG_OPTION_DARKENING_PARAMETER_X3 || \ + CFF_CONFIG_OPTION_DARKENING_PARAMETER_X3 > \ + CFF_CONFIG_OPTION_DARKENING_PARAMETER_X4 || \ + \ + CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y1 > 500 || \ + CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y2 > 500 || \ + CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y3 > 500 || \ + CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y4 > 500 +#error "Invalid CFF darkening parameters!" +#endif + + +FT_END_HEADER + +#endif /* FTOPTION_H_ */ + + +/* END */ diff --git a/Example/include/freetype/config/ftstdlib.h b/Example/include/freetype/config/ftstdlib.h new file mode 100644 index 0000000..f65148a --- /dev/null +++ b/Example/include/freetype/config/ftstdlib.h @@ -0,0 +1,185 @@ +/**************************************************************************** + * + * ftstdlib.h + * + * ANSI-specific library and header configuration file (specification + * only). + * + * Copyright (C) 2002-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + + /************************************************************************** + * + * This file is used to group all `#includes` to the ANSI~C library that + * FreeType normally requires. It also defines macros to rename the + * standard functions within the FreeType source code. + * + * Load a file which defines `FTSTDLIB_H_` before this one to override it. + * + */ + + +#ifndef FTSTDLIB_H_ +#define FTSTDLIB_H_ + + +#include + +#define ft_ptrdiff_t ptrdiff_t + + + /************************************************************************** + * + * integer limits + * + * `UINT_MAX` and `ULONG_MAX` are used to automatically compute the size of + * `int` and `long` in bytes at compile-time. So far, this works for all + * platforms the library has been tested on. We also check `ULLONG_MAX` + * to see whether we can use 64-bit `long long` later on. + * + * Note that on the extremely rare platforms that do not provide integer + * types that are _exactly_ 16 and 32~bits wide (e.g., some old Crays where + * `int` is 36~bits), we do not make any guarantee about the correct + * behaviour of FreeType~2 with all fonts. + * + * In these cases, `ftconfig.h` will refuse to compile anyway with a + * message like 'couldn't find 32-bit type' or something similar. 
+ * + */ + + +#include + +#define FT_CHAR_BIT CHAR_BIT +#define FT_USHORT_MAX USHRT_MAX +#define FT_INT_MAX INT_MAX +#define FT_INT_MIN INT_MIN +#define FT_UINT_MAX UINT_MAX +#define FT_LONG_MIN LONG_MIN +#define FT_LONG_MAX LONG_MAX +#define FT_ULONG_MAX ULONG_MAX +#ifdef LLONG_MAX +#define FT_LLONG_MAX LLONG_MAX +#endif +#ifdef LLONG_MIN +#define FT_LLONG_MIN LLONG_MIN +#endif +#ifdef ULLONG_MAX +#define FT_ULLONG_MAX ULLONG_MAX +#endif + + + /************************************************************************** + * + * character and string processing + * + */ + + +#include + +#define ft_memchr memchr +#define ft_memcmp memcmp +#define ft_memcpy memcpy +#define ft_memmove memmove +#define ft_memset memset +#define ft_strcat strcat +#define ft_strcmp strcmp +#define ft_strcpy strcpy +#define ft_strlen strlen +#define ft_strncmp strncmp +#define ft_strncpy strncpy +#define ft_strrchr strrchr +#define ft_strstr strstr + + + /************************************************************************** + * + * file handling + * + */ + + +#include + +#define FT_FILE FILE +#define ft_fclose fclose +#define ft_fopen fopen +#define ft_fread fread +#define ft_fseek fseek +#define ft_ftell ftell +#define ft_snprintf snprintf + + + /************************************************************************** + * + * sorting + * + */ + + +#include + +#define ft_qsort qsort + + + /************************************************************************** + * + * memory allocation + * + */ + + +#define ft_scalloc calloc +#define ft_sfree free +#define ft_smalloc malloc +#define ft_srealloc realloc + + + /************************************************************************** + * + * miscellaneous + * + */ + + +#define ft_strtol strtol +#define ft_getenv getenv + + + /************************************************************************** + * + * execution control + * + */ + + +#include + +#define ft_jmp_buf jmp_buf /* note: this cannot be a typedef since */ + /* `jmp_buf` is defined as a macro */ + /* on certain platforms */ + +#define ft_longjmp longjmp +#define ft_setjmp( b ) setjmp( *(ft_jmp_buf*) &(b) ) /* same thing here */ + + + /* The following is only used for debugging purposes, i.e., if */ + /* `FT_DEBUG_LEVEL_ERROR` or `FT_DEBUG_LEVEL_TRACE` are defined. */ + +#include + + +#endif /* FTSTDLIB_H_ */ + + +/* END */ diff --git a/Example/include/freetype/config/integer-types.h b/Example/include/freetype/config/integer-types.h new file mode 100644 index 0000000..7258b50 --- /dev/null +++ b/Example/include/freetype/config/integer-types.h @@ -0,0 +1,250 @@ +/**************************************************************************** + * + * config/integer-types.h + * + * FreeType integer types definitions. + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ +#ifndef FREETYPE_CONFIG_INTEGER_TYPES_H_ +#define FREETYPE_CONFIG_INTEGER_TYPES_H_ + + /* There are systems (like the Texas Instruments 'C54x) where a `char` */ + /* has 16~bits. ANSI~C says that `sizeof(char)` is always~1. Since an */ + /* `int` has 16~bits also for this system, `sizeof(int)` gives~1 which */ + /* is probably unexpected. 
*/ + /* */ + /* `CHAR_BIT` (defined in `limits.h`) gives the number of bits in a */ + /* `char` type. */ + +#ifndef FT_CHAR_BIT +#define FT_CHAR_BIT CHAR_BIT +#endif + +#ifndef FT_SIZEOF_INT + + /* The size of an `int` type. */ +#if FT_UINT_MAX == 0xFFFFUL +#define FT_SIZEOF_INT ( 16 / FT_CHAR_BIT ) +#elif FT_UINT_MAX == 0xFFFFFFFFUL +#define FT_SIZEOF_INT ( 32 / FT_CHAR_BIT ) +#elif FT_UINT_MAX > 0xFFFFFFFFUL && FT_UINT_MAX == 0xFFFFFFFFFFFFFFFFUL +#define FT_SIZEOF_INT ( 64 / FT_CHAR_BIT ) +#else +#error "Unsupported size of `int' type!" +#endif + +#endif /* !defined(FT_SIZEOF_INT) */ + +#ifndef FT_SIZEOF_LONG + + /* The size of a `long` type. A five-byte `long` (as used e.g. on the */ + /* DM642) is recognized but avoided. */ +#if FT_ULONG_MAX == 0xFFFFFFFFUL +#define FT_SIZEOF_LONG ( 32 / FT_CHAR_BIT ) +#elif FT_ULONG_MAX > 0xFFFFFFFFUL && FT_ULONG_MAX == 0xFFFFFFFFFFUL +#define FT_SIZEOF_LONG ( 32 / FT_CHAR_BIT ) +#elif FT_ULONG_MAX > 0xFFFFFFFFUL && FT_ULONG_MAX == 0xFFFFFFFFFFFFFFFFUL +#define FT_SIZEOF_LONG ( 64 / FT_CHAR_BIT ) +#else +#error "Unsupported size of `long' type!" +#endif + +#endif /* !defined(FT_SIZEOF_LONG) */ + +#ifndef FT_SIZEOF_LONG_LONG + + /* The size of a `long long` type if available */ +#if defined( FT_ULLONG_MAX ) && FT_ULLONG_MAX >= 0xFFFFFFFFFFFFFFFFULL +#define FT_SIZEOF_LONG_LONG ( 64 / FT_CHAR_BIT ) +#else +#define FT_SIZEOF_LONG_LONG 0 +#endif + +#endif /* !defined(FT_SIZEOF_LONG_LONG) */ + + + /************************************************************************** + * + * @section: + * basic_types + * + */ + + + /************************************************************************** + * + * @type: + * FT_Int16 + * + * @description: + * A typedef for a 16bit signed integer type. + */ + typedef signed short FT_Int16; + + + /************************************************************************** + * + * @type: + * FT_UInt16 + * + * @description: + * A typedef for a 16bit unsigned integer type. + */ + typedef unsigned short FT_UInt16; + + /* */ + + + /* this #if 0 ... #endif clause is for documentation purposes */ +#if 0 + + /************************************************************************** + * + * @type: + * FT_Int32 + * + * @description: + * A typedef for a 32bit signed integer type. The size depends on the + * configuration. + */ + typedef signed XXX FT_Int32; + + + /************************************************************************** + * + * @type: + * FT_UInt32 + * + * A typedef for a 32bit unsigned integer type. The size depends on the + * configuration. + */ + typedef unsigned XXX FT_UInt32; + + + /************************************************************************** + * + * @type: + * FT_Int64 + * + * A typedef for a 64bit signed integer type. The size depends on the + * configuration. Only defined if there is real 64bit support; + * otherwise, it gets emulated with a structure (if necessary). + */ + typedef signed XXX FT_Int64; + + + /************************************************************************** + * + * @type: + * FT_UInt64 + * + * A typedef for a 64bit unsigned integer type. The size depends on the + * configuration. Only defined if there is real 64bit support; + * otherwise, it gets emulated with a structure (if necessary). 
+ */ + typedef unsigned XXX FT_UInt64; + + /* */ + +#endif + +#if FT_SIZEOF_INT == ( 32 / FT_CHAR_BIT ) + + typedef signed int FT_Int32; + typedef unsigned int FT_UInt32; + +#elif FT_SIZEOF_LONG == ( 32 / FT_CHAR_BIT ) + + typedef signed long FT_Int32; + typedef unsigned long FT_UInt32; + +#else +#error "no 32bit type found -- please check your configuration files" +#endif + + + /* look up an integer type that is at least 32~bits */ +#if FT_SIZEOF_INT >= ( 32 / FT_CHAR_BIT ) + + typedef int FT_Fast; + typedef unsigned int FT_UFast; + +#elif FT_SIZEOF_LONG >= ( 32 / FT_CHAR_BIT ) + + typedef long FT_Fast; + typedef unsigned long FT_UFast; + +#endif + + + /* determine whether we have a 64-bit integer type */ +#if FT_SIZEOF_LONG == ( 64 / FT_CHAR_BIT ) + +#define FT_INT64 long +#define FT_UINT64 unsigned long + +#elif FT_SIZEOF_LONG_LONG >= ( 64 / FT_CHAR_BIT ) + +#define FT_INT64 long long int +#define FT_UINT64 unsigned long long int + + /************************************************************************** + * + * A 64-bit data type may create compilation problems if you compile in + * strict ANSI mode. To avoid them, we disable other 64-bit data types if + * `__STDC__` is defined. You can however ignore this rule by defining the + * `FT_CONFIG_OPTION_FORCE_INT64` configuration macro. + */ +#elif !defined( __STDC__ ) || defined( FT_CONFIG_OPTION_FORCE_INT64 ) + +#if defined( _MSC_VER ) && _MSC_VER >= 900 /* Visual C++ (and Intel C++) */ + + /* this compiler provides the `__int64` type */ +#define FT_INT64 __int64 +#define FT_UINT64 unsigned __int64 + +#elif defined( __BORLANDC__ ) /* Borland C++ */ + + /* XXXX: We should probably check the value of `__BORLANDC__` in order */ + /* to test the compiler version. */ + + /* this compiler provides the `__int64` type */ +#define FT_INT64 __int64 +#define FT_UINT64 unsigned __int64 + +#elif defined( __WATCOMC__ ) && __WATCOMC__ >= 1100 /* Watcom C++ */ + +#define FT_INT64 long long int +#define FT_UINT64 unsigned long long int + +#elif defined( __MWERKS__ ) /* Metrowerks CodeWarrior */ + +#define FT_INT64 long long int +#define FT_UINT64 unsigned long long int + +#elif defined( __GNUC__ ) + + /* GCC provides the `long long` type */ +#define FT_INT64 long long int +#define FT_UINT64 unsigned long long int + +#endif /* !__STDC__ */ + +#endif /* FT_SIZEOF_LONG == (64 / FT_CHAR_BIT) */ + +#ifdef FT_INT64 + typedef FT_INT64 FT_Int64; + typedef FT_UINT64 FT_UInt64; +#endif + + +#endif /* FREETYPE_CONFIG_INTEGER_TYPES_H_ */ diff --git a/Example/include/freetype/config/mac-support.h b/Example/include/freetype/config/mac-support.h new file mode 100644 index 0000000..b77b96d --- /dev/null +++ b/Example/include/freetype/config/mac-support.h @@ -0,0 +1,49 @@ +/**************************************************************************** + * + * config/mac-support.h + * + * Mac/OS X support configuration header. + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. 
+ * + */ +#ifndef FREETYPE_CONFIG_MAC_SUPPORT_H_ +#define FREETYPE_CONFIG_MAC_SUPPORT_H_ + + /************************************************************************** + * + * Mac support + * + * This is the only necessary change, so it is defined here instead + * providing a new configuration file. + */ +#if defined( __APPLE__ ) || ( defined( __MWERKS__ ) && defined( macintosh ) ) + /* No Carbon frameworks for 64bit 10.4.x. */ + /* `AvailabilityMacros.h` is available since Mac OS X 10.2, */ + /* so guess the system version by maximum errno before inclusion. */ +#include +#ifdef ECANCELED /* defined since 10.2 */ +#include "AvailabilityMacros.h" +#endif +#if defined( __LP64__ ) && \ + ( MAC_OS_X_VERSION_MIN_REQUIRED <= MAC_OS_X_VERSION_10_4 ) +#undef FT_MACINTOSH +#endif + +#elif defined( __SC__ ) || defined( __MRC__ ) + /* Classic MacOS compilers */ +#include "ConditionalMacros.h" +#if TARGET_OS_MAC +#define FT_MACINTOSH 1 +#endif + +#endif /* Mac support */ + +#endif /* FREETYPE_CONFIG_MAC_SUPPORT_H_ */ diff --git a/Example/include/freetype/config/public-macros.h b/Example/include/freetype/config/public-macros.h new file mode 100644 index 0000000..23d0fa6 --- /dev/null +++ b/Example/include/freetype/config/public-macros.h @@ -0,0 +1,138 @@ +/**************************************************************************** + * + * config/public-macros.h + * + * Define a set of compiler macros used in public FreeType headers. + * + * Copyright (C) 2020-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + /* + * The definitions in this file are used by the public FreeType headers + * and thus should be considered part of the public API. + * + * Other compiler-specific macro definitions that are not exposed by the + * FreeType API should go into + * `include/freetype/internal/compiler-macros.h` instead. + */ +#ifndef FREETYPE_CONFIG_PUBLIC_MACROS_H_ +#define FREETYPE_CONFIG_PUBLIC_MACROS_H_ + + /* + * `FT_BEGIN_HEADER` and `FT_END_HEADER` might have already been defined + * by `freetype/config/ftheader.h`, but we don't want to include this + * header here, so redefine the macros here only when needed. Their + * definition is very stable, so keeping them in sync with the ones in the + * header should not be a maintenance issue. + */ +#ifndef FT_BEGIN_HEADER +#ifdef __cplusplus +#define FT_BEGIN_HEADER extern "C" { +#else +#define FT_BEGIN_HEADER /* empty */ +#endif +#endif /* FT_BEGIN_HEADER */ + +#ifndef FT_END_HEADER +#ifdef __cplusplus +#define FT_END_HEADER } +#else +#define FT_END_HEADER /* empty */ +#endif +#endif /* FT_END_HEADER */ + + +FT_BEGIN_HEADER + + /* + * Mark a function declaration as public. This ensures it will be + * properly exported to client code. Place this before a function + * declaration. + * + * NOTE: This macro should be considered an internal implementation + * detail, and not part of the FreeType API. It is only defined here + * because it is needed by `FT_EXPORT`. 
+ */ + + /* Visual C, mingw */ +#if defined( _WIN32 ) + +#if defined( FT2_BUILD_LIBRARY ) && defined( DLL_EXPORT ) +#define FT_PUBLIC_FUNCTION_ATTRIBUTE __declspec( dllexport ) +#elif defined( DLL_IMPORT ) +#define FT_PUBLIC_FUNCTION_ATTRIBUTE __declspec( dllimport ) +#endif + + /* gcc, clang */ +#elif ( defined( __GNUC__ ) && __GNUC__ >= 4 ) || defined( __clang__ ) +#define FT_PUBLIC_FUNCTION_ATTRIBUTE \ + __attribute__(( visibility( "default" ) )) + + /* Sun */ +#elif defined( __SUNPRO_C ) && __SUNPRO_C >= 0x550 +#define FT_PUBLIC_FUNCTION_ATTRIBUTE __global +#endif + + +#ifndef FT_PUBLIC_FUNCTION_ATTRIBUTE +#define FT_PUBLIC_FUNCTION_ATTRIBUTE /* empty */ +#endif + + + /* + * Define a public FreeType API function. This ensures it is properly + * exported or imported at build time. The macro parameter is the + * function's return type as in: + * + * FT_EXPORT( FT_Bool ) + * FT_Object_Method( FT_Object obj, + * ... ); + * + * NOTE: This requires that all `FT_EXPORT` uses are inside + * `FT_BEGIN_HEADER ... FT_END_HEADER` blocks. This guarantees that the + * functions are exported with C linkage, even when the header is included + * by a C++ source file. + */ +#define FT_EXPORT( x ) FT_PUBLIC_FUNCTION_ATTRIBUTE extern x + + + /* + * `FT_UNUSED` indicates that a given parameter is not used -- this is + * only used to get rid of unpleasant compiler warnings. + * + * Technically, this was not meant to be part of the public API, but some + * third-party code depends on it. + */ +#ifndef FT_UNUSED +#define FT_UNUSED( arg ) ( (arg) = (arg) ) +#endif + + + /* + * Support for casts in both C and C++. + */ +#ifdef __cplusplus +#define FT_STATIC_CAST( type, var ) static_cast(var) +#define FT_REINTERPRET_CAST( type, var ) reinterpret_cast(var) + +#define FT_STATIC_BYTE_CAST( type, var ) \ + static_cast( static_cast( var ) ) +#else +#define FT_STATIC_CAST( type, var ) (type)(var) +#define FT_REINTERPRET_CAST( type, var ) (type)(var) + +#define FT_STATIC_BYTE_CAST( type, var ) (type)(unsigned char)(var) +#endif + + +FT_END_HEADER + +#endif /* FREETYPE_CONFIG_PUBLIC_MACROS_H_ */ diff --git a/Example/include/freetype/freetype.h b/Example/include/freetype/freetype.h new file mode 100644 index 0000000..4a074a4 --- /dev/null +++ b/Example/include/freetype/freetype.h @@ -0,0 +1,5337 @@ +/**************************************************************************** + * + * freetype.h + * + * FreeType high-level API and common types (specification only). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FREETYPE_H_ +#define FREETYPE_H_ + + +#include +#include FT_CONFIG_CONFIG_H +#include +#include + + +FT_BEGIN_HEADER + + + + /************************************************************************** + * + * @section: + * preamble + * + * @title: + * Preamble + * + * @abstract: + * What FreeType is and isn't + * + * @description: + * FreeType is a library that provides access to glyphs in font files. It + * scales the glyph images and their metrics to a requested size, and it + * rasterizes the glyph images to produce pixel or subpixel alpha coverage + * bitmaps. + * + * Note that FreeType is _not_ a text layout engine. 
You have to use + * higher-level libraries like HarfBuzz, Pango, or ICU for that. + * + * Note also that FreeType does _not_ perform alpha blending or + * compositing the resulting bitmaps or pixmaps by itself. Use your + * favourite graphics library (for example, Cairo or Skia) to further + * process FreeType's output. + * + */ + + + /************************************************************************** + * + * @section: + * header_inclusion + * + * @title: + * FreeType's header inclusion scheme + * + * @abstract: + * How client applications should include FreeType header files. + * + * @description: + * To be as flexible as possible (and for historical reasons), you must + * load file `ft2build.h` first before other header files, for example + * + * ``` + * #include + * + * #include + * #include + * ``` + */ + + + /************************************************************************** + * + * @section: + * user_allocation + * + * @title: + * User allocation + * + * @abstract: + * How client applications should allocate FreeType data structures. + * + * @description: + * FreeType assumes that structures allocated by the user and passed as + * arguments are zeroed out except for the actual data. In other words, + * it is recommended to use `calloc` (or variants of it) instead of + * `malloc` for allocation. + * + */ + + + /************************************************************************** + * + * @section: + * font_testing_macros + * + * @title: + * Font Testing Macros + * + * @abstract: + * Macros to test various properties of fonts. + * + * @description: + * Macros to test the most important font properties. + * + * It is recommended to use these high-level macros instead of directly + * testing the corresponding flags, which are scattered over various + * structures. + * + * @order: + * FT_HAS_HORIZONTAL + * FT_HAS_VERTICAL + * FT_HAS_KERNING + * FT_HAS_FIXED_SIZES + * FT_HAS_GLYPH_NAMES + * FT_HAS_COLOR + * FT_HAS_MULTIPLE_MASTERS + * FT_HAS_SVG + * FT_HAS_SBIX + * FT_HAS_SBIX_OVERLAY + * + * FT_IS_SFNT + * FT_IS_SCALABLE + * FT_IS_FIXED_WIDTH + * FT_IS_CID_KEYED + * FT_IS_TRICKY + * FT_IS_NAMED_INSTANCE + * FT_IS_VARIATION + * + */ + + + /************************************************************************** + * + * @section: + * library_setup + * + * @title: + * Library Setup + * + * @abstract: + * Functions to start and end the usage of the FreeType library. + * + * @description: + * Functions to start and end the usage of the FreeType library. + * + * Note that @FT_Library_Version and @FREETYPE_XXX are of limited use + * because even a new release of FreeType with only documentation + * changes increases the version number. + * + * @order: + * FT_Library + * FT_Init_FreeType + * FT_Done_FreeType + * + * FT_Library_Version + * FREETYPE_XXX + * + */ + + + /************************************************************************** + * + * @section: + * face_creation + * + * @title: + * Face Creation + * + * @abstract: + * Functions to manage fonts. + * + * @description: + * The functions and structures collected in this section operate on + * fonts globally. 
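 *
 * A minimal sketch of creating and destroying a face follows; all
 * error handling is omitted and the font path is a placeholder.
 *
 * ```
 * FT_Library  library;
 * FT_Face     face;
 *
 * FT_Init_FreeType( &library );
 * FT_New_Face( library, "font.ttf", 0, &face );
 *
 * ...
 *
 * FT_Done_Face( face );
 * FT_Done_FreeType( library );
 * ```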
+ * + * @order: + * FT_Face + * FT_FaceRec + * FT_FACE_FLAG_XXX + * FT_STYLE_FLAG_XXX + * + * FT_New_Face + * FT_Done_Face + * FT_Reference_Face + * FT_New_Memory_Face + * FT_Face_Properties + * FT_Open_Face + * FT_Open_Args + * FT_OPEN_XXX + * FT_Parameter + * FT_Attach_File + * FT_Attach_Stream + * + */ + + + /************************************************************************** + * + * @section: + * sizing_and_scaling + * + * @title: + * Sizing and Scaling + * + * @abstract: + * Functions to manage font sizes. + * + * @description: + * The functions and structures collected in this section are related to + * selecting and manipulating the size of a font globally. + * + * @order: + * FT_Size + * FT_SizeRec + * FT_Size_Metrics + * + * FT_Bitmap_Size + * + * FT_Set_Char_Size + * FT_Set_Pixel_Sizes + * FT_Request_Size + * FT_Select_Size + * FT_Size_Request_Type + * FT_Size_RequestRec + * FT_Size_Request + * + * FT_Set_Transform + * FT_Get_Transform + * + */ + + + /************************************************************************** + * + * @section: + * glyph_retrieval + * + * @title: + * Glyph Retrieval + * + * @abstract: + * Functions to manage glyphs. + * + * @description: + * The functions and structures collected in this section operate on + * single glyphs, of which @FT_Load_Glyph is most important. + * + * @order: + * FT_GlyphSlot + * FT_GlyphSlotRec + * FT_Glyph_Metrics + * + * FT_Load_Glyph + * FT_LOAD_XXX + * FT_LOAD_TARGET_MODE + * FT_LOAD_TARGET_XXX + * + * FT_Render_Glyph + * FT_Render_Mode + * FT_Get_Kerning + * FT_Kerning_Mode + * FT_Get_Track_Kerning + * + */ + + + /************************************************************************** + * + * @section: + * character_mapping + * + * @title: + * Character Mapping + * + * @abstract: + * Functions to manage character-to-glyph maps. + * + * @description: + * This section holds functions and structures that are related to + * mapping character input codes to glyph indices. + * + * Note that for many scripts the simplistic approach used by FreeType + * of mapping a single character to a single glyph is not valid or + * possible! In general, a higher-level library like HarfBuzz or ICU + * should be used for handling text strings. + * + * @order: + * FT_CharMap + * FT_CharMapRec + * FT_Encoding + * FT_ENC_TAG + * + * FT_Select_Charmap + * FT_Set_Charmap + * FT_Get_Charmap_Index + * + * FT_Get_Char_Index + * FT_Get_First_Char + * FT_Get_Next_Char + * FT_Load_Char + * + */ + + + /************************************************************************** + * + * @section: + * information_retrieval + * + * @title: + * Information Retrieval + * + * @abstract: + * Functions to retrieve font and glyph information. + * + * @description: + * Functions to retrieve font and glyph information. Only some very + * basic data is covered; see also the chapter on the format-specific + * API for more. + * + * + * @order: + * FT_Get_Name_Index + * FT_Get_Glyph_Name + * FT_Get_Postscript_Name + * FT_Get_FSType_Flags + * FT_FSTYPE_XXX + * FT_Get_SubGlyph_Info + * FT_SUBGLYPH_FLAG_XXX + * + */ + + + /************************************************************************** + * + * @section: + * other_api_data + * + * @title: + * Other API Data + * + * @abstract: + * Other structures, enumerations, and macros. + * + * @description: + * Other structures, enumerations, and macros. Deprecated functions are + * also listed here. 
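 *
 * (As an aside to the 'character_mapping' section above, iterating
 * over all character codes of the currently selected charmap follows
 * a fixed pattern; a minimal sketch, assuming a valid `face`:
 *
 * ```
 * FT_UInt   gindex;
 * FT_ULong  charcode = FT_Get_First_Char( face, &gindex );
 *
 * while ( gindex != 0 )
 * {
 *   ... do something with charcode and gindex ...
 *
 *   charcode = FT_Get_Next_Char( face, charcode, &gindex );
 * }
 * ```
 *
 * The loop stops as soon as `gindex` is zero.)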
+ * + * @order: + * FT_Face_Internal + * FT_Size_Internal + * FT_Slot_Internal + * + * FT_SubGlyph + * + * FT_HAS_FAST_GLYPHS + * FT_Face_CheckTrueTypePatents + * FT_Face_SetUnpatentedHinting + * + */ + + + /*************************************************************************/ + /*************************************************************************/ + /* */ + /* B A S I C T Y P E S */ + /* */ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * @section: + * glyph_retrieval + * + */ + + /************************************************************************** + * + * @struct: + * FT_Glyph_Metrics + * + * @description: + * A structure to model the metrics of a single glyph. The values are + * expressed in 26.6 fractional pixel format; if the flag + * @FT_LOAD_NO_SCALE has been used while loading the glyph, values are + * expressed in font units instead. + * + * @fields: + * width :: + * The glyph's width. + * + * height :: + * The glyph's height. + * + * horiBearingX :: + * Left side bearing for horizontal layout. + * + * horiBearingY :: + * Top side bearing for horizontal layout. + * + * horiAdvance :: + * Advance width for horizontal layout. + * + * vertBearingX :: + * Left side bearing for vertical layout. + * + * vertBearingY :: + * Top side bearing for vertical layout. Larger positive values mean + * further below the vertical glyph origin. + * + * vertAdvance :: + * Advance height for vertical layout. Positive values mean the glyph + * has a positive advance downward. + * + * @note: + * If not disabled with @FT_LOAD_NO_HINTING, the values represent + * dimensions of the hinted glyph (in case hinting is applicable). + * + * Stroking a glyph with an outside border does not increase + * `horiAdvance` or `vertAdvance`; you have to manually adjust these + * values to account for the added width and height. + * + * FreeType doesn't use the 'VORG' table data for CFF fonts because it + * doesn't have an interface to quickly retrieve the glyph height. The + * y~coordinate of the vertical origin can be simply computed as + * `vertBearingY + height` after loading a glyph. + */ + typedef struct FT_Glyph_Metrics_ + { + FT_Pos width; + FT_Pos height; + + FT_Pos horiBearingX; + FT_Pos horiBearingY; + FT_Pos horiAdvance; + + FT_Pos vertBearingX; + FT_Pos vertBearingY; + FT_Pos vertAdvance; + + } FT_Glyph_Metrics; + + + /************************************************************************** + * + * @section: + * sizing_and_scaling + * + */ + + /************************************************************************** + * + * @struct: + * FT_Bitmap_Size + * + * @description: + * This structure models the metrics of a bitmap strike (i.e., a set of + * glyphs for a given point size and resolution) in a bitmap font. It is + * used for the `available_sizes` field of @FT_Face. + * + * @fields: + * height :: + * The vertical distance, in pixels, between two consecutive baselines. + * It is always positive. + * + * width :: + * The average width, in pixels, of all glyphs in the strike. + * + * size :: + * The nominal size of the strike in 26.6 fractional points. This + * field is not very useful. + * + * x_ppem :: + * The horizontal ppem (nominal width) in 26.6 fractional pixels. + * + * y_ppem :: + * The vertical ppem (nominal height) in 26.6 fractional pixels. 
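 *
 * A strike listed in `available_sizes` is typically activated with
 * @FT_Select_Size; a minimal sketch, assuming `face` provides at least
 * one fixed size (error handling omitted):
 *
 * ```
 * if ( face->num_fixed_sizes > 0 )
 *   FT_Select_Size( face, 0 );
 * ```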
+ * + * @note: + * Windows FNT: + * The nominal size given in a FNT font is not reliable. If the driver + * finds it incorrect, it sets `size` to some calculated values, and + * `x_ppem` and `y_ppem` to the pixel width and height given in the + * font, respectively. + * + * TrueType embedded bitmaps: + * `size`, `width`, and `height` values are not contained in the bitmap + * strike itself. They are computed from the global font parameters. + */ + typedef struct FT_Bitmap_Size_ + { + FT_Short height; + FT_Short width; + + FT_Pos size; + + FT_Pos x_ppem; + FT_Pos y_ppem; + + } FT_Bitmap_Size; + + + /*************************************************************************/ + /*************************************************************************/ + /* */ + /* O B J E C T C L A S S E S */ + /* */ + /*************************************************************************/ + /*************************************************************************/ + + /************************************************************************** + * + * @section: + * library_setup + * + */ + + /************************************************************************** + * + * @type: + * FT_Library + * + * @description: + * A handle to a FreeType library instance. Each 'library' is completely + * independent from the others; it is the 'root' of a set of objects like + * fonts, faces, sizes, etc. + * + * It also embeds a memory manager (see @FT_Memory), as well as a + * scan-line converter object (see @FT_Raster). + * + * [Since 2.5.6] In multi-threaded applications it is easiest to use one + * `FT_Library` object per thread. In case this is too cumbersome, a + * single `FT_Library` object across threads is possible also, as long as + * a mutex lock is used around @FT_New_Face and @FT_Done_Face. + * + * @note: + * Library objects are normally created by @FT_Init_FreeType, and + * destroyed with @FT_Done_FreeType. If you need reference-counting + * (cf. @FT_Reference_Library), use @FT_New_Library and @FT_Done_Library. + */ + typedef struct FT_LibraryRec_ *FT_Library; + + + /************************************************************************** + * + * @section: + * module_management + * + */ + + /************************************************************************** + * + * @type: + * FT_Module + * + * @description: + * A handle to a given FreeType module object. A module can be a font + * driver, a renderer, or anything else that provides services to the + * former. + */ + typedef struct FT_ModuleRec_* FT_Module; + + + /************************************************************************** + * + * @type: + * FT_Driver + * + * @description: + * A handle to a given FreeType font driver object. A font driver is a + * module capable of creating faces from font files. + */ + typedef struct FT_DriverRec_* FT_Driver; + + + /************************************************************************** + * + * @type: + * FT_Renderer + * + * @description: + * A handle to a given FreeType renderer. A renderer is a module in + * charge of converting a glyph's outline image to a bitmap. It supports + * a single glyph image format, and one or more target surface depths. 
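+ *
+ * As a brief sketch (assuming a valid `library` handle), the renderer
+ * currently responsible for a given glyph format can be queried with
+ * @FT_Get_Renderer:
+ *
+ * ```
+ * FT_Renderer renderer;
+ *
+ *
+ * renderer = FT_Get_Renderer( library, FT_GLYPH_FORMAT_OUTLINE );
+ *
+ * // `renderer` is NULL if no module handles this glyph format
+ * ```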
+ */ + typedef struct FT_RendererRec_* FT_Renderer; + + + /************************************************************************** + * + * @section: + * face_creation + * + */ + + /************************************************************************** + * + * @type: + * FT_Face + * + * @description: + * A handle to a typographic face object. A face object models a given + * typeface, in a given style. + * + * @note: + * A face object also owns a single @FT_GlyphSlot object, as well as one + * or more @FT_Size objects. + * + * Use @FT_New_Face or @FT_Open_Face to create a new face object from a + * given filepath or a custom input stream. + * + * Use @FT_Done_Face to destroy it (along with its slot and sizes). + * + * An `FT_Face` object can only be safely used from one thread at a time. + * Similarly, creation and destruction of `FT_Face` with the same + * @FT_Library object can only be done from one thread at a time. On the + * other hand, functions like @FT_Load_Glyph and its siblings are + * thread-safe and do not need the lock to be held as long as the same + * `FT_Face` object is not used from multiple threads at the same time. + * + * @also: + * See @FT_FaceRec for the publicly accessible fields of a given face + * object. + */ + typedef struct FT_FaceRec_* FT_Face; + + + /************************************************************************** + * + * @section: + * sizing_and_scaling + * + */ + + /************************************************************************** + * + * @type: + * FT_Size + * + * @description: + * A handle to an object that models a face scaled to a given character + * size. + * + * @note: + * An @FT_Face has one _active_ `FT_Size` object that is used by + * functions like @FT_Load_Glyph to determine the scaling transformation + * that in turn is used to load and hint glyphs and metrics. + * + * A newly created `FT_Size` object contains only meaningless zero values. + * You must use @FT_Set_Char_Size, @FT_Set_Pixel_Sizes, @FT_Request_Size + * or even @FT_Select_Size to change the content (i.e., the scaling + * values) of the active `FT_Size`. Otherwise, the scaling and hinting + * will not be performed. + * + * You can use @FT_New_Size to create additional size objects for a given + * @FT_Face, but they won't be used by other functions until you activate + * it through @FT_Activate_Size. Only one size can be activated at any + * given time per face. + * + * @also: + * See @FT_SizeRec for the publicly accessible fields of a given size + * object. + */ + typedef struct FT_SizeRec_* FT_Size; + + + /************************************************************************** + * + * @section: + * glyph_retrieval + * + */ + + /************************************************************************** + * + * @type: + * FT_GlyphSlot + * + * @description: + * A handle to a given 'glyph slot'. A slot is a container that can hold + * any of the glyphs contained in its parent face. + * + * In other words, each time you call @FT_Load_Glyph or @FT_Load_Char, + * the slot's content is erased by the new glyph data, i.e., the glyph's + * metrics, its image (bitmap or outline), and other control information. + * + * @also: + * See @FT_GlyphSlotRec for the publicly accessible glyph fields. 
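+ *
+ * A minimal sketch of the usual access pattern, assuming `face` is a
+ * valid @FT_Face with an active size and charmap, and `my_draw_bitmap`
+ * stands for application-side drawing code:
+ *
+ * ```
+ * FT_GlyphSlot slot = face->glyph;
+ * FT_Error error;
+ *
+ *
+ * error = FT_Load_Char( face, 'A', FT_LOAD_RENDER );
+ * if ( !error )
+ * my_draw_bitmap( &slot->bitmap,
+ * slot->bitmap_left,
+ * slot->bitmap_top );
+ * ```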
+ */ + typedef struct FT_GlyphSlotRec_* FT_GlyphSlot; + + + /************************************************************************** + * + * @section: + * character_mapping + * + */ + + /************************************************************************** + * + * @type: + * FT_CharMap + * + * @description: + * A handle to a character map (usually abbreviated to 'charmap'). A + * charmap is used to translate character codes in a given encoding into + * glyph indexes for its parent's face. Some font formats may provide + * several charmaps per font. + * + * Each face object owns zero or more charmaps, but only one of them can + * be 'active', providing the data used by @FT_Get_Char_Index or + * @FT_Load_Char. + * + * The list of available charmaps in a face is available through the + * `face->num_charmaps` and `face->charmaps` fields of @FT_FaceRec. + * + * The currently active charmap is available as `face->charmap`. You + * should call @FT_Set_Charmap to change it. + * + * @note: + * When a new face is created (either through @FT_New_Face or + * @FT_Open_Face), the library looks for a Unicode charmap within the + * list and automatically activates it. If there is no Unicode charmap, + * FreeType doesn't set an 'active' charmap. + * + * @also: + * See @FT_CharMapRec for the publicly accessible fields of a given + * character map. + */ + typedef struct FT_CharMapRec_* FT_CharMap; + + + /************************************************************************** + * + * @macro: + * FT_ENC_TAG + * + * @description: + * This macro converts four-letter tags into an unsigned long. It is + * used to define 'encoding' identifiers (see @FT_Encoding). + * + * @note: + * Since many 16-bit compilers don't like 32-bit enumerations, you should + * redefine this macro in case of problems to something like this: + * + * ``` + * #define FT_ENC_TAG( value, a, b, c, d ) value + * ``` + * + * to get a simple enumeration without assigning special numbers. + */ + +#ifndef FT_ENC_TAG + +#define FT_ENC_TAG( value, a, b, c, d ) \ + value = ( ( FT_STATIC_BYTE_CAST( FT_UInt32, a ) << 24 ) | \ + ( FT_STATIC_BYTE_CAST( FT_UInt32, b ) << 16 ) | \ + ( FT_STATIC_BYTE_CAST( FT_UInt32, c ) << 8 ) | \ + FT_STATIC_BYTE_CAST( FT_UInt32, d ) ) + +#endif /* FT_ENC_TAG */ + + + /************************************************************************** + * + * @enum: + * FT_Encoding + * + * @description: + * An enumeration to specify character sets supported by charmaps. Used + * in the @FT_Select_Charmap API function. + * + * @note: + * Despite the name, this enumeration lists specific character + * repertoires (i.e., charsets), and not text encoding methods (e.g., + * UTF-8, UTF-16, etc.). + * + * Other encodings might be defined in the future. + * + * @values: + * FT_ENCODING_NONE :: + * The encoding value~0 is reserved for all formats except BDF, PCF, + * and Windows FNT; see below for more information. + * + * FT_ENCODING_UNICODE :: + * The Unicode character set. This value covers all versions of the + * Unicode repertoire, including ASCII and Latin-1. Most fonts include + * a Unicode charmap, but not all of them. + * + * For example, if you want to access Unicode value U+1F028 (and the + * font contains it), use value 0x1F028 as the input value for + * @FT_Get_Char_Index. + * + * FT_ENCODING_MS_SYMBOL :: + * Microsoft Symbol encoding, used to encode mathematical symbols and + * wingdings. 
For more information, see + * 'https://www.microsoft.com/typography/otspec/recom.htm#non-standard-symbol-fonts', + * 'http://www.kostis.net/charsets/symbol.htm', and + * 'http://www.kostis.net/charsets/wingding.htm'. + * + * This encoding uses character codes from the PUA (Private Unicode + * Area) in the range U+F020-U+F0FF. + * + * FT_ENCODING_SJIS :: + * Shift JIS encoding for Japanese. More info at + * 'https://en.wikipedia.org/wiki/Shift_JIS'. See note on multi-byte + * encodings below. + * + * FT_ENCODING_PRC :: + * Corresponds to encoding systems mainly for Simplified Chinese as + * used in People's Republic of China (PRC). The encoding layout is + * based on GB~2312 and its supersets GBK and GB~18030. + * + * FT_ENCODING_BIG5 :: + * Corresponds to an encoding system for Traditional Chinese as used in + * Taiwan and Hong Kong. + * + * FT_ENCODING_WANSUNG :: + * Corresponds to the Korean encoding system known as Extended Wansung + * (MS Windows code page 949). For more information see + * 'https://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WindowsBestFit/bestfit949.txt'. + * + * FT_ENCODING_JOHAB :: + * The Korean standard character set (KS~C 5601-1992), which + * corresponds to MS Windows code page 1361. This character set + * includes all possible Hangul character combinations. + * + * FT_ENCODING_ADOBE_LATIN_1 :: + * Corresponds to a Latin-1 encoding as defined in a Type~1 PostScript + * font. It is limited to 256 character codes. + * + * FT_ENCODING_ADOBE_STANDARD :: + * Adobe Standard encoding, as found in Type~1, CFF, and OpenType/CFF + * fonts. It is limited to 256 character codes. + * + * FT_ENCODING_ADOBE_EXPERT :: + * Adobe Expert encoding, as found in Type~1, CFF, and OpenType/CFF + * fonts. It is limited to 256 character codes. + * + * FT_ENCODING_ADOBE_CUSTOM :: + * Corresponds to a custom encoding, as found in Type~1, CFF, and + * OpenType/CFF fonts. It is limited to 256 character codes. + * + * FT_ENCODING_APPLE_ROMAN :: + * Apple roman encoding. Many TrueType and OpenType fonts contain a + * charmap for this 8-bit encoding, since older versions of Mac OS are + * able to use it. + * + * FT_ENCODING_OLD_LATIN_2 :: + * This value is deprecated and was neither used nor reported by + * FreeType. Don't use or test for it. + * + * FT_ENCODING_MS_SJIS :: + * Same as FT_ENCODING_SJIS. Deprecated. + * + * FT_ENCODING_MS_GB2312 :: + * Same as FT_ENCODING_PRC. Deprecated. + * + * FT_ENCODING_MS_BIG5 :: + * Same as FT_ENCODING_BIG5. Deprecated. + * + * FT_ENCODING_MS_WANSUNG :: + * Same as FT_ENCODING_WANSUNG. Deprecated. + * + * FT_ENCODING_MS_JOHAB :: + * Same as FT_ENCODING_JOHAB. Deprecated. + * + * @note: + * When loading a font, FreeType makes a Unicode charmap active if + * possible (either if the font provides such a charmap, or if FreeType + * can synthesize one from PostScript glyph name dictionaries; in either + * case, the charmap is tagged with `FT_ENCODING_UNICODE`). If such a + * charmap is synthesized, it is placed at the first position of the + * charmap array. + * + * All other encodings are considered legacy and tagged only if + * explicitly defined in the font file. Otherwise, `FT_ENCODING_NONE` is + * used. + * + * `FT_ENCODING_NONE` is set by the BDF and PCF drivers if the charmap is + * neither Unicode nor ISO-8859-1 (otherwise it is set to + * `FT_ENCODING_UNICODE`). Use @FT_Get_BDF_Charset_ID to find out which + * encoding is really present. 
If, for example, the `cs_registry` field + * is 'KOI8' and the `cs_encoding` field is 'R', the font is encoded in + * KOI8-R. + * + * `FT_ENCODING_NONE` is always set (with a single exception) by the + * winfonts driver. Use @FT_Get_WinFNT_Header and examine the `charset` + * field of the @FT_WinFNT_HeaderRec structure to find out which encoding + * is really present. For example, @FT_WinFNT_ID_CP1251 (204) means + * Windows code page 1251 (for Russian). + * + * `FT_ENCODING_NONE` is set if `platform_id` is @TT_PLATFORM_MACINTOSH + * and `encoding_id` is not `TT_MAC_ID_ROMAN` (otherwise it is set to + * `FT_ENCODING_APPLE_ROMAN`). + * + * If `platform_id` is @TT_PLATFORM_MACINTOSH, use the function + * @FT_Get_CMap_Language_ID to query the Mac language ID that may be + * needed to be able to distinguish Apple encoding variants. See + * + * https://www.unicode.org/Public/MAPPINGS/VENDORS/APPLE/Readme.txt + * + * to get an idea how to do that. Basically, if the language ID is~0, + * don't use it, otherwise subtract 1 from the language ID. Then examine + * `encoding_id`. If, for example, `encoding_id` is `TT_MAC_ID_ROMAN` + * and the language ID (minus~1) is `TT_MAC_LANGID_GREEK`, it is the + * Greek encoding, not Roman. `TT_MAC_ID_ARABIC` with + * `TT_MAC_LANGID_FARSI` means the Farsi variant of the Arabic encoding. + */ + typedef enum FT_Encoding_ + { + FT_ENC_TAG( FT_ENCODING_NONE, 0, 0, 0, 0 ), + + FT_ENC_TAG( FT_ENCODING_MS_SYMBOL, 's', 'y', 'm', 'b' ), + FT_ENC_TAG( FT_ENCODING_UNICODE, 'u', 'n', 'i', 'c' ), + + FT_ENC_TAG( FT_ENCODING_SJIS, 's', 'j', 'i', 's' ), + FT_ENC_TAG( FT_ENCODING_PRC, 'g', 'b', ' ', ' ' ), + FT_ENC_TAG( FT_ENCODING_BIG5, 'b', 'i', 'g', '5' ), + FT_ENC_TAG( FT_ENCODING_WANSUNG, 'w', 'a', 'n', 's' ), + FT_ENC_TAG( FT_ENCODING_JOHAB, 'j', 'o', 'h', 'a' ), + + /* for backward compatibility */ + FT_ENCODING_GB2312 = FT_ENCODING_PRC, + FT_ENCODING_MS_SJIS = FT_ENCODING_SJIS, + FT_ENCODING_MS_GB2312 = FT_ENCODING_PRC, + FT_ENCODING_MS_BIG5 = FT_ENCODING_BIG5, + FT_ENCODING_MS_WANSUNG = FT_ENCODING_WANSUNG, + FT_ENCODING_MS_JOHAB = FT_ENCODING_JOHAB, + + FT_ENC_TAG( FT_ENCODING_ADOBE_STANDARD, 'A', 'D', 'O', 'B' ), + FT_ENC_TAG( FT_ENCODING_ADOBE_EXPERT, 'A', 'D', 'B', 'E' ), + FT_ENC_TAG( FT_ENCODING_ADOBE_CUSTOM, 'A', 'D', 'B', 'C' ), + FT_ENC_TAG( FT_ENCODING_ADOBE_LATIN_1, 'l', 'a', 't', '1' ), + + FT_ENC_TAG( FT_ENCODING_OLD_LATIN_2, 'l', 'a', 't', '2' ), + + FT_ENC_TAG( FT_ENCODING_APPLE_ROMAN, 'a', 'r', 'm', 'n' ) + + } FT_Encoding; + + + /* these constants are deprecated; use the corresponding `FT_Encoding` */ + /* values instead */ +#define ft_encoding_none FT_ENCODING_NONE +#define ft_encoding_unicode FT_ENCODING_UNICODE +#define ft_encoding_symbol FT_ENCODING_MS_SYMBOL +#define ft_encoding_latin_1 FT_ENCODING_ADOBE_LATIN_1 +#define ft_encoding_latin_2 FT_ENCODING_OLD_LATIN_2 +#define ft_encoding_sjis FT_ENCODING_SJIS +#define ft_encoding_gb2312 FT_ENCODING_PRC +#define ft_encoding_big5 FT_ENCODING_BIG5 +#define ft_encoding_wansung FT_ENCODING_WANSUNG +#define ft_encoding_johab FT_ENCODING_JOHAB + +#define ft_encoding_adobe_standard FT_ENCODING_ADOBE_STANDARD +#define ft_encoding_adobe_expert FT_ENCODING_ADOBE_EXPERT +#define ft_encoding_adobe_custom FT_ENCODING_ADOBE_CUSTOM +#define ft_encoding_apple_roman FT_ENCODING_APPLE_ROMAN + + + /************************************************************************** + * + * @struct: + * FT_CharMapRec + * + * @description: + * The base charmap structure. 
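+ *
+ * A minimal sketch that walks the charmap array of a valid `face` and
+ * activates the first Unicode charmap found (roughly what
+ * @FT_Select_Charmap does when called with `FT_ENCODING_UNICODE`):
+ *
+ * ```
+ * FT_Error error;
+ * FT_Int n;
+ *
+ *
+ * for ( n = 0; n < face->num_charmaps; n++ )
+ * {
+ * FT_CharMap charmap = face->charmaps[n];
+ *
+ *
+ * if ( charmap->encoding == FT_ENCODING_UNICODE )
+ * {
+ * error = FT_Set_Charmap( face, charmap );
+ * break;
+ * }
+ * }
+ * ```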
+ * + * @fields: + * face :: + * A handle to the parent face object. + * + * encoding :: + * An @FT_Encoding tag identifying the charmap. Use this with + * @FT_Select_Charmap. + * + * platform_id :: + * An ID number describing the platform for the following encoding ID. + * This comes directly from the TrueType specification and gets + * emulated for other formats. + * + * encoding_id :: + * A platform-specific encoding number. This also comes from the + * TrueType specification and gets emulated similarly. + */ + typedef struct FT_CharMapRec_ + { + FT_Face face; + FT_Encoding encoding; + FT_UShort platform_id; + FT_UShort encoding_id; + + } FT_CharMapRec; + + + /*************************************************************************/ + /*************************************************************************/ + /* */ + /* B A S E O B J E C T C L A S S E S */ + /* */ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * @section: + * other_api_data + * + */ + + /************************************************************************** + * + * @type: + * FT_Face_Internal + * + * @description: + * An opaque handle to an `FT_Face_InternalRec` structure that models the + * private data of a given @FT_Face object. + * + * This structure might change between releases of FreeType~2 and is not + * generally available to client applications. + */ + typedef struct FT_Face_InternalRec_* FT_Face_Internal; + + + /************************************************************************** + * + * @section: + * face_creation + * + */ + + /************************************************************************** + * + * @struct: + * FT_FaceRec + * + * @description: + * FreeType root face class structure. A face object models a typeface + * in a font file. + * + * @fields: + * num_faces :: + * The number of faces in the font file. Some font formats can have + * multiple faces in a single font file. + * + * face_index :: + * This field holds two different values. Bits 0-15 are the index of + * the face in the font file (starting with value~0). They are set + * to~0 if there is only one face in the font file. + * + * [Since 2.6.1] Bits 16-30 are relevant to GX and OpenType variation + * fonts only, holding the named instance index for the current face + * index (starting with value~1; value~0 indicates font access without + * a named instance). For non-variation fonts, bits 16-30 are ignored. + * If we have the third named instance of face~4, say, `face_index` is + * set to 0x00030004. + * + * Bit 31 is always zero (that is, `face_index` is always a positive + * value). + * + * [Since 2.9] Changing the design coordinates with + * @FT_Set_Var_Design_Coordinates or @FT_Set_Var_Blend_Coordinates does + * not influence the named instance index value (only + * @FT_Set_Named_Instance does that). + * + * face_flags :: + * A set of bit flags that give important information about the face; + * see @FT_FACE_FLAG_XXX for the details. + * + * style_flags :: + * The lower 16~bits contain a set of bit flags indicating the style of + * the face; see @FT_STYLE_FLAG_XXX for the details. + * + * [Since 2.6.1] Bits 16-30 hold the number of named instances + * available for the current face if we have a GX or OpenType variation + * (sub)font. Bit 31 is always zero (that is, `style_flags` is always + * a positive value). 
Note that a variation font has always at least + * one named instance, namely the default instance. + * + * num_glyphs :: + * The number of glyphs in the face. If the face is scalable and has + * sbits (see `num_fixed_sizes`), it is set to the number of outline + * glyphs. + * + * For CID-keyed fonts (not in an SFNT wrapper) this value gives the + * highest CID used in the font. + * + * family_name :: + * The face's family name. This is an ASCII string, usually in + * English, that describes the typeface's family (like 'Times New + * Roman', 'Bodoni', 'Garamond', etc). This is a least common + * denominator used to list fonts. Some formats (TrueType & OpenType) + * provide localized and Unicode versions of this string. Applications + * should use the format-specific interface to access them. Can be + * `NULL` (e.g., in fonts embedded in a PDF file). + * + * In case the font doesn't provide a specific family name entry, + * FreeType tries to synthesize one, deriving it from other name + * entries. + * + * style_name :: + * The face's style name. This is an ASCII string, usually in English, + * that describes the typeface's style (like 'Italic', 'Bold', + * 'Condensed', etc). Not all font formats provide a style name, so + * this field is optional, and can be set to `NULL`. As for + * `family_name`, some formats provide localized and Unicode versions + * of this string. Applications should use the format-specific + * interface to access them. + * + * num_fixed_sizes :: + * The number of bitmap strikes in the face. Even if the face is + * scalable, there might still be bitmap strikes, which are called + * 'sbits' in that case. + * + * available_sizes :: + * An array of @FT_Bitmap_Size for all bitmap strikes in the face. It + * is set to `NULL` if there is no bitmap strike. + * + * Note that FreeType tries to sanitize the strike data since they are + * sometimes sloppy or incorrect, but this can easily fail. + * + * num_charmaps :: + * The number of charmaps in the face. + * + * charmaps :: + * An array of the charmaps of the face. + * + * generic :: + * A field reserved for client uses. See the @FT_Generic type + * description. + * + * bbox :: + * The font bounding box. Coordinates are expressed in font units (see + * `units_per_EM`). The box is large enough to contain any glyph from + * the font. Thus, `bbox.yMax` can be seen as the 'maximum ascender', + * and `bbox.yMin` as the 'minimum descender'. Only relevant for + * scalable formats. + * + * Note that the bounding box might be off by (at least) one pixel for + * hinted fonts. See @FT_Size_Metrics for further discussion. + * + * Note that the bounding box does not vary in OpenType variation fonts + * and should only be used in relation to the default instance. + * + * units_per_EM :: + * The number of font units per EM square for this face. This is + * typically 2048 for TrueType fonts, and 1000 for Type~1 fonts. Only + * relevant for scalable formats. + * + * ascender :: + * The typographic ascender of the face, expressed in font units. For + * font formats not having this information, it is set to `bbox.yMax`. + * Only relevant for scalable formats. + * + * descender :: + * The typographic descender of the face, expressed in font units. For + * font formats not having this information, it is set to `bbox.yMin`. + * Note that this field is negative for values below the baseline. + * Only relevant for scalable formats. 
+ * + * height :: + * This value is the vertical distance between two consecutive + * baselines, expressed in font units. It is always positive. Only + * relevant for scalable formats. + * + * If you want the global glyph height, use `ascender - descender`. + * + * max_advance_width :: + * The maximum advance width, in font units, for all glyphs in this + * face. This can be used to make word wrapping computations faster. + * Only relevant for scalable formats. + * + * max_advance_height :: + * The maximum advance height, in font units, for all glyphs in this + * face. This is only relevant for vertical layouts, and is set to + * `height` for fonts that do not provide vertical metrics. Only + * relevant for scalable formats. + * + * underline_position :: + * The position, in font units, of the underline line for this face. + * It is the center of the underlining stem. Only relevant for + * scalable formats. + * + * underline_thickness :: + * The thickness, in font units, of the underline for this face. Only + * relevant for scalable formats. + * + * glyph :: + * The face's associated glyph slot(s). + * + * size :: + * The current active size for this face. + * + * charmap :: + * The current active charmap for this face. + * + * @note: + * Fields may be changed after a call to @FT_Attach_File or + * @FT_Attach_Stream. + * + * For an OpenType variation font, the values of the following fields can + * change after a call to @FT_Set_Var_Design_Coordinates (and friends) if + * the font contains an 'MVAR' table: `ascender`, `descender`, `height`, + * `underline_position`, and `underline_thickness`. + * + * Especially for TrueType fonts see also the documentation for + * @FT_Size_Metrics. + */ + typedef struct FT_FaceRec_ + { + FT_Long num_faces; + FT_Long face_index; + + FT_Long face_flags; + FT_Long style_flags; + + FT_Long num_glyphs; + + FT_String* family_name; + FT_String* style_name; + + FT_Int num_fixed_sizes; + FT_Bitmap_Size* available_sizes; + + FT_Int num_charmaps; + FT_CharMap* charmaps; + + FT_Generic generic; + + /* The following member variables (down to `underline_thickness`) */ + /* are only relevant to scalable outlines; cf. @FT_Bitmap_Size */ + /* for bitmap fonts. */ + FT_BBox bbox; + + FT_UShort units_per_EM; + FT_Short ascender; + FT_Short descender; + FT_Short height; + + FT_Short max_advance_width; + FT_Short max_advance_height; + + FT_Short underline_position; + FT_Short underline_thickness; + + FT_GlyphSlot glyph; + FT_Size size; + FT_CharMap charmap; + + /* private fields, internal to FreeType */ + + FT_Driver driver; + FT_Memory memory; + FT_Stream stream; + + FT_ListRec sizes_list; + + FT_Generic autohint; /* face-specific auto-hinter data */ + void* extensions; /* unused */ + + FT_Face_Internal internal; + + } FT_FaceRec; + + + /************************************************************************** + * + * @enum: + * FT_FACE_FLAG_XXX + * + * @description: + * A list of bit flags used in the `face_flags` field of the @FT_FaceRec + * structure. They inform client applications of properties of the + * corresponding face. + * + * @values: + * FT_FACE_FLAG_SCALABLE :: + * The face contains outline glyphs. Note that a face can contain + * bitmap strikes also, i.e., a face can have both this flag and + * @FT_FACE_FLAG_FIXED_SIZES set. + * + * FT_FACE_FLAG_FIXED_SIZES :: + * The face contains bitmap strikes. See also the `num_fixed_sizes` + * and `available_sizes` fields of @FT_FaceRec. 
+ * + * FT_FACE_FLAG_FIXED_WIDTH :: + * The face contains fixed-width characters (like Courier, Lucida, + * MonoType, etc.). + * + * FT_FACE_FLAG_SFNT :: + * The face uses the SFNT storage scheme. For now, this means TrueType + * and OpenType. + * + * FT_FACE_FLAG_HORIZONTAL :: + * The face contains horizontal glyph metrics. This should be set for + * all common formats. + * + * FT_FACE_FLAG_VERTICAL :: + * The face contains vertical glyph metrics. This is only available in + * some formats, not all of them. + * + * FT_FACE_FLAG_KERNING :: + * The face contains kerning information. If set, the kerning distance + * can be retrieved using the function @FT_Get_Kerning. Otherwise the + * function always returns the vector (0,0). Note that FreeType + * doesn't handle kerning data from the SFNT 'GPOS' table (as present + * in many OpenType fonts). + * + * FT_FACE_FLAG_FAST_GLYPHS :: + * THIS FLAG IS DEPRECATED. DO NOT USE OR TEST IT. + * + * FT_FACE_FLAG_MULTIPLE_MASTERS :: + * The face contains multiple masters and is capable of interpolating + * between them. Supported formats are Adobe MM, TrueType GX, and + * OpenType variation fonts. + * + * See section @multiple_masters for API details. + * + * FT_FACE_FLAG_GLYPH_NAMES :: + * The face contains glyph names, which can be retrieved using + * @FT_Get_Glyph_Name. Note that some TrueType fonts contain broken + * glyph name tables. Use the function @FT_Has_PS_Glyph_Names when + * needed. + * + * FT_FACE_FLAG_EXTERNAL_STREAM :: + * Used internally by FreeType to indicate that a face's stream was + * provided by the client application and should not be destroyed when + * @FT_Done_Face is called. Don't read or test this flag. + * + * FT_FACE_FLAG_HINTER :: + * The font driver has a hinting machine of its own. For example, with + * TrueType fonts, it makes sense to use data from the SFNT 'gasp' + * table only if the native TrueType hinting engine (with the bytecode + * interpreter) is available and active. + * + * FT_FACE_FLAG_CID_KEYED :: + * The face is CID-keyed. In that case, the face is not accessed by + * glyph indices but by CID values. For subsetted CID-keyed fonts this + * has the consequence that not all index values are a valid argument + * to @FT_Load_Glyph. Only the CID values for which corresponding + * glyphs in the subsetted font exist make `FT_Load_Glyph` return + * successfully; in all other cases you get an + * `FT_Err_Invalid_Argument` error. + * + * Note that CID-keyed fonts that are in an SFNT wrapper (that is, all + * OpenType/CFF fonts) don't have this flag set since the glyphs are + * accessed in the normal way (using contiguous indices); the + * 'CID-ness' isn't visible to the application. + * + * FT_FACE_FLAG_TRICKY :: + * The face is 'tricky', that is, it always needs the font format's + * native hinting engine to get a reasonable result. A typical example + * is the old Chinese font `mingli.ttf` (but not `mingliu.ttc`) that + * uses TrueType bytecode instructions to move and scale all of its + * subglyphs. + * + * It is not possible to auto-hint such fonts using + * @FT_LOAD_FORCE_AUTOHINT; it will also ignore @FT_LOAD_NO_HINTING. + * You have to set both @FT_LOAD_NO_HINTING and @FT_LOAD_NO_AUTOHINT to + * really disable hinting; however, you probably never want this except + * for demonstration purposes. + * + * Currently, there are about a dozen TrueType fonts in the list of + * tricky fonts; they are hard-coded in file `ttobjs.c`. + * + * FT_FACE_FLAG_COLOR :: + * [Since 2.5.1] The face has color glyph tables. 
See @FT_LOAD_COLOR + * for more information. + * + * FT_FACE_FLAG_VARIATION :: + * [Since 2.9] Set if the current face (or named instance) has been + * altered with @FT_Set_MM_Design_Coordinates, + * @FT_Set_Var_Design_Coordinates, @FT_Set_Var_Blend_Coordinates, or + * @FT_Set_MM_WeightVector to select a non-default instance. + * + * FT_FACE_FLAG_SVG :: + * [Since 2.12] The face has an 'SVG~' OpenType table. + * + * FT_FACE_FLAG_SBIX :: + * [Since 2.12] The face has an 'sbix' OpenType table *and* outlines. + * For such fonts, @FT_FACE_FLAG_SCALABLE is not set by default to + * retain backward compatibility. + * + * FT_FACE_FLAG_SBIX_OVERLAY :: + * [Since 2.12] The face has an 'sbix' OpenType table where outlines + * should be drawn on top of bitmap strikes. + * + */ +#define FT_FACE_FLAG_SCALABLE ( 1L << 0 ) +#define FT_FACE_FLAG_FIXED_SIZES ( 1L << 1 ) +#define FT_FACE_FLAG_FIXED_WIDTH ( 1L << 2 ) +#define FT_FACE_FLAG_SFNT ( 1L << 3 ) +#define FT_FACE_FLAG_HORIZONTAL ( 1L << 4 ) +#define FT_FACE_FLAG_VERTICAL ( 1L << 5 ) +#define FT_FACE_FLAG_KERNING ( 1L << 6 ) +#define FT_FACE_FLAG_FAST_GLYPHS ( 1L << 7 ) +#define FT_FACE_FLAG_MULTIPLE_MASTERS ( 1L << 8 ) +#define FT_FACE_FLAG_GLYPH_NAMES ( 1L << 9 ) +#define FT_FACE_FLAG_EXTERNAL_STREAM ( 1L << 10 ) +#define FT_FACE_FLAG_HINTER ( 1L << 11 ) +#define FT_FACE_FLAG_CID_KEYED ( 1L << 12 ) +#define FT_FACE_FLAG_TRICKY ( 1L << 13 ) +#define FT_FACE_FLAG_COLOR ( 1L << 14 ) +#define FT_FACE_FLAG_VARIATION ( 1L << 15 ) +#define FT_FACE_FLAG_SVG ( 1L << 16 ) +#define FT_FACE_FLAG_SBIX ( 1L << 17 ) +#define FT_FACE_FLAG_SBIX_OVERLAY ( 1L << 18 ) + + + /************************************************************************** + * + * @section: + * font_testing_macros + * + */ + + /************************************************************************** + * + * @macro: + * FT_HAS_HORIZONTAL + * + * @description: + * A macro that returns true whenever a face object contains horizontal + * metrics (this is true for all font formats though). + * + * @also: + * @FT_HAS_VERTICAL can be used to check for vertical metrics. + * + */ +#define FT_HAS_HORIZONTAL( face ) \ + ( !!( (face)->face_flags & FT_FACE_FLAG_HORIZONTAL ) ) + + + /************************************************************************** + * + * @macro: + * FT_HAS_VERTICAL + * + * @description: + * A macro that returns true whenever a face object contains real + * vertical metrics (and not only synthesized ones). + * + */ +#define FT_HAS_VERTICAL( face ) \ + ( !!( (face)->face_flags & FT_FACE_FLAG_VERTICAL ) ) + + + /************************************************************************** + * + * @macro: + * FT_HAS_KERNING + * + * @description: + * A macro that returns true whenever a face object contains kerning data + * that can be accessed with @FT_Get_Kerning. + * + */ +#define FT_HAS_KERNING( face ) \ + ( !!( (face)->face_flags & FT_FACE_FLAG_KERNING ) ) + + + /************************************************************************** + * + * @macro: + * FT_IS_SCALABLE + * + * @description: + * A macro that returns true whenever a face object contains a scalable + * font face (true for TrueType, Type~1, Type~42, CID, OpenType/CFF, and + * PFR font formats). 
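+ *
+ * Fields of @FT_FaceRec that are documented as 'only relevant for
+ * scalable formats', such as `units_per_EM` or `bbox`, are only
+ * meaningful if this macro returns true; a short sketch (`em_size` is
+ * an application-side variable):
+ *
+ * ```
+ * FT_UShort em_size = 0; // stays 0 for bitmap-only faces
+ *
+ *
+ * if ( FT_IS_SCALABLE( face ) )
+ * em_size = face->units_per_EM;
+ * ```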
+ * + */ +#define FT_IS_SCALABLE( face ) \ + ( !!( (face)->face_flags & FT_FACE_FLAG_SCALABLE ) ) + + + /************************************************************************** + * + * @macro: + * FT_IS_SFNT + * + * @description: + * A macro that returns true whenever a face object contains a font whose + * format is based on the SFNT storage scheme. This usually means: + * TrueType fonts, OpenType fonts, as well as SFNT-based embedded bitmap + * fonts. + * + * If this macro is true, all functions defined in @FT_SFNT_NAMES_H and + * @FT_TRUETYPE_TABLES_H are available. + * + */ +#define FT_IS_SFNT( face ) \ + ( !!( (face)->face_flags & FT_FACE_FLAG_SFNT ) ) + + + /************************************************************************** + * + * @macro: + * FT_IS_FIXED_WIDTH + * + * @description: + * A macro that returns true whenever a face object contains a font face + * that contains fixed-width (or 'monospace', 'fixed-pitch', etc.) + * glyphs. + * + */ +#define FT_IS_FIXED_WIDTH( face ) \ + ( !!( (face)->face_flags & FT_FACE_FLAG_FIXED_WIDTH ) ) + + + /************************************************************************** + * + * @macro: + * FT_HAS_FIXED_SIZES + * + * @description: + * A macro that returns true whenever a face object contains some + * embedded bitmaps. See the `available_sizes` field of the @FT_FaceRec + * structure. + * + */ +#define FT_HAS_FIXED_SIZES( face ) \ + ( !!( (face)->face_flags & FT_FACE_FLAG_FIXED_SIZES ) ) + + + /************************************************************************** + * + * @section: + * other_api_data + * + */ + + /************************************************************************** + * + * @macro: + * FT_HAS_FAST_GLYPHS + * + * @description: + * Deprecated. + * + */ +#define FT_HAS_FAST_GLYPHS( face ) 0 + + + /************************************************************************** + * + * @section: + * font_testing_macros + * + */ + + /************************************************************************** + * + * @macro: + * FT_HAS_GLYPH_NAMES + * + * @description: + * A macro that returns true whenever a face object contains some glyph + * names that can be accessed through @FT_Get_Glyph_Name. + * + */ +#define FT_HAS_GLYPH_NAMES( face ) \ + ( !!( (face)->face_flags & FT_FACE_FLAG_GLYPH_NAMES ) ) + + + /************************************************************************** + * + * @macro: + * FT_HAS_MULTIPLE_MASTERS + * + * @description: + * A macro that returns true whenever a face object contains some + * multiple masters. The functions provided by @FT_MULTIPLE_MASTERS_H + * are then available to choose the exact design you want. + * + */ +#define FT_HAS_MULTIPLE_MASTERS( face ) \ + ( !!( (face)->face_flags & FT_FACE_FLAG_MULTIPLE_MASTERS ) ) + + + /************************************************************************** + * + * @macro: + * FT_IS_NAMED_INSTANCE + * + * @description: + * A macro that returns true whenever a face object is a named instance + * of a GX or OpenType variation font. + * + * [Since 2.9] Changing the design coordinates with + * @FT_Set_Var_Design_Coordinates or @FT_Set_Var_Blend_Coordinates does + * not influence the return value of this macro (only + * @FT_Set_Named_Instance does that). 
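+ *
+ * The named instance index itself is stored in bits 16-30 of
+ * `face->face_index` (see @FT_FaceRec); a minimal sketch to retrieve
+ * it:
+ *
+ * ```
+ * FT_Long instance_index = ( face->face_index >> 16 ) & 0x7FFF;
+ *
+ *
+ * // value 0 means that no named instance is selected
+ * ```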
+ * + * @since: + * 2.7 + * + */ +#define FT_IS_NAMED_INSTANCE( face ) \ + ( !!( (face)->face_index & 0x7FFF0000L ) ) + + + /************************************************************************** + * + * @macro: + * FT_IS_VARIATION + * + * @description: + * A macro that returns true whenever a face object has been altered by + * @FT_Set_MM_Design_Coordinates, @FT_Set_Var_Design_Coordinates, + * @FT_Set_Var_Blend_Coordinates, or @FT_Set_MM_WeightVector. + * + * @since: + * 2.9 + * + */ +#define FT_IS_VARIATION( face ) \ + ( !!( (face)->face_flags & FT_FACE_FLAG_VARIATION ) ) + + + /************************************************************************** + * + * @macro: + * FT_IS_CID_KEYED + * + * @description: + * A macro that returns true whenever a face object contains a CID-keyed + * font. See the discussion of @FT_FACE_FLAG_CID_KEYED for more details. + * + * If this macro is true, all functions defined in @FT_CID_H are + * available. + * + */ +#define FT_IS_CID_KEYED( face ) \ + ( !!( (face)->face_flags & FT_FACE_FLAG_CID_KEYED ) ) + + + /************************************************************************** + * + * @macro: + * FT_IS_TRICKY + * + * @description: + * A macro that returns true whenever a face represents a 'tricky' font. + * See the discussion of @FT_FACE_FLAG_TRICKY for more details. + * + */ +#define FT_IS_TRICKY( face ) \ + ( !!( (face)->face_flags & FT_FACE_FLAG_TRICKY ) ) + + + /************************************************************************** + * + * @macro: + * FT_HAS_COLOR + * + * @description: + * A macro that returns true whenever a face object contains tables for + * color glyphs. + * + * @since: + * 2.5.1 + * + */ +#define FT_HAS_COLOR( face ) \ + ( !!( (face)->face_flags & FT_FACE_FLAG_COLOR ) ) + + + /************************************************************************** + * + * @macro: + * FT_HAS_SVG + * + * @description: + * A macro that returns true whenever a face object contains an 'SVG~' + * OpenType table. + * + * @since: + * 2.12 + */ +#define FT_HAS_SVG( face ) \ + ( !!( (face)->face_flags & FT_FACE_FLAG_SVG ) ) + + + /************************************************************************** + * + * @macro: + * FT_HAS_SBIX + * + * @description: + * A macro that returns true whenever a face object contains an 'sbix' + * OpenType table *and* outline glyphs. + * + * Currently, FreeType only supports bitmap glyphs in PNG format for this + * table (i.e., JPEG and TIFF formats are unsupported, as are + * Apple-specific formats not part of the OpenType specification). + * + * @note: + * For backward compatibility, a font with an 'sbix' table is treated as + * a bitmap-only face. Using @FT_Open_Face with + * @FT_PARAM_TAG_IGNORE_SBIX, an application can switch off 'sbix' + * handling so that the face is treated as an ordinary outline font with + * scalable outlines. + * + * Here is some pseudo code that roughly illustrates how to implement + * 'sbix' handling according to the OpenType specification. 
+ *
+ * ```
+ * if ( FT_HAS_SBIX( face ) )
+ * {
+ * // open font as a scalable one without sbix handling
+ * FT_Face face2;
+ * FT_Parameter param = { FT_PARAM_TAG_IGNORE_SBIX, NULL };
+ * FT_Open_Args args = { FT_OPEN_PARAMS | ...,
+ * ...,
+ * 1, &param };
+ *
+ *
+ * FT_Open_Face( library, &args, 0, &face2 );
+ *
+ * <sort `face->available_sizes` as necessary into
+ * `preferred_sizes`[*]>
+ *
+ * for ( i = 0; i < face->num_fixed_sizes; i++ )
+ * {
+ * size = preferred_sizes[i].size;
+ *
+ * error = FT_Set_Pixel_Sizes( face, size, size );
+ * <error handling omitted>
+ *
+ * // check whether we have a glyph in a bitmap strike
+ * error = FT_Load_Glyph( face,
+ * glyph_index,
+ * FT_LOAD_SBITS_ONLY |
+ * FT_LOAD_BITMAP_METRICS_ONLY );
+ * if ( error == FT_Err_Invalid_Argument )
+ * continue;
+ * else if ( error )
+ * <other error handling omitted>
+ * else
+ * break;
+ * }
+ *
+ * if ( i != face->num_fixed_sizes )
+ * <load embedded bitmap with `FT_Load_Glyph`,
+ * scale it, display it, etc.>
+ *
+ * if ( i == face->num_fixed_sizes ||
+ * FT_HAS_SBIX_OVERLAY( face ) )
+ * <use `face2` to load and display the outline glyph>
+ * }
+ * ```
+ *
+ * [*] Assuming a target value of 400dpi and available strike sizes 100,
+ * 200, 300, and 400dpi, a possible order might be [400, 200, 300, 100]:
+ * scaling 200dpi to 400dpi usually gives better results than scaling
+ * 300dpi to 400dpi; it is also much faster. However, scaling 100dpi to
+ * 400dpi can yield a too pixelated result, thus the preference might be
+ * 300dpi over 100dpi.
+ *
+ * @since:
+ * 2.12
+ */
+#define FT_HAS_SBIX( face ) \
+ ( !!( (face)->face_flags & FT_FACE_FLAG_SBIX ) )
+
+
+ /**************************************************************************
+ *
+ * @macro:
+ * FT_HAS_SBIX_OVERLAY
+ *
+ * @description:
+ * A macro that returns true whenever a face object contains an 'sbix'
+ * OpenType table with bit~1 in its `flags` field set, instructing the
+ * application to overlay the bitmap strike with the corresponding
+ * outline glyph. See @FT_HAS_SBIX for pseudo code showing how to use
+ * it.
+ *
+ * @since:
+ * 2.12
+ */
+#define FT_HAS_SBIX_OVERLAY( face ) \
+ ( !!( (face)->face_flags & FT_FACE_FLAG_SBIX_OVERLAY ) )
+
+
+ /**************************************************************************
+ *
+ * @section:
+ * face_creation
+ *
+ */
+
+ /**************************************************************************
+ *
+ * @enum:
+ * FT_STYLE_FLAG_XXX
+ *
+ * @description:
+ * A list of bit flags to indicate the style of a given face. These are
+ * used in the `style_flags` field of @FT_FaceRec.
+ *
+ * @values:
+ * FT_STYLE_FLAG_ITALIC ::
+ * The face style is italic or oblique.
+ *
+ * FT_STYLE_FLAG_BOLD ::
+ * The face is bold.
+ *
+ * @note:
+ * The style information as provided by FreeType is very basic. More
+ * detailed analysis is beyond its scope and should be done on a higher
+ * level (for example, by analyzing various fields of the 'OS/2' table
+ * in SFNT based fonts).
+ */
+#define FT_STYLE_FLAG_ITALIC ( 1 << 0 )
+#define FT_STYLE_FLAG_BOLD ( 1 << 1 )
+
+
+ /**************************************************************************
+ *
+ * @section:
+ * other_api_data
+ *
+ */
+
+ /**************************************************************************
+ *
+ * @type:
+ * FT_Size_Internal
+ *
+ * @description:
+ * An opaque handle to an `FT_Size_InternalRec` structure, used to model
+ * private data of a given @FT_Size object.
+ */ + typedef struct FT_Size_InternalRec_* FT_Size_Internal; + + + /************************************************************************** + * + * @section: + * sizing_and_scaling + * + */ + + /************************************************************************** + * + * @struct: + * FT_Size_Metrics + * + * @description: + * The size metrics structure gives the metrics of a size object. + * + * @fields: + * x_ppem :: + * The width of the scaled EM square in pixels, hence the term 'ppem' + * (pixels per EM). It is also referred to as 'nominal width'. + * + * y_ppem :: + * The height of the scaled EM square in pixels, hence the term 'ppem' + * (pixels per EM). It is also referred to as 'nominal height'. + * + * x_scale :: + * A 16.16 fractional scaling value to convert horizontal metrics from + * font units to 26.6 fractional pixels. Only relevant for scalable + * font formats. + * + * y_scale :: + * A 16.16 fractional scaling value to convert vertical metrics from + * font units to 26.6 fractional pixels. Only relevant for scalable + * font formats. + * + * ascender :: + * The ascender in 26.6 fractional pixels, rounded up to an integer + * value. See @FT_FaceRec for the details. + * + * descender :: + * The descender in 26.6 fractional pixels, rounded down to an integer + * value. See @FT_FaceRec for the details. + * + * height :: + * The height in 26.6 fractional pixels, rounded to an integer value. + * See @FT_FaceRec for the details. + * + * max_advance :: + * The maximum advance width in 26.6 fractional pixels, rounded to an + * integer value. See @FT_FaceRec for the details. + * + * @note: + * The scaling values, if relevant, are determined first during a size + * changing operation. The remaining fields are then set by the driver. + * For scalable formats, they are usually set to scaled values of the + * corresponding fields in @FT_FaceRec. Some values like ascender or + * descender are rounded for historical reasons; more precise values (for + * outline fonts) can be derived by scaling the corresponding @FT_FaceRec + * values manually, with code similar to the following. + * + * ``` + * scaled_ascender = FT_MulFix( face->ascender, + * size_metrics->y_scale ); + * ``` + * + * Note that due to glyph hinting and the selected rendering mode these + * values are usually not exact; consequently, they must be treated as + * unreliable with an error margin of at least one pixel! + * + * Indeed, the only way to get the exact metrics is to render _all_ + * glyphs. As this would be a definite performance hit, it is up to + * client applications to perform such computations. + * + * The `FT_Size_Metrics` structure is valid for bitmap fonts also. + * + * + * **TrueType fonts with native bytecode hinting** + * + * All applications that handle TrueType fonts with native hinting must + * be aware that TTFs expect different rounding of vertical font + * dimensions. The application has to cater for this, especially if it + * wants to rely on a TTF's vertical data (for example, to properly align + * box characters vertically). + * + * Only the application knows _in advance_ that it is going to use native + * hinting for TTFs! FreeType, on the other hand, selects the hinting + * mode not at the time of creating an @FT_Size object but much later, + * namely while calling @FT_Load_Glyph. + * + * Here is some pseudo code that illustrates a possible solution. 
+ * + * ``` + * font_format = FT_Get_Font_Format( face ); + * + * if ( !strcmp( font_format, "TrueType" ) && + * do_native_bytecode_hinting ) + * { + * ascender = ROUND( FT_MulFix( face->ascender, + * size_metrics->y_scale ) ); + * descender = ROUND( FT_MulFix( face->descender, + * size_metrics->y_scale ) ); + * } + * else + * { + * ascender = size_metrics->ascender; + * descender = size_metrics->descender; + * } + * + * height = size_metrics->height; + * max_advance = size_metrics->max_advance; + * ``` + */ + typedef struct FT_Size_Metrics_ + { + FT_UShort x_ppem; /* horizontal pixels per EM */ + FT_UShort y_ppem; /* vertical pixels per EM */ + + FT_Fixed x_scale; /* scaling values used to convert font */ + FT_Fixed y_scale; /* units to 26.6 fractional pixels */ + + FT_Pos ascender; /* ascender in 26.6 frac. pixels */ + FT_Pos descender; /* descender in 26.6 frac. pixels */ + FT_Pos height; /* text height in 26.6 frac. pixels */ + FT_Pos max_advance; /* max horizontal advance, in 26.6 pixels */ + + } FT_Size_Metrics; + + + /************************************************************************** + * + * @struct: + * FT_SizeRec + * + * @description: + * FreeType root size class structure. A size object models a face + * object at a given size. + * + * @fields: + * face :: + * Handle to the parent face object. + * + * generic :: + * A typeless pointer, unused by the FreeType library or any of its + * drivers. It can be used by client applications to link their own + * data to each size object. + * + * metrics :: + * Metrics for this size object. This field is read-only. + */ + typedef struct FT_SizeRec_ + { + FT_Face face; /* parent face object */ + FT_Generic generic; /* generic pointer for client uses */ + FT_Size_Metrics metrics; /* size metrics */ + FT_Size_Internal internal; + + } FT_SizeRec; + + + /************************************************************************** + * + * @section: + * other_api_data + * + */ + + /************************************************************************** + * + * @struct: + * FT_SubGlyph + * + * @description: + * The subglyph structure is an internal object used to describe + * subglyphs (for example, in the case of composites). + * + * @note: + * The subglyph implementation is not part of the high-level API, hence + * the forward structure declaration. + * + * You can however retrieve subglyph information with + * @FT_Get_SubGlyph_Info. + */ + typedef struct FT_SubGlyphRec_* FT_SubGlyph; + + + /************************************************************************** + * + * @type: + * FT_Slot_Internal + * + * @description: + * An opaque handle to an `FT_Slot_InternalRec` structure, used to model + * private data of a given @FT_GlyphSlot object. + */ + typedef struct FT_Slot_InternalRec_* FT_Slot_Internal; + + + /************************************************************************** + * + * @section: + * glyph_retrieval + * + */ + + /************************************************************************** + * + * @struct: + * FT_GlyphSlotRec + * + * @description: + * FreeType root glyph slot class structure. A glyph slot is a container + * where individual glyphs can be loaded, be they in outline or bitmap + * format. + * + * @fields: + * library :: + * A handle to the FreeType library instance this slot belongs to. + * + * face :: + * A handle to the parent face object. + * + * next :: + * In some cases (like some font tools), several glyph slots per face + * object can be a good thing. 
As this is rare, the glyph slots are + * listed through a direct, single-linked list using its `next` field. + * + * glyph_index :: + * [Since 2.10] The glyph index passed as an argument to @FT_Load_Glyph + * while initializing the glyph slot. + * + * generic :: + * A typeless pointer unused by the FreeType library or any of its + * drivers. It can be used by client applications to link their own + * data to each glyph slot object. + * + * metrics :: + * The metrics of the last loaded glyph in the slot. The returned + * values depend on the last load flags (see the @FT_Load_Glyph API + * function) and can be expressed either in 26.6 fractional pixels or + * font units. + * + * Note that even when the glyph image is transformed, the metrics are + * not. + * + * linearHoriAdvance :: + * The advance width of the unhinted glyph. Its value is expressed in + * 16.16 fractional pixels, unless @FT_LOAD_LINEAR_DESIGN is set when + * loading the glyph. This field can be important to perform correct + * WYSIWYG layout. Only relevant for scalable glyphs. + * + * linearVertAdvance :: + * The advance height of the unhinted glyph. Its value is expressed in + * 16.16 fractional pixels, unless @FT_LOAD_LINEAR_DESIGN is set when + * loading the glyph. This field can be important to perform correct + * WYSIWYG layout. Only relevant for scalable glyphs. + * + * advance :: + * This shorthand is, depending on @FT_LOAD_IGNORE_TRANSFORM, the + * transformed (hinted) advance width for the glyph, in 26.6 fractional + * pixel format. As specified with @FT_LOAD_VERTICAL_LAYOUT, it uses + * either the `horiAdvance` or the `vertAdvance` value of `metrics` + * field. + * + * format :: + * This field indicates the format of the image contained in the glyph + * slot. Typically @FT_GLYPH_FORMAT_BITMAP, @FT_GLYPH_FORMAT_OUTLINE, + * or @FT_GLYPH_FORMAT_COMPOSITE, but other values are possible. + * + * bitmap :: + * This field is used as a bitmap descriptor. Note that the address + * and content of the bitmap buffer can change between calls of + * @FT_Load_Glyph and a few other functions. + * + * bitmap_left :: + * The bitmap's left bearing expressed in integer pixels. + * + * bitmap_top :: + * The bitmap's top bearing expressed in integer pixels. This is the + * distance from the baseline to the top-most glyph scanline, upwards + * y~coordinates being **positive**. + * + * outline :: + * The outline descriptor for the current glyph image if its format is + * @FT_GLYPH_FORMAT_OUTLINE. Once a glyph is loaded, `outline` can be + * transformed, distorted, emboldened, etc. However, it must not be + * freed. + * + * [Since 2.10.1] If @FT_LOAD_NO_SCALE is set, outline coordinates of + * OpenType variation fonts for a selected instance are internally + * handled as 26.6 fractional font units but returned as (rounded) + * integers, as expected. To get unrounded font units, don't use + * @FT_LOAD_NO_SCALE but load the glyph with @FT_LOAD_NO_HINTING and + * scale it, using the font's `units_per_EM` value as the ppem. + * + * num_subglyphs :: + * The number of subglyphs in a composite glyph. This field is only + * valid for the composite glyph format that should normally only be + * loaded with the @FT_LOAD_NO_RECURSE flag. + * + * subglyphs :: + * An array of subglyph descriptors for composite glyphs. There are + * `num_subglyphs` elements in there. Currently internal to FreeType. + * + * control_data :: + * Certain font drivers can also return the control data for a given + * glyph image (e.g. 
TrueType bytecode, Type~1 charstrings, etc.).
+ * This field is a pointer to such data; it is currently internal to
+ * FreeType.
+ *
+ * control_len ::
+ * This is the length in bytes of the control data. Currently internal
+ * to FreeType.
+ *
+ * other ::
+ * Reserved.
+ *
+ * lsb_delta ::
+ * The difference between hinted and unhinted left side bearing while
+ * auto-hinting is active. Zero otherwise.
+ *
+ * rsb_delta ::
+ * The difference between hinted and unhinted right side bearing while
+ * auto-hinting is active. Zero otherwise.
+ *
+ * @note:
+ * If @FT_Load_Glyph is called with default flags (see @FT_LOAD_DEFAULT)
+ * the glyph image is loaded in the glyph slot in its native format
+ * (e.g., an outline glyph for TrueType and Type~1 formats). [Since 2.9]
+ * The prospective bitmap metrics are calculated according to
+ * @FT_LOAD_TARGET_XXX and other flags even for the outline glyph, even
+ * if @FT_LOAD_RENDER is not set.
+ *
+ * This image can later be converted into a bitmap by calling
+ * @FT_Render_Glyph. This function searches the current renderer for the
+ * native image's format, then invokes it.
+ *
+ * The renderer is in charge of transforming the native image through the
+ * slot's face transformation fields, then converting it into a bitmap
+ * that is returned in `slot->bitmap`.
+ *
+ * Note that `slot->bitmap_left` and `slot->bitmap_top` are also used to
+ * specify the position of the bitmap relative to the current pen
+ * position (e.g., coordinates (0,0) on the baseline). Of course,
+ * `slot->format` is also changed to @FT_GLYPH_FORMAT_BITMAP.
+ *
+ * Here is a small pseudo code fragment that shows how to use `lsb_delta`
+ * and `rsb_delta` to do fractional positioning of glyphs:
+ *
+ * ```
+ * FT_GlyphSlot slot = face->glyph;
+ * FT_Pos origin_x = 0;
+ *
+ *
+ * for all glyphs do
+ * <load glyph with `FT_Load_Glyph`>
+ *
+ * FT_Outline_Translate( &slot->outline, origin_x & 63, 0 );
+ *
+ * <save glyph image, or render glyph, or ...>
+ *
+ * <compute kern between current and next glyph
+ * and add it to `origin_x`>
+ *
+ * origin_x += slot->advance.x;
+ * origin_x += slot->lsb_delta - slot->rsb_delta;
+ * endfor
+ * ```
+ *
+ * Here is another small pseudo code fragment that shows how to use
+ * `lsb_delta` and `rsb_delta` to improve integer positioning of glyphs:
+ *
+ * ```
+ * FT_GlyphSlot slot = face->glyph;
+ * FT_Pos origin_x = 0;
+ * FT_Pos prev_rsb_delta = 0;
+ *
+ *
+ * for all glyphs do
+ * <compute kern between current and previous glyph
+ * and add it to `origin_x`>
+ *
+ * <load glyph with `FT_Load_Glyph`>
+ *
+ * if ( prev_rsb_delta - slot->lsb_delta > 32 )
+ * origin_x -= 64;
+ * else if ( prev_rsb_delta - slot->lsb_delta < -31 )
+ * origin_x += 64;
+ *
+ * prev_rsb_delta = slot->rsb_delta;
+ *
+ * <save glyph image, or render glyph, or ...>
+ *
+ * origin_x += slot->advance.x;
+ * endfor
+ * ```
+ *
+ * If you use strong auto-hinting, you **must** apply these delta values!
+ * Otherwise you will experience far too large inter-glyph spacing at
+ * small rendering sizes in most cases. Note that it does no harm to use
+ * the above code for other hinting modes also, since the delta values
+ * are zero then.
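+ *
+ * For reference, the second scheme could look like the following sketch
+ * in real code; `glyph_indices`, `num_glyphs`, and the drawing step are
+ * application-side assumptions, and kerning and error handling are
+ * omitted for brevity:
+ *
+ * ```
+ * FT_GlyphSlot slot = face->glyph;
+ * FT_Pos origin_x = 0;
+ * FT_Pos prev_rsb_delta = 0;
+ * FT_UInt n;
+ *
+ *
+ * for ( n = 0; n < num_glyphs; n++ )
+ * {
+ * FT_Load_Glyph( face, glyph_indices[n], FT_LOAD_DEFAULT );
+ *
+ * if ( prev_rsb_delta - slot->lsb_delta > 32 )
+ * origin_x -= 64;
+ * else if ( prev_rsb_delta - slot->lsb_delta < -31 )
+ * origin_x += 64;
+ *
+ * prev_rsb_delta = slot->rsb_delta;
+ *
+ * // render or store the glyph image at `origin_x` here
+ *
+ * origin_x += slot->advance.x;
+ * }
+ * ```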
+ */ + typedef struct FT_GlyphSlotRec_ + { + FT_Library library; + FT_Face face; + FT_GlyphSlot next; + FT_UInt glyph_index; /* new in 2.10; was reserved previously */ + FT_Generic generic; + + FT_Glyph_Metrics metrics; + FT_Fixed linearHoriAdvance; + FT_Fixed linearVertAdvance; + FT_Vector advance; + + FT_Glyph_Format format; + + FT_Bitmap bitmap; + FT_Int bitmap_left; + FT_Int bitmap_top; + + FT_Outline outline; + + FT_UInt num_subglyphs; + FT_SubGlyph subglyphs; + + void* control_data; + long control_len; + + FT_Pos lsb_delta; + FT_Pos rsb_delta; + + void* other; + + FT_Slot_Internal internal; + + } FT_GlyphSlotRec; + + + /*************************************************************************/ + /*************************************************************************/ + /* */ + /* F U N C T I O N S */ + /* */ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * @section: + * library_setup + * + */ + + /************************************************************************** + * + * @function: + * FT_Init_FreeType + * + * @description: + * Initialize a new FreeType library object. The set of modules that are + * registered by this function is determined at build time. + * + * @output: + * alibrary :: + * A handle to a new library object. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * In case you want to provide your own memory allocating routines, use + * @FT_New_Library instead, followed by a call to @FT_Add_Default_Modules + * (or a series of calls to @FT_Add_Module) and + * @FT_Set_Default_Properties. + * + * See the documentation of @FT_Library and @FT_Face for multi-threading + * issues. + * + * If you need reference-counting (cf. @FT_Reference_Library), use + * @FT_New_Library and @FT_Done_Library. + * + * If compilation option `FT_CONFIG_OPTION_ENVIRONMENT_PROPERTIES` is + * set, this function reads the `FREETYPE_PROPERTIES` environment + * variable to control driver properties. See section @properties for + * more. + */ + FT_EXPORT( FT_Error ) + FT_Init_FreeType( FT_Library *alibrary ); + + + /************************************************************************** + * + * @function: + * FT_Done_FreeType + * + * @description: + * Destroy a given FreeType library object and all of its children, + * including resources, drivers, faces, sizes, etc. + * + * @input: + * library :: + * A handle to the target library object. + * + * @return: + * FreeType error code. 0~means success. + */ + FT_EXPORT( FT_Error ) + FT_Done_FreeType( FT_Library library ); + + + /************************************************************************** + * + * @section: + * face_creation + * + */ + + /************************************************************************** + * + * @enum: + * FT_OPEN_XXX + * + * @description: + * A list of bit field constants used within the `flags` field of the + * @FT_Open_Args structure. + * + * @values: + * FT_OPEN_MEMORY :: + * This is a memory-based stream. + * + * FT_OPEN_STREAM :: + * Copy the stream from the `stream` field. + * + * FT_OPEN_PATHNAME :: + * Create a new input stream from a C~path name. + * + * FT_OPEN_DRIVER :: + * Use the `driver` field. + * + * FT_OPEN_PARAMS :: + * Use the `num_params` and `params` fields. 
+ * + * @note: + * The `FT_OPEN_MEMORY`, `FT_OPEN_STREAM`, and `FT_OPEN_PATHNAME` flags + * are mutually exclusive. + */ +#define FT_OPEN_MEMORY 0x1 +#define FT_OPEN_STREAM 0x2 +#define FT_OPEN_PATHNAME 0x4 +#define FT_OPEN_DRIVER 0x8 +#define FT_OPEN_PARAMS 0x10 + + + /* these constants are deprecated; use the corresponding `FT_OPEN_XXX` */ + /* values instead */ +#define ft_open_memory FT_OPEN_MEMORY +#define ft_open_stream FT_OPEN_STREAM +#define ft_open_pathname FT_OPEN_PATHNAME +#define ft_open_driver FT_OPEN_DRIVER +#define ft_open_params FT_OPEN_PARAMS + + + /************************************************************************** + * + * @struct: + * FT_Parameter + * + * @description: + * A simple structure to pass more or less generic parameters to + * @FT_Open_Face and @FT_Face_Properties. + * + * @fields: + * tag :: + * A four-byte identification tag. + * + * data :: + * A pointer to the parameter data. + * + * @note: + * The ID and function of parameters are driver-specific. See section + * @parameter_tags for more information. + */ + typedef struct FT_Parameter_ + { + FT_ULong tag; + FT_Pointer data; + + } FT_Parameter; + + + /************************************************************************** + * + * @struct: + * FT_Open_Args + * + * @description: + * A structure to indicate how to open a new font file or stream. A + * pointer to such a structure can be used as a parameter for the + * functions @FT_Open_Face and @FT_Attach_Stream. + * + * @fields: + * flags :: + * A set of bit flags indicating how to use the structure. + * + * memory_base :: + * The first byte of the file in memory. + * + * memory_size :: + * The size in bytes of the file in memory. + * + * pathname :: + * A pointer to an 8-bit file pathname, which must be a C~string (i.e., + * no null bytes except at the very end). The pointer is not owned by + * FreeType. + * + * stream :: + * A handle to a source stream object. + * + * driver :: + * This field is exclusively used by @FT_Open_Face; it simply specifies + * the font driver to use for opening the face. If set to `NULL`, + * FreeType tries to load the face with each one of the drivers in its + * list. + * + * num_params :: + * The number of extra parameters. + * + * params :: + * Extra parameters passed to the font driver when opening a new face. + * + * @note: + * The stream type is determined by the contents of `flags`: + * + * If the @FT_OPEN_MEMORY bit is set, assume that this is a memory file + * of `memory_size` bytes, located at `memory_address`. The data are not + * copied, and the client is responsible for releasing and destroying + * them _after_ the corresponding call to @FT_Done_Face. + * + * Otherwise, if the @FT_OPEN_STREAM bit is set, assume that a custom + * input stream `stream` is used. + * + * Otherwise, if the @FT_OPEN_PATHNAME bit is set, assume that this is a + * normal file and use `pathname` to open it. + * + * If none of the above bits are set or if multiple are set at the same + * time, the flags are invalid and @FT_Open_Face fails. + * + * If the @FT_OPEN_DRIVER bit is set, @FT_Open_Face only tries to open + * the file with the driver whose handler is in `driver`. + * + * If the @FT_OPEN_PARAMS bit is set, the parameters given by + * `num_params` and `params` is used. They are ignored otherwise. + * + * Ideally, both the `pathname` and `params` fields should be tagged as + * 'const'; this is missing for API backward compatibility. In other + * words, applications should treat them as read-only. 
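+   *
+   *   As an illustrative sketch (error handling omitted; `font_buffer`
+   *   and `font_buffer_size` are assumed to describe a font file that
+   *   the application has already loaded into memory), a memory-based
+   *   face could be opened like this:
+   *
+   *   ```
+   *     FT_Library    library;
+   *     FT_Open_Args  args;
+   *     FT_Face       face;
+   *
+   *
+   *     FT_Init_FreeType( &library );
+   *
+   *     args.flags       = FT_OPEN_MEMORY;
+   *     args.memory_base = font_buffer;       // data is not copied
+   *     args.memory_size = font_buffer_size;
+   *
+   *     FT_Open_Face( library, &args, 0, &face );
+   *
+   *     // ... use `face`, then clean up; the buffer must stay
+   *     // valid until after FT_Done_Face
+   *     FT_Done_Face( face );
+   *     FT_Done_FreeType( library );
+   *   ```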
+ */ + typedef struct FT_Open_Args_ + { + FT_UInt flags; + const FT_Byte* memory_base; + FT_Long memory_size; + FT_String* pathname; + FT_Stream stream; + FT_Module driver; + FT_Int num_params; + FT_Parameter* params; + + } FT_Open_Args; + + + /************************************************************************** + * + * @function: + * FT_New_Face + * + * @description: + * Call @FT_Open_Face to open a font by its pathname. + * + * @inout: + * library :: + * A handle to the library resource. + * + * @input: + * pathname :: + * A path to the font file. + * + * face_index :: + * See @FT_Open_Face for a detailed description of this parameter. + * + * @output: + * aface :: + * A handle to a new face object. If `face_index` is greater than or + * equal to zero, it must be non-`NULL`. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The `pathname` string should be recognizable as such by a standard + * `fopen` call on your system; in particular, this means that `pathname` + * must not contain null bytes. If that is not sufficient to address all + * file name possibilities (for example, to handle wide character file + * names on Windows in UTF-16 encoding) you might use @FT_Open_Face to + * pass a memory array or a stream object instead. + * + * Use @FT_Done_Face to destroy the created @FT_Face object (along with + * its slot and sizes). + */ + FT_EXPORT( FT_Error ) + FT_New_Face( FT_Library library, + const char* filepathname, + FT_Long face_index, + FT_Face *aface ); + + + /************************************************************************** + * + * @function: + * FT_New_Memory_Face + * + * @description: + * Call @FT_Open_Face to open a font that has been loaded into memory. + * + * @inout: + * library :: + * A handle to the library resource. + * + * @input: + * file_base :: + * A pointer to the beginning of the font data. + * + * file_size :: + * The size of the memory chunk used by the font data. + * + * face_index :: + * See @FT_Open_Face for a detailed description of this parameter. + * + * @output: + * aface :: + * A handle to a new face object. If `face_index` is greater than or + * equal to zero, it must be non-`NULL`. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * You must not deallocate the memory before calling @FT_Done_Face. + */ + FT_EXPORT( FT_Error ) + FT_New_Memory_Face( FT_Library library, + const FT_Byte* file_base, + FT_Long file_size, + FT_Long face_index, + FT_Face *aface ); + + + /************************************************************************** + * + * @function: + * FT_Open_Face + * + * @description: + * Create a face object from a given resource described by @FT_Open_Args. + * + * @inout: + * library :: + * A handle to the library resource. + * + * @input: + * args :: + * A pointer to an `FT_Open_Args` structure that must be filled by the + * caller. + * + * face_index :: + * This field holds two different values. Bits 0-15 are the index of + * the face in the font file (starting with value~0). Set it to~0 if + * there is only one face in the font file. + * + * [Since 2.6.1] Bits 16-30 are relevant to GX and OpenType variation + * fonts only, specifying the named instance index for the current face + * index (starting with value~1; value~0 makes FreeType ignore named + * instances). For non-variation fonts, bits 16-30 are ignored. + * Assuming that you want to access the third named instance in face~4, + * `face_index` should be set to 0x00030004. 
If you want to access + * face~4 without variation handling, simply set `face_index` to + * value~4. + * + * `FT_Open_Face` and its siblings can be used to quickly check whether + * the font format of a given font resource is supported by FreeType. + * In general, if the `face_index` argument is negative, the function's + * return value is~0 if the font format is recognized, or non-zero + * otherwise. The function allocates a more or less empty face handle + * in `*aface` (if `aface` isn't `NULL`); the only two useful fields in + * this special case are `face->num_faces` and `face->style_flags`. + * For any negative value of `face_index`, `face->num_faces` gives the + * number of faces within the font file. For the negative value + * '-(N+1)' (with 'N' a non-negative 16-bit value), bits 16-30 in + * `face->style_flags` give the number of named instances in face 'N' + * if we have a variation font (or zero otherwise). After examination, + * the returned @FT_Face structure should be deallocated with a call to + * @FT_Done_Face. + * + * @output: + * aface :: + * A handle to a new face object. If `face_index` is greater than or + * equal to zero, it must be non-`NULL`. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * Unlike FreeType 1.x, this function automatically creates a glyph slot + * for the face object that can be accessed directly through + * `face->glyph`. + * + * Each new face object created with this function also owns a default + * @FT_Size object, accessible as `face->size`. + * + * One @FT_Library instance can have multiple face objects, that is, + * @FT_Open_Face and its siblings can be called multiple times using the + * same `library` argument. + * + * See the discussion of reference counters in the description of + * @FT_Reference_Face. + * + * If `FT_OPEN_STREAM` is set in `args->flags`, the stream in + * `args->stream` is automatically closed before this function returns + * any error (including `FT_Err_Invalid_Argument`). + * + * @example: + * To loop over all faces, use code similar to the following snippet + * (omitting the error handling). + * + * ``` + * ... + * FT_Face face; + * FT_Long i, num_faces; + * + * + * error = FT_Open_Face( library, args, -1, &face ); + * if ( error ) { ... } + * + * num_faces = face->num_faces; + * FT_Done_Face( face ); + * + * for ( i = 0; i < num_faces; i++ ) + * { + * ... + * error = FT_Open_Face( library, args, i, &face ); + * ... + * FT_Done_Face( face ); + * ... + * } + * ``` + * + * To loop over all valid values for `face_index`, use something similar + * to the following snippet, again without error handling. The code + * accesses all faces immediately (thus only a single call of + * `FT_Open_Face` within the do-loop), with and without named instances. + * + * ``` + * ... + * FT_Face face; + * + * FT_Long num_faces = 0; + * FT_Long num_instances = 0; + * + * FT_Long face_idx = 0; + * FT_Long instance_idx = 0; + * + * + * do + * { + * FT_Long id = ( instance_idx << 16 ) + face_idx; + * + * + * error = FT_Open_Face( library, args, id, &face ); + * if ( error ) { ... } + * + * num_faces = face->num_faces; + * num_instances = face->style_flags >> 16; + * + * ... 
+ * + * FT_Done_Face( face ); + * + * if ( instance_idx < num_instances ) + * instance_idx++; + * else + * { + * face_idx++; + * instance_idx = 0; + * } + * + * } while ( face_idx < num_faces ) + * ``` + */ + FT_EXPORT( FT_Error ) + FT_Open_Face( FT_Library library, + const FT_Open_Args* args, + FT_Long face_index, + FT_Face *aface ); + + + /************************************************************************** + * + * @function: + * FT_Attach_File + * + * @description: + * Call @FT_Attach_Stream to attach a file. + * + * @inout: + * face :: + * The target face object. + * + * @input: + * filepathname :: + * The pathname. + * + * @return: + * FreeType error code. 0~means success. + */ + FT_EXPORT( FT_Error ) + FT_Attach_File( FT_Face face, + const char* filepathname ); + + + /************************************************************************** + * + * @function: + * FT_Attach_Stream + * + * @description: + * 'Attach' data to a face object. Normally, this is used to read + * additional information for the face object. For example, you can + * attach an AFM file that comes with a Type~1 font to get the kerning + * values and other metrics. + * + * @inout: + * face :: + * The target face object. + * + * @input: + * parameters :: + * A pointer to @FT_Open_Args that must be filled by the caller. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The meaning of the 'attach' (i.e., what really happens when the new + * file is read) is not fixed by FreeType itself. It really depends on + * the font format (and thus the font driver). + * + * Client applications are expected to know what they are doing when + * invoking this function. Most drivers simply do not implement file or + * stream attachments. + */ + FT_EXPORT( FT_Error ) + FT_Attach_Stream( FT_Face face, + const FT_Open_Args* parameters ); + + + /************************************************************************** + * + * @function: + * FT_Reference_Face + * + * @description: + * A counter gets initialized to~1 at the time an @FT_Face structure is + * created. This function increments the counter. @FT_Done_Face then + * only destroys a face if the counter is~1, otherwise it simply + * decrements the counter. + * + * This function helps in managing life-cycles of structures that + * reference @FT_Face objects. + * + * @input: + * face :: + * A handle to a target face object. + * + * @return: + * FreeType error code. 0~means success. + * + * @since: + * 2.4.2 + * + */ + FT_EXPORT( FT_Error ) + FT_Reference_Face( FT_Face face ); + + + /************************************************************************** + * + * @function: + * FT_Done_Face + * + * @description: + * Discard a given face object, as well as all of its child slots and + * sizes. + * + * @input: + * face :: + * A handle to a target face object. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * See the discussion of reference counters in the description of + * @FT_Reference_Face. + */ + FT_EXPORT( FT_Error ) + FT_Done_Face( FT_Face face ); + + + /************************************************************************** + * + * @section: + * sizing_and_scaling + * + */ + + /************************************************************************** + * + * @function: + * FT_Select_Size + * + * @description: + * Select a bitmap strike. 
To be more precise, this function sets the + * scaling factors of the active @FT_Size object in a face so that + * bitmaps from this particular strike are taken by @FT_Load_Glyph and + * friends. + * + * @inout: + * face :: + * A handle to a target face object. + * + * @input: + * strike_index :: + * The index of the bitmap strike in the `available_sizes` field of + * @FT_FaceRec structure. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * For bitmaps embedded in outline fonts it is common that only a subset + * of the available glyphs at a given ppem value is available. FreeType + * silently uses outlines if there is no bitmap for a given glyph index. + * + * For GX and OpenType variation fonts, a bitmap strike makes sense only + * if the default instance is active (that is, no glyph variation takes + * place); otherwise, FreeType simply ignores bitmap strikes. The same + * is true for all named instances that are different from the default + * instance. + * + * Don't use this function if you are using the FreeType cache API. + */ + FT_EXPORT( FT_Error ) + FT_Select_Size( FT_Face face, + FT_Int strike_index ); + + + /************************************************************************** + * + * @enum: + * FT_Size_Request_Type + * + * @description: + * An enumeration type that lists the supported size request types, i.e., + * what input size (in font units) maps to the requested output size (in + * pixels, as computed from the arguments of @FT_Size_Request). + * + * @values: + * FT_SIZE_REQUEST_TYPE_NOMINAL :: + * The nominal size. The `units_per_EM` field of @FT_FaceRec is used + * to determine both scaling values. + * + * This is the standard scaling found in most applications. In + * particular, use this size request type for TrueType fonts if they + * provide optical scaling or something similar. Note, however, that + * `units_per_EM` is a rather abstract value which bears no relation to + * the actual size of the glyphs in a font. + * + * FT_SIZE_REQUEST_TYPE_REAL_DIM :: + * The real dimension. The sum of the `ascender` and (minus of) the + * `descender` fields of @FT_FaceRec is used to determine both scaling + * values. + * + * FT_SIZE_REQUEST_TYPE_BBOX :: + * The font bounding box. The width and height of the `bbox` field of + * @FT_FaceRec are used to determine the horizontal and vertical + * scaling value, respectively. + * + * FT_SIZE_REQUEST_TYPE_CELL :: + * The `max_advance_width` field of @FT_FaceRec is used to determine + * the horizontal scaling value; the vertical scaling value is + * determined the same way as @FT_SIZE_REQUEST_TYPE_REAL_DIM does. + * Finally, both scaling values are set to the smaller one. This type + * is useful if you want to specify the font size for, say, a window of + * a given dimension and 80x24 cells. + * + * FT_SIZE_REQUEST_TYPE_SCALES :: + * Specify the scaling values directly. + * + * @note: + * The above descriptions only apply to scalable formats. For bitmap + * formats, the behaviour is up to the driver. + * + * See the note section of @FT_Size_Metrics if you wonder how size + * requesting relates to scaling values. 
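+   *
+   *   As a sketch of how a request type is used in practice (a face
+   *   that is already open is assumed), the following nominal request
+   *   is roughly equivalent to calling @FT_Set_Char_Size with 12pt at
+   *   96dpi:
+   *
+   *   ```
+   *     FT_Size_RequestRec  req;
+   *
+   *
+   *     req.type           = FT_SIZE_REQUEST_TYPE_NOMINAL;
+   *     req.width          = 12 * 64;   // 26.6 fractional points
+   *     req.height         = 12 * 64;
+   *     req.horiResolution = 96;        // dpi
+   *     req.vertResolution = 96;
+   *
+   *     FT_Request_Size( face, &req );
+   *   ```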
+ */ + typedef enum FT_Size_Request_Type_ + { + FT_SIZE_REQUEST_TYPE_NOMINAL, + FT_SIZE_REQUEST_TYPE_REAL_DIM, + FT_SIZE_REQUEST_TYPE_BBOX, + FT_SIZE_REQUEST_TYPE_CELL, + FT_SIZE_REQUEST_TYPE_SCALES, + + FT_SIZE_REQUEST_TYPE_MAX + + } FT_Size_Request_Type; + + + /************************************************************************** + * + * @struct: + * FT_Size_RequestRec + * + * @description: + * A structure to model a size request. + * + * @fields: + * type :: + * See @FT_Size_Request_Type. + * + * width :: + * The desired width, given as a 26.6 fractional point value (with 72pt + * = 1in). + * + * height :: + * The desired height, given as a 26.6 fractional point value (with + * 72pt = 1in). + * + * horiResolution :: + * The horizontal resolution (dpi, i.e., pixels per inch). If set to + * zero, `width` is treated as a 26.6 fractional **pixel** value, which + * gets internally rounded to an integer. + * + * vertResolution :: + * The vertical resolution (dpi, i.e., pixels per inch). If set to + * zero, `height` is treated as a 26.6 fractional **pixel** value, + * which gets internally rounded to an integer. + * + * @note: + * If `width` is zero, the horizontal scaling value is set equal to the + * vertical scaling value, and vice versa. + * + * If `type` is `FT_SIZE_REQUEST_TYPE_SCALES`, `width` and `height` are + * interpreted directly as 16.16 fractional scaling values, without any + * further modification, and both `horiResolution` and `vertResolution` + * are ignored. + */ + typedef struct FT_Size_RequestRec_ + { + FT_Size_Request_Type type; + FT_Long width; + FT_Long height; + FT_UInt horiResolution; + FT_UInt vertResolution; + + } FT_Size_RequestRec; + + + /************************************************************************** + * + * @struct: + * FT_Size_Request + * + * @description: + * A handle to a size request structure. + */ + typedef struct FT_Size_RequestRec_ *FT_Size_Request; + + + /************************************************************************** + * + * @function: + * FT_Request_Size + * + * @description: + * Resize the scale of the active @FT_Size object in a face. + * + * @inout: + * face :: + * A handle to a target face object. + * + * @input: + * req :: + * A pointer to a @FT_Size_RequestRec. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * Although drivers may select the bitmap strike matching the request, + * you should not rely on this if you intend to select a particular + * bitmap strike. Use @FT_Select_Size instead in that case. + * + * The relation between the requested size and the resulting glyph size + * is dependent entirely on how the size is defined in the source face. + * The font designer chooses the final size of each glyph relative to + * this size. For more information refer to + * 'https://www.freetype.org/freetype2/docs/glyphs/glyphs-2.html'. + * + * Contrary to @FT_Set_Char_Size, this function doesn't have special code + * to normalize zero-valued widths, heights, or resolutions, which are + * treated as @FT_LOAD_NO_SCALE. + * + * Don't use this function if you are using the FreeType cache API. + */ + FT_EXPORT( FT_Error ) + FT_Request_Size( FT_Face face, + FT_Size_Request req ); + + + /************************************************************************** + * + * @function: + * FT_Set_Char_Size + * + * @description: + * Call @FT_Request_Size to request the nominal size (in points). + * + * @inout: + * face :: + * A handle to a target face object. 
+ * + * @input: + * char_width :: + * The nominal width, in 26.6 fractional points. + * + * char_height :: + * The nominal height, in 26.6 fractional points. + * + * horz_resolution :: + * The horizontal resolution in dpi. + * + * vert_resolution :: + * The vertical resolution in dpi. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * While this function allows fractional points as input values, the + * resulting ppem value for the given resolution is always rounded to the + * nearest integer. + * + * If either the character width or height is zero, it is set equal to + * the other value. + * + * If either the horizontal or vertical resolution is zero, it is set + * equal to the other value. + * + * A character width or height smaller than 1pt is set to 1pt; if both + * resolution values are zero, they are set to 72dpi. + * + * Don't use this function if you are using the FreeType cache API. + */ + FT_EXPORT( FT_Error ) + FT_Set_Char_Size( FT_Face face, + FT_F26Dot6 char_width, + FT_F26Dot6 char_height, + FT_UInt horz_resolution, + FT_UInt vert_resolution ); + + + /************************************************************************** + * + * @function: + * FT_Set_Pixel_Sizes + * + * @description: + * Call @FT_Request_Size to request the nominal size (in pixels). + * + * @inout: + * face :: + * A handle to the target face object. + * + * @input: + * pixel_width :: + * The nominal width, in pixels. + * + * pixel_height :: + * The nominal height, in pixels. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * You should not rely on the resulting glyphs matching or being + * constrained to this pixel size. Refer to @FT_Request_Size to + * understand how requested sizes relate to actual sizes. + * + * Don't use this function if you are using the FreeType cache API. + */ + FT_EXPORT( FT_Error ) + FT_Set_Pixel_Sizes( FT_Face face, + FT_UInt pixel_width, + FT_UInt pixel_height ); + + + /************************************************************************** + * + * @section: + * glyph_retrieval + * + */ + + /************************************************************************** + * + * @function: + * FT_Load_Glyph + * + * @description: + * Load a glyph into the glyph slot of a face object. + * + * @inout: + * face :: + * A handle to the target face object where the glyph is loaded. + * + * @input: + * glyph_index :: + * The index of the glyph in the font file. For CID-keyed fonts + * (either in PS or in CFF format) this argument specifies the CID + * value. + * + * load_flags :: + * A flag indicating what to load for this glyph. The @FT_LOAD_XXX + * flags can be used to control the glyph loading process (e.g., + * whether the outline should be scaled, whether to load bitmaps or + * not, whether to hint the outline, etc). + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * For proper scaling and hinting, the active @FT_Size object owned by + * the face has to be meaningfully initialized by calling + * @FT_Set_Char_Size before this function, for example. The loaded + * glyph may be transformed. See @FT_Set_Transform for the details. + * + * For subsetted CID-keyed fonts, `FT_Err_Invalid_Argument` is returned + * for invalid CID values (that is, for CID values that don't have a + * corresponding glyph in the font). See the discussion of the + * @FT_FACE_FLAG_CID_KEYED flag for more details. 
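+   *
+   *   A sketch of the usual call sequence (assuming `face` is open and
+   *   `glyph_index` was obtained with @FT_Get_Char_Index; error
+   *   handling omitted):
+   *
+   *   ```
+   *     // 16pt at 300x300 dpi; a zero width means 'same as height'
+   *     FT_Set_Char_Size( face, 0, 16 * 64, 300, 300 );
+   *
+   *     FT_Load_Glyph( face, glyph_index, FT_LOAD_DEFAULT );
+   *   ```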
+ * + * If you receive `FT_Err_Glyph_Too_Big`, try getting the glyph outline + * at EM size, then scale it manually and fill it as a graphics + * operation. + */ + FT_EXPORT( FT_Error ) + FT_Load_Glyph( FT_Face face, + FT_UInt glyph_index, + FT_Int32 load_flags ); + + + /************************************************************************** + * + * @section: + * character_mapping + * + */ + + /************************************************************************** + * + * @function: + * FT_Load_Char + * + * @description: + * Load a glyph into the glyph slot of a face object, accessed by its + * character code. + * + * @inout: + * face :: + * A handle to a target face object where the glyph is loaded. + * + * @input: + * char_code :: + * The glyph's character code, according to the current charmap used in + * the face. + * + * load_flags :: + * A flag indicating what to load for this glyph. The @FT_LOAD_XXX + * constants can be used to control the glyph loading process (e.g., + * whether the outline should be scaled, whether to load bitmaps or + * not, whether to hint the outline, etc). + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function simply calls @FT_Get_Char_Index and @FT_Load_Glyph. + * + * Many fonts contain glyphs that can't be loaded by this function since + * its glyph indices are not listed in any of the font's charmaps. + * + * If no active cmap is set up (i.e., `face->charmap` is zero), the call + * to @FT_Get_Char_Index is omitted, and the function behaves identically + * to @FT_Load_Glyph. + */ + FT_EXPORT( FT_Error ) + FT_Load_Char( FT_Face face, + FT_ULong char_code, + FT_Int32 load_flags ); + + + /************************************************************************** + * + * @section: + * glyph_retrieval + * + */ + + /************************************************************************** + * + * @enum: + * FT_LOAD_XXX + * + * @description: + * A list of bit field constants for @FT_Load_Glyph to indicate what kind + * of operations to perform during glyph loading. + * + * @values: + * FT_LOAD_DEFAULT :: + * Corresponding to~0, this value is used as the default glyph load + * operation. In this case, the following happens: + * + * 1. FreeType looks for a bitmap for the glyph corresponding to the + * face's current size. If one is found, the function returns. The + * bitmap data can be accessed from the glyph slot (see note below). + * + * 2. If no embedded bitmap is searched for or found, FreeType looks + * for a scalable outline. If one is found, it is loaded from the font + * file, scaled to device pixels, then 'hinted' to the pixel grid in + * order to optimize it. The outline data can be accessed from the + * glyph slot (see note below). + * + * Note that by default the glyph loader doesn't render outlines into + * bitmaps. The following flags are used to modify this default + * behaviour to more specific and useful cases. + * + * FT_LOAD_NO_SCALE :: + * Don't scale the loaded outline glyph but keep it in font units. + * This flag is also assumed if @FT_Size owned by the face was not + * properly initialized. + * + * This flag implies @FT_LOAD_NO_HINTING and @FT_LOAD_NO_BITMAP, and + * unsets @FT_LOAD_RENDER. + * + * If the font is 'tricky' (see @FT_FACE_FLAG_TRICKY for more), using + * `FT_LOAD_NO_SCALE` usually yields meaningless outlines because the + * subglyphs must be scaled and positioned with hinting instructions. 
+ * This can be solved by loading the font without `FT_LOAD_NO_SCALE` + * and setting the character size to `font->units_per_EM`. + * + * FT_LOAD_NO_HINTING :: + * Disable hinting. This generally generates 'blurrier' bitmap glyphs + * when the glyphs are rendered in any of the anti-aliased modes. See + * also the note below. + * + * This flag is implied by @FT_LOAD_NO_SCALE. + * + * FT_LOAD_RENDER :: + * Call @FT_Render_Glyph after the glyph is loaded. By default, the + * glyph is rendered in @FT_RENDER_MODE_NORMAL mode. This can be + * overridden by @FT_LOAD_TARGET_XXX or @FT_LOAD_MONOCHROME. + * + * This flag is unset by @FT_LOAD_NO_SCALE. + * + * FT_LOAD_NO_BITMAP :: + * Ignore bitmap strikes when loading. Bitmap-only fonts ignore this + * flag. + * + * @FT_LOAD_NO_SCALE always sets this flag. + * + * FT_LOAD_SBITS_ONLY :: + * [Since 2.12] This is the opposite of @FT_LOAD_NO_BITMAP, more or + * less: @FT_Load_Glyph returns `FT_Err_Invalid_Argument` if the face + * contains a bitmap strike for the given size (or the strike selected + * by @FT_Select_Size) but there is no glyph in the strike. + * + * Note that this load flag was part of FreeType since version 2.0.6 + * but previously tagged as internal. + * + * FT_LOAD_VERTICAL_LAYOUT :: + * Load the glyph for vertical text layout. In particular, the + * `advance` value in the @FT_GlyphSlotRec structure is set to the + * `vertAdvance` value of the `metrics` field. + * + * In case @FT_HAS_VERTICAL doesn't return true, you shouldn't use this + * flag currently. Reason is that in this case vertical metrics get + * synthesized, and those values are not always consistent across + * various font formats. + * + * FT_LOAD_FORCE_AUTOHINT :: + * Prefer the auto-hinter over the font's native hinter. See also the + * note below. + * + * FT_LOAD_PEDANTIC :: + * Make the font driver perform pedantic verifications during glyph + * loading and hinting. This is mostly used to detect broken glyphs in + * fonts. By default, FreeType tries to handle broken fonts also. + * + * In particular, errors from the TrueType bytecode engine are not + * passed to the application if this flag is not set; this might result + * in partially hinted or distorted glyphs in case a glyph's bytecode + * is buggy. + * + * FT_LOAD_NO_RECURSE :: + * Don't load composite glyphs recursively. Instead, the font driver + * fills the `num_subglyph` and `subglyphs` values of the glyph slot; + * it also sets `glyph->format` to @FT_GLYPH_FORMAT_COMPOSITE. The + * description of subglyphs can then be accessed with + * @FT_Get_SubGlyph_Info. + * + * Don't use this flag for retrieving metrics information since some + * font drivers only return rudimentary data. + * + * This flag implies @FT_LOAD_NO_SCALE and @FT_LOAD_IGNORE_TRANSFORM. + * + * FT_LOAD_IGNORE_TRANSFORM :: + * Ignore the transform matrix set by @FT_Set_Transform. + * + * FT_LOAD_MONOCHROME :: + * This flag is used with @FT_LOAD_RENDER to indicate that you want to + * render an outline glyph to a 1-bit monochrome bitmap glyph, with + * 8~pixels packed into each byte of the bitmap data. + * + * Note that this has no effect on the hinting algorithm used. You + * should rather use @FT_LOAD_TARGET_MONO so that the + * monochrome-optimized hinting algorithm is used. + * + * FT_LOAD_LINEAR_DESIGN :: + * Keep `linearHoriAdvance` and `linearVertAdvance` fields of + * @FT_GlyphSlotRec in font units. See @FT_GlyphSlotRec for details. + * + * FT_LOAD_NO_AUTOHINT :: + * Disable the auto-hinter. See also the note below. 
+ * + * FT_LOAD_COLOR :: + * Load colored glyphs. FreeType searches in the following order; + * there are slight differences depending on the font format. + * + * [Since 2.5] Load embedded color bitmap images (provided + * @FT_LOAD_NO_BITMAP is not set). The resulting color bitmaps, if + * available, have the @FT_PIXEL_MODE_BGRA format, with pre-multiplied + * color channels. If the flag is not set and color bitmaps are found, + * they are converted to 256-level gray bitmaps, using the + * @FT_PIXEL_MODE_GRAY format. + * + * [Since 2.12] If the glyph index maps to an entry in the face's + * 'SVG~' table, load the associated SVG document from this table and + * set the `format` field of @FT_GlyphSlotRec to @FT_GLYPH_FORMAT_SVG + * ([since 2.13.1] provided @FT_LOAD_NO_SVG is not set). Note that + * FreeType itself can't render SVG documents; however, the library + * provides hooks to seamlessly integrate an external renderer. See + * sections @ot_svg_driver and @svg_fonts for more. + * + * [Since 2.10, experimental] If the glyph index maps to an entry in + * the face's 'COLR' table with a 'CPAL' palette table (as defined in + * the OpenType specification), make @FT_Render_Glyph provide a default + * blending of the color glyph layers associated with the glyph index, + * using the same bitmap format as embedded color bitmap images. This + * is mainly for convenience and works only for glyphs in 'COLR' v0 + * tables (or glyphs in 'COLR' v1 tables that exclusively use v0 + * features). For full control of color layers use + * @FT_Get_Color_Glyph_Layer and FreeType's color functions like + * @FT_Palette_Select instead of setting @FT_LOAD_COLOR for rendering + * so that the client application can handle blending by itself. + * + * FT_LOAD_NO_SVG :: + * [Since 2.13.1] Ignore SVG glyph data when loading. + * + * FT_LOAD_COMPUTE_METRICS :: + * [Since 2.6.1] Compute glyph metrics from the glyph data, without the + * use of bundled metrics tables (for example, the 'hdmx' table in + * TrueType fonts). This flag is mainly used by font validating or + * font editing applications, which need to ignore, verify, or edit + * those tables. + * + * Currently, this flag is only implemented for TrueType fonts. + * + * FT_LOAD_BITMAP_METRICS_ONLY :: + * [Since 2.7.1] Request loading of the metrics and bitmap image + * information of a (possibly embedded) bitmap glyph without allocating + * or copying the bitmap image data itself. No effect if the target + * glyph is not a bitmap image. + * + * This flag unsets @FT_LOAD_RENDER. + * + * FT_LOAD_CROP_BITMAP :: + * Ignored. Deprecated. + * + * FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH :: + * Ignored. Deprecated. + * + * @note: + * By default, hinting is enabled and the font's native hinter (see + * @FT_FACE_FLAG_HINTER) is preferred over the auto-hinter. You can + * disable hinting by setting @FT_LOAD_NO_HINTING or change the + * precedence by setting @FT_LOAD_FORCE_AUTOHINT. You can also set + * @FT_LOAD_NO_AUTOHINT in case you don't want the auto-hinter to be used + * at all. + * + * See the description of @FT_FACE_FLAG_TRICKY for a special exception + * (affecting only a handful of Asian fonts). + * + * Besides deciding which hinter to use, you can also decide which + * hinting algorithm to use. See @FT_LOAD_TARGET_XXX for details. + * + * Note that the auto-hinter needs a valid Unicode cmap (either a native + * one or synthesized by FreeType) for producing correct results. 
If a + * font provides an incorrect mapping (for example, assigning the + * character code U+005A, LATIN CAPITAL LETTER~Z, to a glyph depicting a + * mathematical integral sign), the auto-hinter might produce useless + * results. + * + */ +#define FT_LOAD_DEFAULT 0x0 +#define FT_LOAD_NO_SCALE ( 1L << 0 ) +#define FT_LOAD_NO_HINTING ( 1L << 1 ) +#define FT_LOAD_RENDER ( 1L << 2 ) +#define FT_LOAD_NO_BITMAP ( 1L << 3 ) +#define FT_LOAD_VERTICAL_LAYOUT ( 1L << 4 ) +#define FT_LOAD_FORCE_AUTOHINT ( 1L << 5 ) +#define FT_LOAD_CROP_BITMAP ( 1L << 6 ) +#define FT_LOAD_PEDANTIC ( 1L << 7 ) +#define FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH ( 1L << 9 ) +#define FT_LOAD_NO_RECURSE ( 1L << 10 ) +#define FT_LOAD_IGNORE_TRANSFORM ( 1L << 11 ) +#define FT_LOAD_MONOCHROME ( 1L << 12 ) +#define FT_LOAD_LINEAR_DESIGN ( 1L << 13 ) +#define FT_LOAD_SBITS_ONLY ( 1L << 14 ) +#define FT_LOAD_NO_AUTOHINT ( 1L << 15 ) + /* Bits 16-19 are used by `FT_LOAD_TARGET_` */ +#define FT_LOAD_COLOR ( 1L << 20 ) +#define FT_LOAD_COMPUTE_METRICS ( 1L << 21 ) +#define FT_LOAD_BITMAP_METRICS_ONLY ( 1L << 22 ) +#define FT_LOAD_NO_SVG ( 1L << 24 ) + + /* */ + + /* used internally only by certain font drivers */ +#define FT_LOAD_ADVANCE_ONLY ( 1L << 8 ) +#define FT_LOAD_SVG_ONLY ( 1L << 23 ) + + + /************************************************************************** + * + * @enum: + * FT_LOAD_TARGET_XXX + * + * @description: + * A list of values to select a specific hinting algorithm for the + * hinter. You should OR one of these values to your `load_flags` when + * calling @FT_Load_Glyph. + * + * Note that a font's native hinters may ignore the hinting algorithm you + * have specified (e.g., the TrueType bytecode interpreter). You can set + * @FT_LOAD_FORCE_AUTOHINT to ensure that the auto-hinter is used. + * + * @values: + * FT_LOAD_TARGET_NORMAL :: + * The default hinting algorithm, optimized for standard gray-level + * rendering. For monochrome output, use @FT_LOAD_TARGET_MONO instead. + * + * FT_LOAD_TARGET_LIGHT :: + * A lighter hinting algorithm for gray-level modes. Many generated + * glyphs are fuzzier but better resemble their original shape. This + * is achieved by snapping glyphs to the pixel grid only vertically + * (Y-axis), as is done by FreeType's new CFF engine or Microsoft's + * ClearType font renderer. This preserves inter-glyph spacing in + * horizontal text. The snapping is done either by the native font + * driver, if the driver itself and the font support it, or by the + * auto-hinter. + * + * Advance widths are rounded to integer values; however, using the + * `lsb_delta` and `rsb_delta` fields of @FT_GlyphSlotRec, it is + * possible to get fractional advance widths for subpixel positioning + * (which is recommended to use). + * + * If configuration option `AF_CONFIG_OPTION_TT_SIZE_METRICS` is + * active, TrueType-like metrics are used to make this mode behave + * similarly as in unpatched FreeType versions between 2.4.6 and 2.7.1 + * (inclusive). + * + * FT_LOAD_TARGET_MONO :: + * Strong hinting algorithm that should only be used for monochrome + * output. The result is probably unpleasant if the glyph is rendered + * in non-monochrome modes. + * + * Note that for outline fonts only the TrueType font driver has proper + * monochrome hinting support, provided the TTFs contain hints for B/W + * rendering (which most fonts no longer provide). If these conditions + * are not met it is very likely that you get ugly results at smaller + * sizes. 
+ * + * FT_LOAD_TARGET_LCD :: + * A variant of @FT_LOAD_TARGET_LIGHT optimized for horizontally + * decimated LCD displays. + * + * FT_LOAD_TARGET_LCD_V :: + * A variant of @FT_LOAD_TARGET_NORMAL optimized for vertically + * decimated LCD displays. + * + * @note: + * You should use only _one_ of the `FT_LOAD_TARGET_XXX` values in your + * `load_flags`. They can't be ORed. + * + * If @FT_LOAD_RENDER is also set, the glyph is rendered in the + * corresponding mode (i.e., the mode that matches the used algorithm + * best). An exception is `FT_LOAD_TARGET_MONO` since it implies + * @FT_LOAD_MONOCHROME. + * + * You can use a hinting algorithm that doesn't correspond to the same + * rendering mode. As an example, it is possible to use the 'light' + * hinting algorithm and have the results rendered in horizontal LCD + * pixel mode, with code like + * + * ``` + * FT_Load_Glyph( face, glyph_index, + * load_flags | FT_LOAD_TARGET_LIGHT ); + * + * FT_Render_Glyph( face->glyph, FT_RENDER_MODE_LCD ); + * ``` + * + * In general, you should stick with one rendering mode. For example, + * switching between @FT_LOAD_TARGET_NORMAL and @FT_LOAD_TARGET_MONO + * enforces a lot of recomputation for TrueType fonts, which is slow. + * Another reason is caching: Selecting a different mode usually causes + * changes in both the outlines and the rasterized bitmaps; it is thus + * necessary to empty the cache after a mode switch to avoid false hits. + * + */ +#define FT_LOAD_TARGET_( x ) ( FT_STATIC_CAST( FT_Int32, (x) & 15 ) << 16 ) + +#define FT_LOAD_TARGET_NORMAL FT_LOAD_TARGET_( FT_RENDER_MODE_NORMAL ) +#define FT_LOAD_TARGET_LIGHT FT_LOAD_TARGET_( FT_RENDER_MODE_LIGHT ) +#define FT_LOAD_TARGET_MONO FT_LOAD_TARGET_( FT_RENDER_MODE_MONO ) +#define FT_LOAD_TARGET_LCD FT_LOAD_TARGET_( FT_RENDER_MODE_LCD ) +#define FT_LOAD_TARGET_LCD_V FT_LOAD_TARGET_( FT_RENDER_MODE_LCD_V ) + + + /************************************************************************** + * + * @macro: + * FT_LOAD_TARGET_MODE + * + * @description: + * Return the @FT_Render_Mode corresponding to a given + * @FT_LOAD_TARGET_XXX value. + * + */ +#define FT_LOAD_TARGET_MODE( x ) \ + FT_STATIC_CAST( FT_Render_Mode, ( (x) >> 16 ) & 15 ) + + + /************************************************************************** + * + * @section: + * sizing_and_scaling + * + */ + + /************************************************************************** + * + * @function: + * FT_Set_Transform + * + * @description: + * Set the transformation that is applied to glyph images when they are + * loaded into a glyph slot through @FT_Load_Glyph. + * + * @inout: + * face :: + * A handle to the source face object. + * + * @input: + * matrix :: + * A pointer to the transformation's 2x2 matrix. Use `NULL` for the + * identity matrix. + * delta :: + * A pointer to the translation vector. Use `NULL` for the null + * vector. + * + * @note: + * This function is provided as a convenience, but keep in mind that + * @FT_Matrix coefficients are only 16.16 fixed-point values, which can + * limit the accuracy of the results. Using floating-point computations + * to perform the transform directly in client code instead will always + * yield better numbers. + * + * The transformation is only applied to scalable image formats after the + * glyph has been loaded. It means that hinting is unaltered by the + * transformation and is performed on the character size given in the + * last call to @FT_Set_Char_Size or @FT_Set_Pixel_Sizes. 
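+   *
+   *   For illustration, a sketch that sets up a rotation by an
+   *   arbitrary angle before loading glyphs (`face` and `angle` are
+   *   assumptions; `cos` and `sin` come from the standard math
+   *   library, and the coefficients are 16.16 fixed-point):
+   *
+   *   ```
+   *     FT_Matrix  matrix;
+   *
+   *
+   *     matrix.xx = (FT_Fixed)(  cos( angle ) * 0x10000L );
+   *     matrix.xy = (FT_Fixed)( -sin( angle ) * 0x10000L );
+   *     matrix.yx = (FT_Fixed)(  sin( angle ) * 0x10000L );
+   *     matrix.yy = (FT_Fixed)(  cos( angle ) * 0x10000L );
+   *
+   *     // NULL delta: no translation
+   *     FT_Set_Transform( face, &matrix, NULL );
+   *   ```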
+ * + * Note that this also transforms the `face.glyph.advance` field, but + * **not** the values in `face.glyph.metrics`. + */ + FT_EXPORT( void ) + FT_Set_Transform( FT_Face face, + FT_Matrix* matrix, + FT_Vector* delta ); + + + /************************************************************************** + * + * @function: + * FT_Get_Transform + * + * @description: + * Return the transformation that is applied to glyph images when they + * are loaded into a glyph slot through @FT_Load_Glyph. See + * @FT_Set_Transform for more details. + * + * @input: + * face :: + * A handle to the source face object. + * + * @output: + * matrix :: + * A pointer to a transformation's 2x2 matrix. Set this to NULL if you + * are not interested in the value. + * + * delta :: + * A pointer to a translation vector. Set this to NULL if you are not + * interested in the value. + * + * @since: + * 2.11 + * + */ + FT_EXPORT( void ) + FT_Get_Transform( FT_Face face, + FT_Matrix* matrix, + FT_Vector* delta ); + + + /************************************************************************** + * + * @section: + * glyph_retrieval + * + */ + + /************************************************************************** + * + * @enum: + * FT_Render_Mode + * + * @description: + * Render modes supported by FreeType~2. Each mode corresponds to a + * specific type of scanline conversion performed on the outline. + * + * For bitmap fonts and embedded bitmaps the `bitmap->pixel_mode` field + * in the @FT_GlyphSlotRec structure gives the format of the returned + * bitmap. + * + * All modes except @FT_RENDER_MODE_MONO use 256 levels of opacity, + * indicating pixel coverage. Use linear alpha blending and gamma + * correction to correctly render non-monochrome glyph bitmaps onto a + * surface; see @FT_Render_Glyph. + * + * The @FT_RENDER_MODE_SDF is a special render mode that uses up to 256 + * distance values, indicating the signed distance from the grid position + * to the nearest outline. + * + * @values: + * FT_RENDER_MODE_NORMAL :: + * Default render mode; it corresponds to 8-bit anti-aliased bitmaps. + * + * FT_RENDER_MODE_LIGHT :: + * This is equivalent to @FT_RENDER_MODE_NORMAL. It is only defined as + * a separate value because render modes are also used indirectly to + * define hinting algorithm selectors. See @FT_LOAD_TARGET_XXX for + * details. + * + * FT_RENDER_MODE_MONO :: + * This mode corresponds to 1-bit bitmaps (with 2~levels of opacity). + * + * FT_RENDER_MODE_LCD :: + * This mode corresponds to horizontal RGB and BGR subpixel displays + * like LCD screens. It produces 8-bit bitmaps that are 3~times the + * width of the original glyph outline in pixels, and which use the + * @FT_PIXEL_MODE_LCD mode. + * + * FT_RENDER_MODE_LCD_V :: + * This mode corresponds to vertical RGB and BGR subpixel displays + * (like PDA screens, rotated LCD displays, etc.). It produces 8-bit + * bitmaps that are 3~times the height of the original glyph outline in + * pixels and use the @FT_PIXEL_MODE_LCD_V mode. + * + * FT_RENDER_MODE_SDF :: + * This mode corresponds to 8-bit, single-channel signed distance field + * (SDF) bitmaps. Each pixel in the SDF grid is the value from the + * pixel's position to the nearest glyph's outline. The distances are + * calculated from the center of the pixel and are positive if they are + * filled by the outline (i.e., inside the outline) and negative + * otherwise. Check the note below on how to convert the output values + * to usable data. 
+ * + * @note: + * The selected render mode only affects vector glyphs of a font. + * Embedded bitmaps often have a different pixel mode like + * @FT_PIXEL_MODE_MONO. You can use @FT_Bitmap_Convert to transform them + * into 8-bit pixmaps. + * + * For @FT_RENDER_MODE_SDF the output bitmap buffer contains normalized + * distances that are packed into unsigned 8-bit values. To get pixel + * values in floating point representation use the following pseudo-C + * code for the conversion. + * + * ``` + * // Load glyph and render using FT_RENDER_MODE_SDF, + * // then use the output buffer as follows. + * + * ... + * FT_Byte buffer = glyph->bitmap->buffer; + * + * + * for pixel in buffer + * { + * // `sd` is the signed distance and `spread` is the current spread; + * // the default spread is 2 and can be changed. + * + * float sd = (float)pixel - 128.0f; + * + * + * // Convert to pixel values. + * sd = ( sd / 128.0f ) * spread; + * + * // Store `sd` in a buffer or use as required. + * } + * + * ``` + * + * FreeType has two rasterizers for generating SDF, namely: + * + * 1. `sdf` for generating SDF directly from glyph's outline, and + * + * 2. `bsdf` for generating SDF from rasterized bitmaps. + * + * Depending on the glyph type (i.e., outline or bitmap), one of the two + * rasterizers is chosen at runtime and used for generating SDFs. To + * force the use of `bsdf` you should render the glyph with any of the + * FreeType's other rendering modes (e.g., `FT_RENDER_MODE_NORMAL`) and + * then re-render with `FT_RENDER_MODE_SDF`. + * + * There are some issues with stability and possible failures of the SDF + * renderers (specifically `sdf`). + * + * 1. The `sdf` rasterizer is sensitive to really small features (e.g., + * sharp turns that are less than 1~pixel) and imperfections in the + * glyph's outline, causing artifacts in the final output. + * + * 2. The `sdf` rasterizer has limited support for handling intersecting + * contours and *cannot* handle self-intersecting contours whatsoever. + * Self-intersection happens when a single connected contour + * intersects itself at some point; having these in your font + * definitely poses a problem to the rasterizer and cause artifacts, + * too. + * + * 3. Generating SDF for really small glyphs may result in undesirable + * output; the pixel grid (which stores distance information) becomes + * too coarse. + * + * 4. Since the output buffer is normalized, precision at smaller spreads + * is greater than precision at larger spread values because the + * output range of [0..255] gets mapped to a smaller SDF range. A + * spread of~2 should be sufficient in most cases. + * + * Points (1) and (2) can be avoided by using the `bsdf` rasterizer, + * which is more stable than the `sdf` rasterizer in general. + * + */ + typedef enum FT_Render_Mode_ + { + FT_RENDER_MODE_NORMAL = 0, + FT_RENDER_MODE_LIGHT, + FT_RENDER_MODE_MONO, + FT_RENDER_MODE_LCD, + FT_RENDER_MODE_LCD_V, + FT_RENDER_MODE_SDF, + + FT_RENDER_MODE_MAX + + } FT_Render_Mode; + + + /* these constants are deprecated; use the corresponding */ + /* `FT_Render_Mode` values instead */ +#define ft_render_mode_normal FT_RENDER_MODE_NORMAL +#define ft_render_mode_mono FT_RENDER_MODE_MONO + + + /************************************************************************** + * + * @function: + * FT_Render_Glyph + * + * @description: + * Convert a given glyph image to a bitmap. It does so by inspecting the + * glyph image format, finding the relevant renderer, and invoking it. 
+ * + * @inout: + * slot :: + * A handle to the glyph slot containing the image to convert. + * + * @input: + * render_mode :: + * The render mode used to render the glyph image into a bitmap. See + * @FT_Render_Mode for a list of possible values. + * + * If @FT_RENDER_MODE_NORMAL is used, a previous call of @FT_Load_Glyph + * with flag @FT_LOAD_COLOR makes `FT_Render_Glyph` provide a default + * blending of colored glyph layers associated with the current glyph + * slot (provided the font contains such layers) instead of rendering + * the glyph slot's outline. This is an experimental feature; see + * @FT_LOAD_COLOR for more information. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * When FreeType outputs a bitmap of a glyph, it really outputs an alpha + * coverage map. If a pixel is completely covered by a filled-in + * outline, the bitmap contains 0xFF at that pixel, meaning that + * 0xFF/0xFF fraction of that pixel is covered, meaning the pixel is 100% + * black (or 0% bright). If a pixel is only 50% covered (value 0x80), + * the pixel is made 50% black (50% bright or a middle shade of grey). + * 0% covered means 0% black (100% bright or white). + * + * On high-DPI screens like on smartphones and tablets, the pixels are so + * small that their chance of being completely covered and therefore + * completely black are fairly good. On the low-DPI screens, however, + * the situation is different. The pixels are too large for most of the + * details of a glyph and shades of gray are the norm rather than the + * exception. + * + * This is relevant because all our screens have a second problem: they + * are not linear. 1~+~1 is not~2. Twice the value does not result in + * twice the brightness. When a pixel is only 50% covered, the coverage + * map says 50% black, and this translates to a pixel value of 128 when + * you use 8~bits per channel (0-255). However, this does not translate + * to 50% brightness for that pixel on our sRGB and gamma~2.2 screens. + * Due to their non-linearity, they dwell longer in the darks and only a + * pixel value of about 186 results in 50% brightness -- 128 ends up too + * dark on both bright and dark backgrounds. The net result is that dark + * text looks burnt-out, pixely and blotchy on bright background, bright + * text too frail on dark backgrounds, and colored text on colored + * background (for example, red on green) seems to have dark halos or + * 'dirt' around it. The situation is especially ugly for diagonal stems + * like in 'w' glyph shapes where the quality of FreeType's anti-aliasing + * depends on the correct display of grays. On high-DPI screens where + * smaller, fully black pixels reign supreme, this doesn't matter, but on + * our low-DPI screens with all the gray shades, it does. 0% and 100% + * brightness are the same things in linear and non-linear space, just + * all the shades in-between aren't. + * + * The blending function for placing text over a background is + * + * ``` + * dst = alpha * src + (1 - alpha) * dst , + * ``` + * + * which is known as the OVER operator. + * + * To correctly composite an anti-aliased pixel of a glyph onto a + * surface, + * + * 1. take the foreground and background colors (e.g., in sRGB space) + * and apply gamma to get them in a linear space, + * + * 2. use OVER to blend the two linear colors using the glyph pixel + * as the alpha value (remember, the glyph bitmap is an alpha coverage + * bitmap), and + * + * 3. 
apply inverse gamma to the blended pixel and write it back to + * the image. + * + * Internal testing at Adobe found that a target inverse gamma of~1.8 for + * step~3 gives good results across a wide range of displays with an sRGB + * gamma curve or a similar one. + * + * This process can cost performance. There is an approximation that + * does not need to know about the background color; see + * https://bel.fi/alankila/lcd/ and + * https://bel.fi/alankila/lcd/alpcor.html for details. + * + * **ATTENTION**: Linear blending is even more important when dealing + * with subpixel-rendered glyphs to prevent color-fringing! A + * subpixel-rendered glyph must first be filtered with a filter that + * gives equal weight to the three color primaries and does not exceed a + * sum of 0x100, see section @lcd_rendering. Then the only difference to + * gray linear blending is that subpixel-rendered linear blending is done + * 3~times per pixel: red foreground subpixel to red background subpixel + * and so on for green and blue. + */ + FT_EXPORT( FT_Error ) + FT_Render_Glyph( FT_GlyphSlot slot, + FT_Render_Mode render_mode ); + + + /************************************************************************** + * + * @enum: + * FT_Kerning_Mode + * + * @description: + * An enumeration to specify the format of kerning values returned by + * @FT_Get_Kerning. + * + * @values: + * FT_KERNING_DEFAULT :: + * Return grid-fitted kerning distances in 26.6 fractional pixels. + * + * FT_KERNING_UNFITTED :: + * Return un-grid-fitted kerning distances in 26.6 fractional pixels. + * + * FT_KERNING_UNSCALED :: + * Return the kerning vector in original font units. + * + * @note: + * `FT_KERNING_DEFAULT` returns full pixel values; it also makes FreeType + * heuristically scale down kerning distances at small ppem values so + * that they don't become too big. + * + * Both `FT_KERNING_DEFAULT` and `FT_KERNING_UNFITTED` use the current + * horizontal scaling factor (as set e.g. with @FT_Set_Char_Size) to + * convert font units to pixels. + */ + typedef enum FT_Kerning_Mode_ + { + FT_KERNING_DEFAULT = 0, + FT_KERNING_UNFITTED, + FT_KERNING_UNSCALED + + } FT_Kerning_Mode; + + + /* these constants are deprecated; use the corresponding */ + /* `FT_Kerning_Mode` values instead */ +#define ft_kerning_default FT_KERNING_DEFAULT +#define ft_kerning_unfitted FT_KERNING_UNFITTED +#define ft_kerning_unscaled FT_KERNING_UNSCALED + + + /************************************************************************** + * + * @function: + * FT_Get_Kerning + * + * @description: + * Return the kerning vector between two glyphs of the same face. + * + * @input: + * face :: + * A handle to a source face object. + * + * left_glyph :: + * The index of the left glyph in the kern pair. + * + * right_glyph :: + * The index of the right glyph in the kern pair. + * + * kern_mode :: + * See @FT_Kerning_Mode for more information. Determines the scale and + * dimension of the returned kerning vector. + * + * @output: + * akerning :: + * The kerning vector. This is either in font units, fractional pixels + * (26.6 format), or pixels for scalable formats, and in pixels for + * fixed-sizes formats. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * Only horizontal layouts (left-to-right & right-to-left) are supported + * by this method. Other layouts, or more sophisticated kernings, are + * out of the scope of this API function -- they can be implemented + * through format-specific interfaces. 
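+   *
+   *   A sketch of how horizontal kerning is typically applied while
+   *   laying out a string (`previous` and `glyph_index` are the
+   *   indices of two consecutive glyphs, `use_kerning` caches the
+   *   result of @FT_HAS_KERNING, and `pen_x` is hypothetical):
+   *
+   *   ```
+   *     FT_Vector  delta;
+   *
+   *
+   *     if ( use_kerning && previous && glyph_index )
+   *     {
+   *       FT_Get_Kerning( face, previous, glyph_index,
+   *                       FT_KERNING_DEFAULT, &delta );
+   *
+   *       pen_x += delta.x >> 6;  // 26.6 -> integer pixels
+   *     }
+   *   ```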
+ * + * Kerning for OpenType fonts implemented in a 'GPOS' table is not + * supported; use @FT_HAS_KERNING to find out whether a font has data + * that can be extracted with `FT_Get_Kerning`. + */ + FT_EXPORT( FT_Error ) + FT_Get_Kerning( FT_Face face, + FT_UInt left_glyph, + FT_UInt right_glyph, + FT_UInt kern_mode, + FT_Vector *akerning ); + + + /************************************************************************** + * + * @function: + * FT_Get_Track_Kerning + * + * @description: + * Return the track kerning for a given face object at a given size. + * + * @input: + * face :: + * A handle to a source face object. + * + * point_size :: + * The point size in 16.16 fractional points. + * + * degree :: + * The degree of tightness. Increasingly negative values represent + * tighter track kerning, while increasingly positive values represent + * looser track kerning. Value zero means no track kerning. + * + * @output: + * akerning :: + * The kerning in 16.16 fractional points, to be uniformly applied + * between all glyphs. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * Currently, only the Type~1 font driver supports track kerning, using + * data from AFM files (if attached with @FT_Attach_File or + * @FT_Attach_Stream). + * + * Only very few AFM files come with track kerning data; please refer to + * Adobe's AFM specification for more details. + */ + FT_EXPORT( FT_Error ) + FT_Get_Track_Kerning( FT_Face face, + FT_Fixed point_size, + FT_Int degree, + FT_Fixed* akerning ); + + + /************************************************************************** + * + * @section: + * character_mapping + * + */ + + /************************************************************************** + * + * @function: + * FT_Select_Charmap + * + * @description: + * Select a given charmap by its encoding tag (as listed in + * `freetype.h`). + * + * @inout: + * face :: + * A handle to the source face object. + * + * @input: + * encoding :: + * A handle to the selected encoding. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function returns an error if no charmap in the face corresponds + * to the encoding queried here. + * + * Because many fonts contain more than a single cmap for Unicode + * encoding, this function has some special code to select the one that + * covers Unicode best ('best' in the sense that a UCS-4 cmap is + * preferred to a UCS-2 cmap). It is thus preferable to @FT_Set_Charmap + * in this case. + */ + FT_EXPORT( FT_Error ) + FT_Select_Charmap( FT_Face face, + FT_Encoding encoding ); + + + /************************************************************************** + * + * @function: + * FT_Set_Charmap + * + * @description: + * Select a given charmap for character code to glyph index mapping. + * + * @inout: + * face :: + * A handle to the source face object. + * + * @input: + * charmap :: + * A handle to the selected charmap. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function returns an error if the charmap is not part of the face + * (i.e., if it is not listed in the `face->charmaps` table). + * + * It also fails if an OpenType type~14 charmap is selected (which + * doesn't map character codes to glyph indices at all). 
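+ *
+ * As an illustrative sketch (assuming `face` is a valid face handle),
+ * a charmap from the `face->charmaps` table might be selected by hand
+ * like this; for plain Unicode lookups, calling @FT_Select_Charmap
+ * with @FT_ENCODING_UNICODE is usually simpler.
+ *
+ * ```
+ * FT_Int n;
+ * FT_Error error = FT_Err_Ok;
+ *
+ *
+ * for ( n = 0; n < face->num_charmaps; n++ )
+ * if ( face->charmaps[n]->encoding == FT_ENCODING_UNICODE )
+ * {
+ * error = FT_Set_Charmap( face, face->charmaps[n] );
+ * break;
+ * }
+ * ```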
+ */ + FT_EXPORT( FT_Error ) + FT_Set_Charmap( FT_Face face, + FT_CharMap charmap ); + + + /************************************************************************** + * + * @function: + * FT_Get_Charmap_Index + * + * @description: + * Retrieve index of a given charmap. + * + * @input: + * charmap :: + * A handle to a charmap. + * + * @return: + * The index into the array of character maps within the face to which + * `charmap` belongs. If an error occurs, -1 is returned. + * + */ + FT_EXPORT( FT_Int ) + FT_Get_Charmap_Index( FT_CharMap charmap ); + + + /************************************************************************** + * + * @function: + * FT_Get_Char_Index + * + * @description: + * Return the glyph index of a given character code. This function uses + * the currently selected charmap to do the mapping. + * + * @input: + * face :: + * A handle to the source face object. + * + * charcode :: + * The character code. + * + * @return: + * The glyph index. 0~means 'undefined character code'. + * + * @note: + * If you use FreeType to manipulate the contents of font files directly, + * be aware that the glyph index returned by this function doesn't always + * correspond to the internal indices used within the file. This is done + * to ensure that value~0 always corresponds to the 'missing glyph'. If + * the first glyph is not named '.notdef', then for Type~1 and Type~42 + * fonts, '.notdef' will be moved into the glyph ID~0 position, and + * whatever was there will be moved to the position '.notdef' had. For + * Type~1 fonts, if there is no '.notdef' glyph at all, then one will be + * created at index~0 and whatever was there will be moved to the last + * index -- Type~42 fonts are considered invalid under this condition. + */ + FT_EXPORT( FT_UInt ) + FT_Get_Char_Index( FT_Face face, + FT_ULong charcode ); + + + /************************************************************************** + * + * @function: + * FT_Get_First_Char + * + * @description: + * Return the first character code in the current charmap of a given + * face, together with its corresponding glyph index. + * + * @input: + * face :: + * A handle to the source face object. + * + * @output: + * agindex :: + * Glyph index of first character code. 0~if charmap is empty. + * + * @return: + * The charmap's first character code. + * + * @note: + * You should use this function together with @FT_Get_Next_Char to parse + * all character codes available in a given charmap. The code should + * look like this: + * + * ``` + * FT_ULong charcode; + * FT_UInt gindex; + * + * + * charcode = FT_Get_First_Char( face, &gindex ); + * while ( gindex != 0 ) + * { + * ... do something with (charcode,gindex) pair ... + * + * charcode = FT_Get_Next_Char( face, charcode, &gindex ); + * } + * ``` + * + * Be aware that character codes can have values up to 0xFFFFFFFF; this + * might happen for non-Unicode or malformed cmaps. However, even with + * regular Unicode encoding, so-called 'last resort fonts' (using SFNT + * cmap format 13, see function @FT_Get_CMap_Format) normally have + * entries for all Unicode characters up to 0x1FFFFF, which can cause *a + * lot* of iterations. + * + * Note that `*agindex` is set to~0 if the charmap is empty. The result + * itself can be~0 in two cases: if the charmap is empty or if the + * value~0 is the first valid character code. 
+ */ + FT_EXPORT( FT_ULong ) + FT_Get_First_Char( FT_Face face, + FT_UInt *agindex ); + + + /************************************************************************** + * + * @function: + * FT_Get_Next_Char + * + * @description: + * Return the next character code in the current charmap of a given face + * following the value `char_code`, as well as the corresponding glyph + * index. + * + * @input: + * face :: + * A handle to the source face object. + * + * char_code :: + * The starting character code. + * + * @output: + * agindex :: + * Glyph index of next character code. 0~if charmap is empty. + * + * @return: + * The charmap's next character code. + * + * @note: + * You should use this function with @FT_Get_First_Char to walk over all + * character codes available in a given charmap. See the note for that + * function for a simple code example. + * + * Note that `*agindex` is set to~0 when there are no more codes in the + * charmap. + */ + FT_EXPORT( FT_ULong ) + FT_Get_Next_Char( FT_Face face, + FT_ULong char_code, + FT_UInt *agindex ); + + + /************************************************************************** + * + * @section: + * face_creation + * + */ + + /************************************************************************** + * + * @function: + * FT_Face_Properties + * + * @description: + * Set or override certain (library or module-wide) properties on a + * face-by-face basis. Useful for finer-grained control and avoiding + * locks on shared structures (threads can modify their own faces as they + * see fit). + * + * Contrary to @FT_Property_Set, this function uses @FT_Parameter so that + * you can pass multiple properties to the target face in one call. Note + * that only a subset of the available properties can be controlled. + * + * * @FT_PARAM_TAG_STEM_DARKENING (stem darkening, corresponding to the + * property `no-stem-darkening` provided by the 'autofit', 'cff', + * 'type1', and 't1cid' modules; see @no-stem-darkening). + * + * * @FT_PARAM_TAG_LCD_FILTER_WEIGHTS (LCD filter weights, corresponding + * to function @FT_Library_SetLcdFilterWeights). + * + * * @FT_PARAM_TAG_RANDOM_SEED (seed value for the CFF, Type~1, and CID + * 'random' operator, corresponding to the `random-seed` property + * provided by the 'cff', 'type1', and 't1cid' modules; see + * @random-seed). + * + * Pass `NULL` as `data` in @FT_Parameter for a given tag to reset the + * option and use the library or module default again. + * + * @input: + * face :: + * A handle to the source face object. + * + * num_properties :: + * The number of properties that follow. + * + * properties :: + * A handle to an @FT_Parameter array with `num_properties` elements. + * + * @return: + * FreeType error code. 0~means success. + * + * @example: + * Here is an example that sets three properties. You must define + * `FT_CONFIG_OPTION_SUBPIXEL_RENDERING` to make the LCD filter examples + * work. 
+ * + * ``` + * FT_Parameter property1; + * FT_Bool darken_stems = 1; + * + * FT_Parameter property2; + * FT_LcdFiveTapFilter custom_weight = + * { 0x11, 0x44, 0x56, 0x44, 0x11 }; + * + * FT_Parameter property3; + * FT_Int32 random_seed = 314159265; + * + * FT_Parameter properties[3] = { property1, + * property2, + * property3 }; + * + * + * property1.tag = FT_PARAM_TAG_STEM_DARKENING; + * property1.data = &darken_stems; + * + * property2.tag = FT_PARAM_TAG_LCD_FILTER_WEIGHTS; + * property2.data = custom_weight; + * + * property3.tag = FT_PARAM_TAG_RANDOM_SEED; + * property3.data = &random_seed; + * + * FT_Face_Properties( face, 3, properties ); + * ``` + * + * The next example resets a single property to its default value. + * + * ``` + * FT_Parameter property; + * + * + * property.tag = FT_PARAM_TAG_LCD_FILTER_WEIGHTS; + * property.data = NULL; + * + * FT_Face_Properties( face, 1, &property ); + * ``` + * + * @since: + * 2.8 + * + */ + FT_EXPORT( FT_Error ) + FT_Face_Properties( FT_Face face, + FT_UInt num_properties, + FT_Parameter* properties ); + + + /************************************************************************** + * + * @section: + * information_retrieval + * + */ + + /************************************************************************** + * + * @function: + * FT_Get_Name_Index + * + * @description: + * Return the glyph index of a given glyph name. This only works + * for those faces where @FT_HAS_GLYPH_NAMES returns true. + * + * @input: + * face :: + * A handle to the source face object. + * + * glyph_name :: + * The glyph name. + * + * @return: + * The glyph index. 0~means 'undefined character code'. + * + * @note: + * Acceptable glyph names might come from the [Adobe Glyph + * List](https://github.com/adobe-type-tools/agl-aglfn). See + * @FT_Get_Glyph_Name for the inverse functionality. + * + * This function has limited capabilities if the config macro + * `FT_CONFIG_OPTION_POSTSCRIPT_NAMES` is not defined in `ftoption.h`: + * It then works only for fonts that actually embed glyph names (which + * many recent OpenType fonts do not). + */ + FT_EXPORT( FT_UInt ) + FT_Get_Name_Index( FT_Face face, + const FT_String* glyph_name ); + + + /************************************************************************** + * + * @function: + * FT_Get_Glyph_Name + * + * @description: + * Retrieve the ASCII name of a given glyph in a face. This only works + * for those faces where @FT_HAS_GLYPH_NAMES returns true. + * + * @input: + * face :: + * A handle to a source face object. + * + * glyph_index :: + * The glyph index. + * + * buffer_max :: + * The maximum number of bytes available in the buffer. + * + * @output: + * buffer :: + * A pointer to a target buffer where the name is copied to. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * An error is returned if the face doesn't provide glyph names or if the + * glyph index is invalid. In all cases of failure, the first byte of + * `buffer` is set to~0 to indicate an empty name. + * + * The glyph name is truncated to fit within the buffer if it is too + * long. The returned string is always zero-terminated. + * + * Be aware that FreeType reorders glyph indices internally so that glyph + * index~0 always corresponds to the 'missing glyph' (called '.notdef'). 
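+ *
+ * As a minimal sketch (assuming `face` is a valid face handle and
+ * `gindex` a valid glyph index), a glyph name might be retrieved into
+ * a small stack buffer like this:
+ *
+ * ```
+ * char buffer[64];
+ * FT_Error error;
+ *
+ *
+ * if ( FT_HAS_GLYPH_NAMES( face ) )
+ * error = FT_Get_Glyph_Name( face, gindex,
+ * buffer, sizeof ( buffer ) );
+ * ```
+ *
+ * On success, `buffer` holds the zero-terminated (and possibly
+ * truncated) name of the glyph.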
+ * + * This function has limited capabilities if the config macro + * `FT_CONFIG_OPTION_POSTSCRIPT_NAMES` is not defined in `ftoption.h`: + * It then works only for fonts that actually embed glyph names (which + * many recent OpenType fonts do not). + */ + FT_EXPORT( FT_Error ) + FT_Get_Glyph_Name( FT_Face face, + FT_UInt glyph_index, + FT_Pointer buffer, + FT_UInt buffer_max ); + + + /************************************************************************** + * + * @function: + * FT_Get_Postscript_Name + * + * @description: + * Retrieve the ASCII PostScript name of a given face, if available. + * This only works with PostScript, TrueType, and OpenType fonts. + * + * @input: + * face :: + * A handle to the source face object. + * + * @return: + * A pointer to the face's PostScript name. `NULL` if unavailable. + * + * @note: + * The returned pointer is owned by the face and is destroyed with it. + * + * For variation fonts, this string changes if you select a different + * instance, and you have to call `FT_Get_PostScript_Name` again to + * retrieve it. FreeType follows Adobe TechNote #5902, 'Generating + * PostScript Names for Fonts Using OpenType Font Variations'. + * + * https://download.macromedia.com/pub/developer/opentype/tech-notes/5902.AdobePSNameGeneration.html + * + * [Since 2.9] Special PostScript names for named instances are only + * returned if the named instance is set with @FT_Set_Named_Instance (and + * the font has corresponding entries in its 'fvar' table or is the + * default named instance). If @FT_IS_VARIATION returns true, the + * algorithmically derived PostScript name is provided, not looking up + * special entries for named instances. + */ + FT_EXPORT( const char* ) + FT_Get_Postscript_Name( FT_Face face ); + + + /************************************************************************** + * + * @enum: + * FT_SUBGLYPH_FLAG_XXX + * + * @description: + * A list of constants describing subglyphs. Please refer to the 'glyf' + * table description in the OpenType specification for the meaning of the + * various flags (which get synthesized for non-OpenType subglyphs). + * + * https://docs.microsoft.com/en-us/typography/opentype/spec/glyf#composite-glyph-description + * + * @values: + * FT_SUBGLYPH_FLAG_ARGS_ARE_WORDS :: + * FT_SUBGLYPH_FLAG_ARGS_ARE_XY_VALUES :: + * FT_SUBGLYPH_FLAG_ROUND_XY_TO_GRID :: + * FT_SUBGLYPH_FLAG_SCALE :: + * FT_SUBGLYPH_FLAG_XY_SCALE :: + * FT_SUBGLYPH_FLAG_2X2 :: + * FT_SUBGLYPH_FLAG_USE_MY_METRICS :: + * + */ +#define FT_SUBGLYPH_FLAG_ARGS_ARE_WORDS 1 +#define FT_SUBGLYPH_FLAG_ARGS_ARE_XY_VALUES 2 +#define FT_SUBGLYPH_FLAG_ROUND_XY_TO_GRID 4 +#define FT_SUBGLYPH_FLAG_SCALE 8 +#define FT_SUBGLYPH_FLAG_XY_SCALE 0x40 +#define FT_SUBGLYPH_FLAG_2X2 0x80 +#define FT_SUBGLYPH_FLAG_USE_MY_METRICS 0x200 + + + /************************************************************************** + * + * @function: + * FT_Get_SubGlyph_Info + * + * @description: + * Retrieve a description of a given subglyph. Only use it if + * `glyph->format` is @FT_GLYPH_FORMAT_COMPOSITE; an error is returned + * otherwise. + * + * @input: + * glyph :: + * The source glyph slot. + * + * sub_index :: + * The index of the subglyph. Must be less than + * `glyph->num_subglyphs`. + * + * @output: + * p_index :: + * The glyph index of the subglyph. + * + * p_flags :: + * The subglyph flags, see @FT_SUBGLYPH_FLAG_XXX. + * + * p_arg1 :: + * The subglyph's first argument (if any). + * + * p_arg2 :: + * The subglyph's second argument (if any). 
+ * + * p_transform :: + * The subglyph transformation (if any). + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The values of `*p_arg1`, `*p_arg2`, and `*p_transform` must be + * interpreted depending on the flags returned in `*p_flags`. See the + * OpenType specification for details. + * + * https://docs.microsoft.com/en-us/typography/opentype/spec/glyf#composite-glyph-description + * + */ + FT_EXPORT( FT_Error ) + FT_Get_SubGlyph_Info( FT_GlyphSlot glyph, + FT_UInt sub_index, + FT_Int *p_index, + FT_UInt *p_flags, + FT_Int *p_arg1, + FT_Int *p_arg2, + FT_Matrix *p_transform ); + + + /************************************************************************** + * + * @enum: + * FT_FSTYPE_XXX + * + * @description: + * A list of bit flags used in the `fsType` field of the OS/2 table in a + * TrueType or OpenType font and the `FSType` entry in a PostScript font. + * These bit flags are returned by @FT_Get_FSType_Flags; they inform + * client applications of embedding and subsetting restrictions + * associated with a font. + * + * See + * https://www.adobe.com/content/dam/Adobe/en/devnet/acrobat/pdfs/FontPolicies.pdf + * for more details. + * + * @values: + * FT_FSTYPE_INSTALLABLE_EMBEDDING :: + * Fonts with no fsType bit set may be embedded and permanently + * installed on the remote system by an application. + * + * FT_FSTYPE_RESTRICTED_LICENSE_EMBEDDING :: + * Fonts that have only this bit set must not be modified, embedded or + * exchanged in any manner without first obtaining permission of the + * font software copyright owner. + * + * FT_FSTYPE_PREVIEW_AND_PRINT_EMBEDDING :: + * The font may be embedded and temporarily loaded on the remote + * system. Documents containing Preview & Print fonts must be opened + * 'read-only'; no edits can be applied to the document. + * + * FT_FSTYPE_EDITABLE_EMBEDDING :: + * The font may be embedded but must only be installed temporarily on + * other systems. In contrast to Preview & Print fonts, documents + * containing editable fonts may be opened for reading, editing is + * permitted, and changes may be saved. + * + * FT_FSTYPE_NO_SUBSETTING :: + * The font may not be subsetted prior to embedding. + * + * FT_FSTYPE_BITMAP_EMBEDDING_ONLY :: + * Only bitmaps contained in the font may be embedded; no outline data + * may be embedded. If there are no bitmaps available in the font, + * then the font is unembeddable. + * + * @note: + * The flags are ORed together, thus more than a single value can be + * returned. + * + * While the `fsType` flags can indicate that a font may be embedded, a + * license with the font vendor may be separately required to use the + * font in this way. + */ +#define FT_FSTYPE_INSTALLABLE_EMBEDDING 0x0000 +#define FT_FSTYPE_RESTRICTED_LICENSE_EMBEDDING 0x0002 +#define FT_FSTYPE_PREVIEW_AND_PRINT_EMBEDDING 0x0004 +#define FT_FSTYPE_EDITABLE_EMBEDDING 0x0008 +#define FT_FSTYPE_NO_SUBSETTING 0x0100 +#define FT_FSTYPE_BITMAP_EMBEDDING_ONLY 0x0200 + + + /************************************************************************** + * + * @function: + * FT_Get_FSType_Flags + * + * @description: + * Return the `fsType` flags for a font. + * + * @input: + * face :: + * A handle to the source face object. + * + * @return: + * The `fsType` flags, see @FT_FSTYPE_XXX. + * + * @note: + * Use this function rather than directly reading the `fs_type` field in + * the @PS_FontInfoRec structure, which is only guaranteed to return the + * correct results for Type~1 fonts. 
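+ *
+ * As an illustrative sketch (assuming `face` is a valid face handle),
+ * an application that wants to subset and embed a font might check the
+ * flags like this:
+ *
+ * ```
+ * FT_UShort fstype = FT_Get_FSType_Flags( face );
+ *
+ *
+ * if ( ( fstype & FT_FSTYPE_RESTRICTED_LICENSE_EMBEDDING ) ||
+ * ( fstype & FT_FSTYPE_NO_SUBSETTING ) )
+ * {
+ * ... refuse to subset or embed this font ...
+ * }
+ * ```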
+ * + * @since: + * 2.3.8 + * + */ + FT_EXPORT( FT_UShort ) + FT_Get_FSType_Flags( FT_Face face ); + + + /************************************************************************** + * + * @section: + * glyph_variants + * + * @title: + * Unicode Variation Sequences + * + * @abstract: + * The FreeType~2 interface to Unicode Variation Sequences (UVS), using + * the SFNT cmap format~14. + * + * @description: + * Many characters, especially for CJK scripts, have variant forms. They + * are a sort of grey area somewhere between being totally irrelevant and + * semantically distinct; for this reason, the Unicode consortium decided + * to introduce Variation Sequences (VS), consisting of a Unicode base + * character and a variation selector instead of further extending the + * already huge number of characters. + * + * Unicode maintains two different sets, namely 'Standardized Variation + * Sequences' and registered 'Ideographic Variation Sequences' (IVS), + * collected in the 'Ideographic Variation Database' (IVD). + * + * https://unicode.org/Public/UCD/latest/ucd/StandardizedVariants.txt + * https://unicode.org/reports/tr37/ https://unicode.org/ivd/ + * + * To date (January 2017), the character with the most ideographic + * variations is U+9089, having 32 such IVS. + * + * Three Mongolian Variation Selectors have the values U+180B-U+180D; 256 + * generic Variation Selectors are encoded in the ranges U+FE00-U+FE0F + * and U+E0100-U+E01EF. IVS currently use Variation Selectors from the + * range U+E0100-U+E01EF only. + * + * A VS consists of the base character value followed by a single + * Variation Selector. For example, to get the first variation of + * U+9089, you have to write the character sequence `U+9089 U+E0100`. + * + * Adobe and MS decided to support both standardized and ideographic VS + * with a new cmap subtable (format~14). It is an odd subtable because + * it is not a mapping of input code points to glyphs, but contains lists + * of all variations supported by the font. + * + * A variation may be either 'default' or 'non-default' for a given font. + * A default variation is the one you will get for that code point if you + * look it up in the standard Unicode cmap. A non-default variation is a + * different glyph. + * + */ + + + /************************************************************************** + * + * @function: + * FT_Face_GetCharVariantIndex + * + * @description: + * Return the glyph index of a given character code as modified by the + * variation selector. + * + * @input: + * face :: + * A handle to the source face object. + * + * charcode :: + * The character code point in Unicode. + * + * variantSelector :: + * The Unicode code point of the variation selector. + * + * @return: + * The glyph index. 0~means either 'undefined character code', or + * 'undefined selector code', or 'no variation selector cmap subtable', + * or 'current CharMap is not Unicode'. + * + * @note: + * If you use FreeType to manipulate the contents of font files directly, + * be aware that the glyph index returned by this function doesn't always + * correspond to the internal indices used within the file. This is done + * to ensure that value~0 always corresponds to the 'missing glyph'. + * + * This function is only meaningful if + * a) the font has a variation selector cmap sub table, and + * b) the current charmap has a Unicode encoding. 
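+ *
+ * As a minimal sketch (assuming `face` is a valid face handle with a
+ * Unicode charmap selected), the glyph for the variation sequence
+ * `U+9089 U+E0100` mentioned above might be looked up like this,
+ * falling back to the default glyph if the font does not support the
+ * sequence:
+ *
+ * ```
+ * FT_UInt gindex;
+ *
+ *
+ * gindex = FT_Face_GetCharVariantIndex( face, 0x9089, 0xE0100 );
+ * if ( gindex == 0 )
+ * gindex = FT_Get_Char_Index( face, 0x9089 );
+ * ```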
+ * + * @since: + * 2.3.6 + * + */ + FT_EXPORT( FT_UInt ) + FT_Face_GetCharVariantIndex( FT_Face face, + FT_ULong charcode, + FT_ULong variantSelector ); + + + /************************************************************************** + * + * @function: + * FT_Face_GetCharVariantIsDefault + * + * @description: + * Check whether this variation of this Unicode character is the one to + * be found in the charmap. + * + * @input: + * face :: + * A handle to the source face object. + * + * charcode :: + * The character codepoint in Unicode. + * + * variantSelector :: + * The Unicode codepoint of the variation selector. + * + * @return: + * 1~if found in the standard (Unicode) cmap, 0~if found in the variation + * selector cmap, or -1 if it is not a variation. + * + * @note: + * This function is only meaningful if the font has a variation selector + * cmap subtable. + * + * @since: + * 2.3.6 + * + */ + FT_EXPORT( FT_Int ) + FT_Face_GetCharVariantIsDefault( FT_Face face, + FT_ULong charcode, + FT_ULong variantSelector ); + + + /************************************************************************** + * + * @function: + * FT_Face_GetVariantSelectors + * + * @description: + * Return a zero-terminated list of Unicode variation selectors found in + * the font. + * + * @input: + * face :: + * A handle to the source face object. + * + * @return: + * A pointer to an array of selector code points, or `NULL` if there is + * no valid variation selector cmap subtable. + * + * @note: + * The last item in the array is~0; the array is owned by the @FT_Face + * object but can be overwritten or released on the next call to a + * FreeType function. + * + * @since: + * 2.3.6 + * + */ + FT_EXPORT( FT_UInt32* ) + FT_Face_GetVariantSelectors( FT_Face face ); + + + /************************************************************************** + * + * @function: + * FT_Face_GetVariantsOfChar + * + * @description: + * Return a zero-terminated list of Unicode variation selectors found for + * the specified character code. + * + * @input: + * face :: + * A handle to the source face object. + * + * charcode :: + * The character codepoint in Unicode. + * + * @return: + * A pointer to an array of variation selector code points that are + * active for the given character, or `NULL` if the corresponding list is + * empty. + * + * @note: + * The last item in the array is~0; the array is owned by the @FT_Face + * object but can be overwritten or released on the next call to a + * FreeType function. + * + * @since: + * 2.3.6 + * + */ + FT_EXPORT( FT_UInt32* ) + FT_Face_GetVariantsOfChar( FT_Face face, + FT_ULong charcode ); + + + /************************************************************************** + * + * @function: + * FT_Face_GetCharsOfVariant + * + * @description: + * Return a zero-terminated list of Unicode character codes found for the + * specified variation selector. + * + * @input: + * face :: + * A handle to the source face object. + * + * variantSelector :: + * The variation selector code point in Unicode. + * + * @return: + * A list of all the code points that are specified by this selector + * (both default and non-default codes are returned) or `NULL` if there + * is no valid cmap or the variation selector is invalid. + * + * @note: + * The last item in the array is~0; the array is owned by the @FT_Face + * object but can be overwritten or released on the next call to a + * FreeType function. 
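+ *
+ * As an illustrative sketch (assuming `face` is a valid face handle),
+ * the zero-terminated result might be walked like this:
+ *
+ * ```
+ * FT_UInt32* codes = FT_Face_GetCharsOfVariant( face, 0xE0100 );
+ *
+ *
+ * if ( codes )
+ * while ( *codes )
+ * {
+ * ... do something with the code point *codes ...
+ *
+ * codes++;
+ * }
+ * ```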
+ * + * @since: + * 2.3.6 + * + */ + FT_EXPORT( FT_UInt32* ) + FT_Face_GetCharsOfVariant( FT_Face face, + FT_ULong variantSelector ); + + + /************************************************************************** + * + * @section: + * computations + * + * @title: + * Computations + * + * @abstract: + * Crunching fixed numbers and vectors. + * + * @description: + * This section contains various functions used to perform computations + * on 16.16 fixed-point numbers or 2D vectors. FreeType does not use + * floating-point data types. + * + * **Attention**: Most arithmetic functions take `FT_Long` as arguments. + * For historical reasons, FreeType was designed under the assumption + * that `FT_Long` is a 32-bit integer; results can thus be undefined if + * the arguments don't fit into 32 bits. + * + * @order: + * FT_MulDiv + * FT_MulFix + * FT_DivFix + * FT_RoundFix + * FT_CeilFix + * FT_FloorFix + * FT_Vector_Transform + * FT_Matrix_Multiply + * FT_Matrix_Invert + * + */ + + + /************************************************************************** + * + * @function: + * FT_MulDiv + * + * @description: + * Compute `(a*b)/c` with maximum accuracy, using a 64-bit intermediate + * integer whenever necessary. + * + * This function isn't necessarily as fast as some processor-specific + * operations, but is at least completely portable. + * + * @input: + * a :: + * The first multiplier. + * + * b :: + * The second multiplier. + * + * c :: + * The divisor. + * + * @return: + * The result of `(a*b)/c`. This function never traps when trying to + * divide by zero; it simply returns 'MaxInt' or 'MinInt' depending on + * the signs of `a` and `b`. + */ + FT_EXPORT( FT_Long ) + FT_MulDiv( FT_Long a, + FT_Long b, + FT_Long c ); + + + /************************************************************************** + * + * @function: + * FT_MulFix + * + * @description: + * Compute `(a*b)/0x10000` with maximum accuracy. Its main use is to + * multiply a given value by a 16.16 fixed-point factor. + * + * @input: + * a :: + * The first multiplier. + * + * b :: + * The second multiplier. Use a 16.16 factor here whenever possible + * (see note below). + * + * @return: + * The result of `(a*b)/0x10000`. + * + * @note: + * This function has been optimized for the case where the absolute value + * of `a` is less than 2048, and `b` is a 16.16 scaling factor. As this + * happens mainly when scaling from notional units to fractional pixels + * in FreeType, it resulted in noticeable speed improvements between + * versions 2.x and 1.x. + * + * As a conclusion, always try to place a 16.16 factor as the _second_ + * argument of this function; this can make a great difference. + */ + FT_EXPORT( FT_Long ) + FT_MulFix( FT_Long a, + FT_Long b ); + + + /************************************************************************** + * + * @function: + * FT_DivFix + * + * @description: + * Compute `(a*0x10000)/b` with maximum accuracy. Its main use is to + * divide a given value by a 16.16 fixed-point factor. + * + * @input: + * a :: + * The numerator. + * + * b :: + * The denominator. Use a 16.16 factor here. + * + * @return: + * The result of `(a*0x10000)/b`. + */ + FT_EXPORT( FT_Long ) + FT_DivFix( FT_Long a, + FT_Long b ); + + + /************************************************************************** + * + * @function: + * FT_RoundFix + * + * @description: + * Round a 16.16 fixed number. + * + * @input: + * a :: + * The number to be rounded. 
+ * + * @return: + * `a` rounded to the nearest 16.16 fixed integer, halfway cases away + * from zero. + * + * @note: + * The function uses wrap-around arithmetic. + */ + FT_EXPORT( FT_Fixed ) + FT_RoundFix( FT_Fixed a ); + + + /************************************************************************** + * + * @function: + * FT_CeilFix + * + * @description: + * Compute the smallest following integer of a 16.16 fixed number. + * + * @input: + * a :: + * The number for which the ceiling function is to be computed. + * + * @return: + * `a` rounded towards plus infinity. + * + * @note: + * The function uses wrap-around arithmetic. + */ + FT_EXPORT( FT_Fixed ) + FT_CeilFix( FT_Fixed a ); + + + /************************************************************************** + * + * @function: + * FT_FloorFix + * + * @description: + * Compute the largest previous integer of a 16.16 fixed number. + * + * @input: + * a :: + * The number for which the floor function is to be computed. + * + * @return: + * `a` rounded towards minus infinity. + */ + FT_EXPORT( FT_Fixed ) + FT_FloorFix( FT_Fixed a ); + + + /************************************************************************** + * + * @function: + * FT_Vector_Transform + * + * @description: + * Transform a single vector through a 2x2 matrix. + * + * @inout: + * vector :: + * The target vector to transform. + * + * @input: + * matrix :: + * A pointer to the source 2x2 matrix. + * + * @note: + * The result is undefined if either `vector` or `matrix` is invalid. + */ + FT_EXPORT( void ) + FT_Vector_Transform( FT_Vector* vector, + const FT_Matrix* matrix ); + + + /************************************************************************** + * + * @section: + * library_setup + * + */ + + /************************************************************************** + * + * @enum: + * FREETYPE_XXX + * + * @description: + * These three macros identify the FreeType source code version. Use + * @FT_Library_Version to access them at runtime. + * + * @values: + * FREETYPE_MAJOR :: + * The major version number. + * FREETYPE_MINOR :: + * The minor version number. + * FREETYPE_PATCH :: + * The patch level. + * + * @note: + * The version number of FreeType if built as a dynamic link library with + * the 'libtool' package is _not_ controlled by these three macros. + * + */ +#define FREETYPE_MAJOR 2 +#define FREETYPE_MINOR 13 +#define FREETYPE_PATCH 1 + + + /************************************************************************** + * + * @function: + * FT_Library_Version + * + * @description: + * Return the version of the FreeType library being used. This is useful + * when dynamically linking to the library, since one cannot use the + * macros @FREETYPE_MAJOR, @FREETYPE_MINOR, and @FREETYPE_PATCH. + * + * @input: + * library :: + * A source library handle. + * + * @output: + * amajor :: + * The major version number. + * + * aminor :: + * The minor version number. + * + * apatch :: + * The patch version number. + * + * @note: + * The reason why this function takes a `library` argument is because + * certain programs implement library initialization in a custom way that + * doesn't use @FT_Init_FreeType. + * + * In such cases, the library version might not be available before the + * library object has been created. 
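+ *
+ * As a minimal sketch (assuming `library` is a valid library handle),
+ * the run-time version might be retrieved and compared against the
+ * compile-time macros like this:
+ *
+ * ```
+ * FT_Int major, minor, patch;
+ *
+ *
+ * FT_Library_Version( library, &major, &minor, &patch );
+ *
+ * if ( major != FREETYPE_MAJOR || minor != FREETYPE_MINOR )
+ * {
+ * ... warn about a header/library version mismatch ...
+ * }
+ * ```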
+ */ + FT_EXPORT( void ) + FT_Library_Version( FT_Library library, + FT_Int *amajor, + FT_Int *aminor, + FT_Int *apatch ); + + + /************************************************************************** + * + * @section: + * other_api_data + * + */ + + /************************************************************************** + * + * @function: + * FT_Face_CheckTrueTypePatents + * + * @description: + * Deprecated, does nothing. + * + * @input: + * face :: + * A face handle. + * + * @return: + * Always returns false. + * + * @note: + * Since May 2010, TrueType hinting is no longer patented. + * + * @since: + * 2.3.5 + * + */ + FT_EXPORT( FT_Bool ) + FT_Face_CheckTrueTypePatents( FT_Face face ); + + + /************************************************************************** + * + * @function: + * FT_Face_SetUnpatentedHinting + * + * @description: + * Deprecated, does nothing. + * + * @input: + * face :: + * A face handle. + * + * value :: + * New boolean setting. + * + * @return: + * Always returns false. + * + * @note: + * Since May 2010, TrueType hinting is no longer patented. + * + * @since: + * 2.3.5 + * + */ + FT_EXPORT( FT_Bool ) + FT_Face_SetUnpatentedHinting( FT_Face face, + FT_Bool value ); + + /* */ + + +FT_END_HEADER + +#endif /* FREETYPE_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftadvanc.h b/Example/include/freetype/ftadvanc.h new file mode 100644 index 0000000..4560ded --- /dev/null +++ b/Example/include/freetype/ftadvanc.h @@ -0,0 +1,188 @@ +/**************************************************************************** + * + * ftadvanc.h + * + * Quick computation of advance widths (specification only). + * + * Copyright (C) 2008-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTADVANC_H_ +#define FTADVANC_H_ + + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * quick_advance + * + * @title: + * Quick retrieval of advance values + * + * @abstract: + * Retrieve horizontal and vertical advance values without processing + * glyph outlines, if possible. + * + * @description: + * This section contains functions to quickly extract advance values + * without handling glyph outlines, if possible. + * + * @order: + * FT_Get_Advance + * FT_Get_Advances + * + */ + + + /************************************************************************** + * + * @enum: + * FT_ADVANCE_FLAG_FAST_ONLY + * + * @description: + * A bit-flag to be OR-ed with the `flags` parameter of the + * @FT_Get_Advance and @FT_Get_Advances functions. + * + * If set, it indicates that you want these functions to fail if the + * corresponding hinting mode or font driver doesn't allow for very quick + * advance computation. + * + * Typically, glyphs that are either unscaled, unhinted, bitmapped, or + * light-hinted can have their advance width computed very quickly. 
+ * + * Normal and bytecode hinted modes that require loading, scaling, and + * hinting of the glyph outline, are extremely slow by comparison. + */ +#define FT_ADVANCE_FLAG_FAST_ONLY 0x20000000L + + + /************************************************************************** + * + * @function: + * FT_Get_Advance + * + * @description: + * Retrieve the advance value of a given glyph outline in an @FT_Face. + * + * @input: + * face :: + * The source @FT_Face handle. + * + * gindex :: + * The glyph index. + * + * load_flags :: + * A set of bit flags similar to those used when calling + * @FT_Load_Glyph, used to determine what kind of advances you need. + * + * @output: + * padvance :: + * The advance value. If scaling is performed (based on the value of + * `load_flags`), the advance value is in 16.16 format. Otherwise, it + * is in font units. + * + * If @FT_LOAD_VERTICAL_LAYOUT is set, this is the vertical advance + * corresponding to a vertical layout. Otherwise, it is the horizontal + * advance in a horizontal layout. + * + * @return: + * FreeType error code. 0 means success. + * + * @note: + * This function may fail if you use @FT_ADVANCE_FLAG_FAST_ONLY and if + * the corresponding font backend doesn't have a quick way to retrieve + * the advances. + * + * A scaled advance is returned in 16.16 format but isn't transformed by + * the affine transformation specified by @FT_Set_Transform. + */ + FT_EXPORT( FT_Error ) + FT_Get_Advance( FT_Face face, + FT_UInt gindex, + FT_Int32 load_flags, + FT_Fixed *padvance ); + + + /************************************************************************** + * + * @function: + * FT_Get_Advances + * + * @description: + * Retrieve the advance values of several glyph outlines in an @FT_Face. + * + * @input: + * face :: + * The source @FT_Face handle. + * + * start :: + * The first glyph index. + * + * count :: + * The number of advance values you want to retrieve. + * + * load_flags :: + * A set of bit flags similar to those used when calling + * @FT_Load_Glyph. + * + * @output: + * padvance :: + * The advance values. This array, to be provided by the caller, must + * contain at least `count` elements. + * + * If scaling is performed (based on the value of `load_flags`), the + * advance values are in 16.16 format. Otherwise, they are in font + * units. + * + * If @FT_LOAD_VERTICAL_LAYOUT is set, these are the vertical advances + * corresponding to a vertical layout. Otherwise, they are the + * horizontal advances in a horizontal layout. + * + * @return: + * FreeType error code. 0 means success. + * + * @note: + * This function may fail if you use @FT_ADVANCE_FLAG_FAST_ONLY and if + * the corresponding font backend doesn't have a quick way to retrieve + * the advances. + * + * Scaled advances are returned in 16.16 format but aren't transformed by + * the affine transformation specified by @FT_Set_Transform. + */ + FT_EXPORT( FT_Error ) + FT_Get_Advances( FT_Face face, + FT_UInt start, + FT_UInt count, + FT_Int32 load_flags, + FT_Fixed *padvances ); + + /* */ + + +FT_END_HEADER + +#endif /* FTADVANC_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftbbox.h b/Example/include/freetype/ftbbox.h new file mode 100644 index 0000000..fc21740 --- /dev/null +++ b/Example/include/freetype/ftbbox.h @@ -0,0 +1,101 @@ +/**************************************************************************** + * + * ftbbox.h + * + * FreeType exact bbox computation (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. 
+ * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + + /************************************************************************** + * + * This component has a _single_ role: to compute exact outline bounding + * boxes. + * + * It is separated from the rest of the engine for various technical + * reasons. It may well be integrated in 'ftoutln' later. + * + */ + + +#ifndef FTBBOX_H_ +#define FTBBOX_H_ + + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * outline_processing + * + */ + + + /************************************************************************** + * + * @function: + * FT_Outline_Get_BBox + * + * @description: + * Compute the exact bounding box of an outline. This is slower than + * computing the control box. However, it uses an advanced algorithm + * that returns _very_ quickly when the two boxes coincide. Otherwise, + * the outline Bezier arcs are traversed to extract their extrema. + * + * @input: + * outline :: + * A pointer to the source outline. + * + * @output: + * abbox :: + * The outline's exact bounding box. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * If the font is tricky and the glyph has been loaded with + * @FT_LOAD_NO_SCALE, the resulting BBox is meaningless. To get + * reasonable values for the BBox it is necessary to load the glyph at a + * large ppem value (so that the hinting instructions can properly shift + * and scale the subglyphs), then extracting the BBox, which can be + * eventually converted back to font units. + */ + FT_EXPORT( FT_Error ) + FT_Outline_Get_BBox( FT_Outline* outline, + FT_BBox *abbox ); + + /* */ + + +FT_END_HEADER + +#endif /* FTBBOX_H_ */ + + +/* END */ + + +/* Local Variables: */ +/* coding: utf-8 */ +/* End: */ diff --git a/Example/include/freetype/ftbdf.h b/Example/include/freetype/ftbdf.h new file mode 100644 index 0000000..e8ce643 --- /dev/null +++ b/Example/include/freetype/ftbdf.h @@ -0,0 +1,212 @@ +/**************************************************************************** + * + * ftbdf.h + * + * FreeType API for accessing BDF-specific strings (specification). + * + * Copyright (C) 2002-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTBDF_H_ +#define FTBDF_H_ + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * bdf_fonts + * + * @title: + * BDF and PCF Files + * + * @abstract: + * BDF and PCF specific API. 
+ * + * @description: + * This section contains the declaration of functions specific to BDF and + * PCF fonts. + * + */ + + + /************************************************************************** + * + * @enum: + * BDF_PropertyType + * + * @description: + * A list of BDF property types. + * + * @values: + * BDF_PROPERTY_TYPE_NONE :: + * Value~0 is used to indicate a missing property. + * + * BDF_PROPERTY_TYPE_ATOM :: + * Property is a string atom. + * + * BDF_PROPERTY_TYPE_INTEGER :: + * Property is a 32-bit signed integer. + * + * BDF_PROPERTY_TYPE_CARDINAL :: + * Property is a 32-bit unsigned integer. + */ + typedef enum BDF_PropertyType_ + { + BDF_PROPERTY_TYPE_NONE = 0, + BDF_PROPERTY_TYPE_ATOM = 1, + BDF_PROPERTY_TYPE_INTEGER = 2, + BDF_PROPERTY_TYPE_CARDINAL = 3 + + } BDF_PropertyType; + + + /************************************************************************** + * + * @type: + * BDF_Property + * + * @description: + * A handle to a @BDF_PropertyRec structure to model a given BDF/PCF + * property. + */ + typedef struct BDF_PropertyRec_* BDF_Property; + + + /************************************************************************** + * + * @struct: + * BDF_PropertyRec + * + * @description: + * This structure models a given BDF/PCF property. + * + * @fields: + * type :: + * The property type. + * + * u.atom :: + * The atom string, if type is @BDF_PROPERTY_TYPE_ATOM. May be + * `NULL`, indicating an empty string. + * + * u.integer :: + * A signed integer, if type is @BDF_PROPERTY_TYPE_INTEGER. + * + * u.cardinal :: + * An unsigned integer, if type is @BDF_PROPERTY_TYPE_CARDINAL. + */ + typedef struct BDF_PropertyRec_ + { + BDF_PropertyType type; + union { + const char* atom; + FT_Int32 integer; + FT_UInt32 cardinal; + + } u; + + } BDF_PropertyRec; + + + /************************************************************************** + * + * @function: + * FT_Get_BDF_Charset_ID + * + * @description: + * Retrieve a BDF font character set identity, according to the BDF + * specification. + * + * @input: + * face :: + * A handle to the input face. + * + * @output: + * acharset_encoding :: + * Charset encoding, as a C~string, owned by the face. + * + * acharset_registry :: + * Charset registry, as a C~string, owned by the face. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function only works with BDF faces, returning an error otherwise. + */ + FT_EXPORT( FT_Error ) + FT_Get_BDF_Charset_ID( FT_Face face, + const char* *acharset_encoding, + const char* *acharset_registry ); + + + /************************************************************************** + * + * @function: + * FT_Get_BDF_Property + * + * @description: + * Retrieve a BDF property from a BDF or PCF font file. + * + * @input: + * face :: + * A handle to the input face. + * + * name :: + * The property name. + * + * @output: + * aproperty :: + * The property. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function works with BDF _and_ PCF fonts. It returns an error + * otherwise. It also returns an error if the property is not in the + * font. + * + * A 'property' is a either key-value pair within the STARTPROPERTIES + * ... ENDPROPERTIES block of a BDF font or a key-value pair from the + * `info->props` array within a `FontRec` structure of a PCF font. + * + * Integer properties are always stored as 'signed' within PCF fonts; + * consequently, @BDF_PROPERTY_TYPE_CARDINAL is a possible return value + * for BDF fonts only. 
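+ *
+ * As an illustrative sketch (assuming `face` is a BDF or PCF face;
+ * the XLFD property name `FAMILY_NAME` is used purely as an example),
+ * a string property might be read like this:
+ *
+ * ```
+ * BDF_PropertyRec prop;
+ * FT_Error error;
+ *
+ *
+ * error = FT_Get_BDF_Property( face, "FAMILY_NAME", &prop );
+ * if ( !error &&
+ * prop.type == BDF_PROPERTY_TYPE_ATOM &&
+ * prop.u.atom )
+ * {
+ * ... use the string prop.u.atom ...
+ * }
+ * ```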
+ * + * In case of error, `aproperty->type` is always set to + * @BDF_PROPERTY_TYPE_NONE. + */ + FT_EXPORT( FT_Error ) + FT_Get_BDF_Property( FT_Face face, + const char* prop_name, + BDF_PropertyRec *aproperty ); + + /* */ + +FT_END_HEADER + +#endif /* FTBDF_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftbitmap.h b/Example/include/freetype/ftbitmap.h new file mode 100644 index 0000000..eb6b4b1 --- /dev/null +++ b/Example/include/freetype/ftbitmap.h @@ -0,0 +1,329 @@ +/**************************************************************************** + * + * ftbitmap.h + * + * FreeType utility functions for bitmaps (specification). + * + * Copyright (C) 2004-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTBITMAP_H_ +#define FTBITMAP_H_ + + +#include +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * bitmap_handling + * + * @title: + * Bitmap Handling + * + * @abstract: + * Handling FT_Bitmap objects. + * + * @description: + * This section contains functions for handling @FT_Bitmap objects, + * automatically adjusting the target's bitmap buffer size as needed. + * + * Note that none of the functions changes the bitmap's 'flow' (as + * indicated by the sign of the `pitch` field in @FT_Bitmap). + * + * To set the flow, assign an appropriate positive or negative value to + * the `pitch` field of the target @FT_Bitmap object after calling + * @FT_Bitmap_Init but before calling any of the other functions + * described here. + */ + + + /************************************************************************** + * + * @function: + * FT_Bitmap_Init + * + * @description: + * Initialize a pointer to an @FT_Bitmap structure. + * + * @inout: + * abitmap :: + * A pointer to the bitmap structure. + * + * @note: + * A deprecated name for the same function is `FT_Bitmap_New`. + */ + FT_EXPORT( void ) + FT_Bitmap_Init( FT_Bitmap *abitmap ); + + + /* deprecated */ + FT_EXPORT( void ) + FT_Bitmap_New( FT_Bitmap *abitmap ); + + + /************************************************************************** + * + * @function: + * FT_Bitmap_Copy + * + * @description: + * Copy a bitmap into another one. + * + * @input: + * library :: + * A handle to a library object. + * + * source :: + * A handle to the source bitmap. + * + * @output: + * target :: + * A handle to the target bitmap. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * `source->buffer` and `target->buffer` must neither be equal nor + * overlap. + */ + FT_EXPORT( FT_Error ) + FT_Bitmap_Copy( FT_Library library, + const FT_Bitmap *source, + FT_Bitmap *target ); + + + /************************************************************************** + * + * @function: + * FT_Bitmap_Embolden + * + * @description: + * Embolden a bitmap. The new bitmap will be about `xStrength` pixels + * wider and `yStrength` pixels higher. The left and bottom borders are + * kept unchanged. 
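+ *
+ * As a minimal sketch (assuming `library` is a valid library handle
+ * and `slot` a glyph slot holding a rendered bitmap; see also the note
+ * below on @FT_GlyphSlot_Own_Bitmap), a glyph bitmap might be
+ * emboldened by one pixel (the 26.6 value~64) in each direction like
+ * this:
+ *
+ * ```
+ * FT_Error error;
+ *
+ *
+ * error = FT_GlyphSlot_Own_Bitmap( slot );
+ * if ( !error )
+ * error = FT_Bitmap_Embolden( library, &slot->bitmap, 64, 64 );
+ * ```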
+ * + * @input: + * library :: + * A handle to a library object. + * + * xStrength :: + * How strong the glyph is emboldened horizontally. Expressed in 26.6 + * pixel format. + * + * yStrength :: + * How strong the glyph is emboldened vertically. Expressed in 26.6 + * pixel format. + * + * @inout: + * bitmap :: + * A handle to the target bitmap. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The current implementation restricts `xStrength` to be less than or + * equal to~8 if bitmap is of pixel_mode @FT_PIXEL_MODE_MONO. + * + * If you want to embolden the bitmap owned by a @FT_GlyphSlotRec, you + * should call @FT_GlyphSlot_Own_Bitmap on the slot first. + * + * Bitmaps in @FT_PIXEL_MODE_GRAY2 and @FT_PIXEL_MODE_GRAY@ format are + * converted to @FT_PIXEL_MODE_GRAY format (i.e., 8bpp). + */ + FT_EXPORT( FT_Error ) + FT_Bitmap_Embolden( FT_Library library, + FT_Bitmap* bitmap, + FT_Pos xStrength, + FT_Pos yStrength ); + + + /************************************************************************** + * + * @function: + * FT_Bitmap_Convert + * + * @description: + * Convert a bitmap object with depth 1bpp, 2bpp, 4bpp, 8bpp or 32bpp to + * a bitmap object with depth 8bpp, making the number of used bytes per + * line (a.k.a. the 'pitch') a multiple of `alignment`. + * + * @input: + * library :: + * A handle to a library object. + * + * source :: + * The source bitmap. + * + * alignment :: + * The pitch of the bitmap is a multiple of this argument. Common + * values are 1, 2, or 4. + * + * @output: + * target :: + * The target bitmap. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * It is possible to call @FT_Bitmap_Convert multiple times without + * calling @FT_Bitmap_Done (the memory is simply reallocated). + * + * Use @FT_Bitmap_Done to finally remove the bitmap object. + * + * The `library` argument is taken to have access to FreeType's memory + * handling functions. + * + * `source->buffer` and `target->buffer` must neither be equal nor + * overlap. + */ + FT_EXPORT( FT_Error ) + FT_Bitmap_Convert( FT_Library library, + const FT_Bitmap *source, + FT_Bitmap *target, + FT_Int alignment ); + + + /************************************************************************** + * + * @function: + * FT_Bitmap_Blend + * + * @description: + * Blend a bitmap onto another bitmap, using a given color. + * + * @input: + * library :: + * A handle to a library object. + * + * source :: + * The source bitmap, which can have any @FT_Pixel_Mode format. + * + * source_offset :: + * The offset vector to the upper left corner of the source bitmap in + * 26.6 pixel format. It should represent an integer offset; the + * function will set the lowest six bits to zero to enforce that. + * + * color :: + * The color used to draw `source` onto `target`. + * + * @inout: + * target :: + * A handle to an `FT_Bitmap` object. It should be either initialized + * as empty with a call to @FT_Bitmap_Init, or it should be of type + * @FT_PIXEL_MODE_BGRA. + * + * atarget_offset :: + * The offset vector to the upper left corner of the target bitmap in + * 26.6 pixel format. It should represent an integer offset; the + * function will set the lowest six bits to zero to enforce that. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function doesn't perform clipping. + * + * The bitmap in `target` gets allocated or reallocated as needed; the + * vector `atarget_offset` is updated accordingly. 
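+ *
+ * As an illustrative sketch (assuming `library` is a valid library
+ * handle and `slot` a glyph slot containing a rendered bitmap; the
+ * opaque red color and zero offsets are arbitrary), a single blend
+ * into an initially empty target might look like this:
+ *
+ * ```
+ * FT_Bitmap target;
+ * FT_Vector source_offset, target_offset;
+ * FT_Color color;
+ * FT_Error error;
+ *
+ *
+ * FT_Bitmap_Init( &target );
+ *
+ * source_offset.x = 0;
+ * source_offset.y = 0;
+ * target_offset = source_offset;
+ *
+ * color.blue = 0x00;
+ * color.green = 0x00;
+ * color.red = 0xFF;
+ * color.alpha = 0xFF;
+ *
+ * error = FT_Bitmap_Blend( library, &slot->bitmap, source_offset,
+ * &target, &target_offset, color );
+ * ```
+ *
+ * When no longer needed, the target bitmap should be released with
+ * @FT_Bitmap_Done.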
+ * + * In case of allocation or reallocation, the bitmap's pitch is set to + * `4 * width`. Both `source` and `target` must have the same bitmap + * flow (as indicated by the sign of the `pitch` field). + * + * `source->buffer` and `target->buffer` must neither be equal nor + * overlap. + * + * @since: + * 2.10 + */ + FT_EXPORT( FT_Error ) + FT_Bitmap_Blend( FT_Library library, + const FT_Bitmap* source, + const FT_Vector source_offset, + FT_Bitmap* target, + FT_Vector *atarget_offset, + FT_Color color ); + + + /************************************************************************** + * + * @function: + * FT_GlyphSlot_Own_Bitmap + * + * @description: + * Make sure that a glyph slot owns `slot->bitmap`. + * + * @input: + * slot :: + * The glyph slot. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function is to be used in combination with @FT_Bitmap_Embolden. + */ + FT_EXPORT( FT_Error ) + FT_GlyphSlot_Own_Bitmap( FT_GlyphSlot slot ); + + + /************************************************************************** + * + * @function: + * FT_Bitmap_Done + * + * @description: + * Destroy a bitmap object initialized with @FT_Bitmap_Init. + * + * @input: + * library :: + * A handle to a library object. + * + * bitmap :: + * The bitmap object to be freed. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The `library` argument is taken to have access to FreeType's memory + * handling functions. + */ + FT_EXPORT( FT_Error ) + FT_Bitmap_Done( FT_Library library, + FT_Bitmap *bitmap ); + + + /* */ + + +FT_END_HEADER + +#endif /* FTBITMAP_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftbzip2.h b/Example/include/freetype/ftbzip2.h new file mode 100644 index 0000000..7d29f46 --- /dev/null +++ b/Example/include/freetype/ftbzip2.h @@ -0,0 +1,102 @@ +/**************************************************************************** + * + * ftbzip2.h + * + * Bzip2-compressed stream support. + * + * Copyright (C) 2010-2023 by + * Joel Klinghed. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTBZIP2_H_ +#define FTBZIP2_H_ + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + /************************************************************************** + * + * @section: + * bzip2 + * + * @title: + * BZIP2 Streams + * + * @abstract: + * Using bzip2-compressed font files. + * + * @description: + * In certain builds of the library, bzip2 compression recognition is + * automatically handled when calling @FT_New_Face or @FT_Open_Face. + * This means that if no font driver is capable of handling the raw + * compressed file, the library will try to open a bzip2 compressed + * stream from it and re-open the face with it. + * + * The stream implementation is very basic and resets the decompression + * process each time seeking backwards is needed within the stream, + * which significantly undermines the performance. + * + * This section contains the declaration of Bzip2-specific functions. 
+ * + */ + + + /************************************************************************** + * + * @function: + * FT_Stream_OpenBzip2 + * + * @description: + * Open a new stream to parse bzip2-compressed font files. This is + * mainly used to support the compressed `*.pcf.bz2` fonts that come with + * XFree86. + * + * @input: + * stream :: + * The target embedding stream. + * + * source :: + * The source stream. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The source stream must be opened _before_ calling this function. + * + * Calling the internal function `FT_Stream_Close` on the new stream will + * **not** call `FT_Stream_Close` on the source stream. None of the + * stream objects will be released to the heap. + * + * This function may return `FT_Err_Unimplemented_Feature` if your build + * of FreeType was not compiled with bzip2 support. + */ + FT_EXPORT( FT_Error ) + FT_Stream_OpenBzip2( FT_Stream stream, + FT_Stream source ); + + /* */ + + +FT_END_HEADER + +#endif /* FTBZIP2_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftcache.h b/Example/include/freetype/ftcache.h new file mode 100644 index 0000000..a2072e2 --- /dev/null +++ b/Example/include/freetype/ftcache.h @@ -0,0 +1,1087 @@ +/**************************************************************************** + * + * ftcache.h + * + * FreeType Cache subsystem (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTCACHE_H_ +#define FTCACHE_H_ + + +#include + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * cache_subsystem + * + * @title: + * Cache Sub-System + * + * @abstract: + * How to cache face, size, and glyph data with FreeType~2. + * + * @description: + * This section describes the FreeType~2 cache sub-system, which is used + * to limit the number of concurrently opened @FT_Face and @FT_Size + * objects, as well as caching information like character maps and glyph + * images while limiting their maximum memory usage. + * + * Note that all types and functions begin with the `FTC_` prefix rather + * than the usual `FT_` prefix in the rest of FreeType. + * + * The cache is highly portable and, thus, doesn't know anything about + * the fonts installed on your system, or how to access them. Therefore, + * it requires the following. + * + * * @FTC_FaceID, an arbitrary non-zero value that uniquely identifies + * available or installed font faces, has to be provided to the + * cache by the client. Note that the cache only stores and compares + * these values and doesn't try to interpret them in any way, but they + * have to be persistent on the client side. + * + * * @FTC_Face_Requester, a method to convert an @FTC_FaceID into a new + * @FT_Face object when necessary, has to be provided to the cache by + * the client. The @FT_Face object is completely managed by the cache, + * including its termination through @FT_Done_Face. To monitor + * termination of face objects, the finalizer callback in the `generic` + * field of the @FT_Face object can be used, which might also be used + * to store the @FTC_FaceID of the face. 
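+ *
+ * A minimal requester, sketched here with the application-defined
+ * names `MyFaceRec` and `my_face_requester` (see also the discussion
+ * below), could simply forward a stored pathname and face index to
+ * @FT_New_Face:
+ *
+ * ```
+ *   typedef struct  MyFaceRec_
+ *   {
+ *     const char*  pathname;
+ *     FT_Long      face_index;
+ *
+ *   } MyFaceRec;
+ *
+ *
+ *   static FT_Error
+ *   my_face_requester( FTC_FaceID  face_id,
+ *                      FT_Library  library,
+ *                      FT_Pointer  req_data,
+ *                      FT_Face*    aface )
+ *   {
+ *     MyFaceRec*  rec = (MyFaceRec*)face_id;
+ *
+ *
+ *     (void)req_data;  // not used in this sketch
+ *
+ *     return FT_New_Face( library, rec->pathname,
+ *                         rec->face_index, aface );
+ *   }
+ * ```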
+ * + * Clients are free to map face IDs to anything useful. The most simple + * usage is, for example, to associate them to a `{pathname,face_index}` + * pair that is then used by @FTC_Face_Requester to call @FT_New_Face. + * However, more complex schemes are also possible. + * + * Note that for the cache to work correctly, the face ID values must be + * **persistent**, which means that the contents they point to should not + * change at runtime, or that their value should not become invalid. + * If this is unavoidable (e.g., when a font is uninstalled at runtime), + * you should call @FTC_Manager_RemoveFaceID as soon as possible to let + * the cache get rid of any references to the old @FTC_FaceID it may keep + * internally. Failure to do so will lead to incorrect behaviour or even + * crashes in @FTC_Face_Requester. + * + * To use the cache, start with calling @FTC_Manager_New to create a new + * @FTC_Manager object, which models a single cache instance. You can + * then look up @FT_Face and @FT_Size objects with + * @FTC_Manager_LookupFace and @FTC_Manager_LookupSize, respectively, and + * use them in any FreeType work stream. You can also cache other + * FreeType objects as follows. + * + * * If you want to use the charmap caching, call @FTC_CMapCache_New, + * then later use @FTC_CMapCache_Lookup to perform the equivalent of + * @FT_Get_Char_Index, only much faster. + * + * * If you want to use the @FT_Glyph caching, call @FTC_ImageCache_New, + * then later use @FTC_ImageCache_Lookup to retrieve the corresponding + * @FT_Glyph objects from the cache. + * + * * If you need lots of small bitmaps, it is much more memory-efficient + * to call @FTC_SBitCache_New followed by @FTC_SBitCache_Lookup. This + * returns @FTC_SBitRec structures, which are used to store small + * bitmaps directly. (A small bitmap is one whose metrics and + * dimensions all fit into 8-bit integers). + * + * @order: + * FTC_Manager + * FTC_FaceID + * FTC_Face_Requester + * + * FTC_Manager_New + * FTC_Manager_Reset + * FTC_Manager_Done + * FTC_Manager_LookupFace + * FTC_Manager_LookupSize + * FTC_Manager_RemoveFaceID + * + * FTC_Node + * FTC_Node_Unref + * + * FTC_ImageCache + * FTC_ImageCache_New + * FTC_ImageCache_Lookup + * + * FTC_SBit + * FTC_SBitCache + * FTC_SBitCache_New + * FTC_SBitCache_Lookup + * + * FTC_CMapCache + * FTC_CMapCache_New + * FTC_CMapCache_Lookup + * + *************************************************************************/ + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** BASIC TYPE DEFINITIONS *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * @type: + * FTC_FaceID + * + * @description: + * An opaque pointer type that is used to identity face objects. The + * contents of such objects is application-dependent. + * + * These pointers are typically used to point to a user-defined structure + * containing a font file path, and face index. + * + * @note: + * Never use `NULL` as a valid @FTC_FaceID. 
+ * + * Face IDs are passed by the client to the cache manager that calls, + * when needed, the @FTC_Face_Requester to translate them into new + * @FT_Face objects. + * + * If the content of a given face ID changes at runtime, or if the value + * becomes invalid (e.g., when uninstalling a font), you should + * immediately call @FTC_Manager_RemoveFaceID before any other cache + * function. + * + * Failure to do so will result in incorrect behaviour or even memory + * leaks and crashes. + */ + typedef FT_Pointer FTC_FaceID; + + + /************************************************************************** + * + * @functype: + * FTC_Face_Requester + * + * @description: + * A callback function provided by client applications. It is used by + * the cache manager to translate a given @FTC_FaceID into a new valid + * @FT_Face object, on demand. + * + * @input: + * face_id :: + * The face ID to resolve. + * + * library :: + * A handle to a FreeType library object. + * + * req_data :: + * Application-provided request data (see note below). + * + * @output: + * aface :: + * A new @FT_Face handle. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The third parameter `req_data` is the same as the one passed by the + * client when @FTC_Manager_New is called. + * + * The face requester should not perform funny things on the returned + * face object, like creating a new @FT_Size for it, or setting a + * transformation through @FT_Set_Transform! + */ + typedef FT_Error + (*FTC_Face_Requester)( FTC_FaceID face_id, + FT_Library library, + FT_Pointer req_data, + FT_Face* aface ); + + /* */ + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** CACHE MANAGER OBJECT *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * @type: + * FTC_Manager + * + * @description: + * This object corresponds to one instance of the cache-subsystem. It is + * used to cache one or more @FT_Face objects, along with corresponding + * @FT_Size objects. + * + * The manager intentionally limits the total number of opened @FT_Face + * and @FT_Size objects to control memory usage. See the `max_faces` and + * `max_sizes` parameters of @FTC_Manager_New. + * + * The manager is also used to cache 'nodes' of various types while + * limiting their total memory usage. + * + * All limitations are enforced by keeping lists of managed objects in + * most-recently-used order, and flushing old nodes to make room for new + * ones. + */ + typedef struct FTC_ManagerRec_* FTC_Manager; + + + /************************************************************************** + * + * @type: + * FTC_Node + * + * @description: + * An opaque handle to a cache node object. Each cache node is + * reference-counted. A node with a count of~0 might be flushed out of a + * full cache whenever a lookup request is performed. + * + * If you look up nodes, you have the ability to 'acquire' them, i.e., to + * increment their reference count. 
This will prevent the node from + * being flushed out of the cache until you explicitly 'release' it (see + * @FTC_Node_Unref). + * + * See also @FTC_SBitCache_Lookup and @FTC_ImageCache_Lookup. + */ + typedef struct FTC_NodeRec_* FTC_Node; + + + /************************************************************************** + * + * @function: + * FTC_Manager_New + * + * @description: + * Create a new cache manager. + * + * @input: + * library :: + * The parent FreeType library handle to use. + * + * max_faces :: + * Maximum number of opened @FT_Face objects managed by this cache + * instance. Use~0 for defaults. + * + * max_sizes :: + * Maximum number of opened @FT_Size objects managed by this cache + * instance. Use~0 for defaults. + * + * max_bytes :: + * Maximum number of bytes to use for cached data nodes. Use~0 for + * defaults. Note that this value does not account for managed + * @FT_Face and @FT_Size objects. + * + * requester :: + * An application-provided callback used to translate face IDs into + * real @FT_Face objects. + * + * req_data :: + * A generic pointer that is passed to the requester each time it is + * called (see @FTC_Face_Requester). + * + * @output: + * amanager :: + * A handle to a new manager object. 0~in case of failure. + * + * @return: + * FreeType error code. 0~means success. + */ + FT_EXPORT( FT_Error ) + FTC_Manager_New( FT_Library library, + FT_UInt max_faces, + FT_UInt max_sizes, + FT_ULong max_bytes, + FTC_Face_Requester requester, + FT_Pointer req_data, + FTC_Manager *amanager ); + + + /************************************************************************** + * + * @function: + * FTC_Manager_Reset + * + * @description: + * Empty a given cache manager. This simply gets rid of all the + * currently cached @FT_Face and @FT_Size objects within the manager. + * + * @inout: + * manager :: + * A handle to the manager. + */ + FT_EXPORT( void ) + FTC_Manager_Reset( FTC_Manager manager ); + + + /************************************************************************** + * + * @function: + * FTC_Manager_Done + * + * @description: + * Destroy a given manager after emptying it. + * + * @input: + * manager :: + * A handle to the target cache manager object. + */ + FT_EXPORT( void ) + FTC_Manager_Done( FTC_Manager manager ); + + + /************************************************************************** + * + * @function: + * FTC_Manager_LookupFace + * + * @description: + * Retrieve the @FT_Face object that corresponds to a given face ID + * through a cache manager. + * + * @input: + * manager :: + * A handle to the cache manager. + * + * face_id :: + * The ID of the face object. + * + * @output: + * aface :: + * A handle to the face object. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The returned @FT_Face object is always owned by the manager. You + * should never try to discard it yourself. + * + * The @FT_Face object doesn't necessarily have a current size object + * (i.e., face->size can be~0). If you need a specific 'font size', use + * @FTC_Manager_LookupSize instead. + * + * Never change the face's transformation matrix (i.e., never call the + * @FT_Set_Transform function) on a returned face! If you need to + * transform glyphs, do it yourself after glyph loading. + * + * When you perform a lookup, out-of-memory errors are detected _within_ + * the lookup and force incremental flushes of the cache until enough + * memory is released for the lookup to succeed. 
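+ *
+ * For illustration, creating a manager with default limits and
+ * resolving a face ID might look like the following sketch, where
+ * `my_face_requester` and `face_id` stand for application-provided
+ * values:
+ *
+ * ```
+ *   FTC_Manager  manager;
+ *   FT_Face      face;
+ *
+ *
+ *   error = FTC_Manager_New( library, 0, 0, 0,
+ *                            my_face_requester, NULL, &manager );
+ *   if ( !error )
+ *     error = FTC_Manager_LookupFace( manager, face_id, &face );
+ *
+ *   // `face` is owned by the manager; never call FT_Done_Face on it
+ * ```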
+ * + * If a lookup fails with `FT_Err_Out_Of_Memory` the cache has already + * been completely flushed, and still no memory was available for the + * operation. + */ + FT_EXPORT( FT_Error ) + FTC_Manager_LookupFace( FTC_Manager manager, + FTC_FaceID face_id, + FT_Face *aface ); + + + /************************************************************************** + * + * @struct: + * FTC_ScalerRec + * + * @description: + * A structure used to describe a given character size in either pixels + * or points to the cache manager. See @FTC_Manager_LookupSize. + * + * @fields: + * face_id :: + * The source face ID. + * + * width :: + * The character width. + * + * height :: + * The character height. + * + * pixel :: + * A Boolean. If 1, the `width` and `height` fields are interpreted as + * integer pixel character sizes. Otherwise, they are expressed as + * 1/64 of points. + * + * x_res :: + * Only used when `pixel` is value~0 to indicate the horizontal + * resolution in dpi. + * + * y_res :: + * Only used when `pixel` is value~0 to indicate the vertical + * resolution in dpi. + * + * @note: + * This type is mainly used to retrieve @FT_Size objects through the + * cache manager. + */ + typedef struct FTC_ScalerRec_ + { + FTC_FaceID face_id; + FT_UInt width; + FT_UInt height; + FT_Int pixel; + FT_UInt x_res; + FT_UInt y_res; + + } FTC_ScalerRec; + + + /************************************************************************** + * + * @struct: + * FTC_Scaler + * + * @description: + * A handle to an @FTC_ScalerRec structure. + */ + typedef struct FTC_ScalerRec_* FTC_Scaler; + + + /************************************************************************** + * + * @function: + * FTC_Manager_LookupSize + * + * @description: + * Retrieve the @FT_Size object that corresponds to a given + * @FTC_ScalerRec pointer through a cache manager. + * + * @input: + * manager :: + * A handle to the cache manager. + * + * scaler :: + * A scaler handle. + * + * @output: + * asize :: + * A handle to the size object. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The returned @FT_Size object is always owned by the manager. You + * should never try to discard it by yourself. + * + * You can access the parent @FT_Face object simply as `size->face` if + * you need it. Note that this object is also owned by the manager. + * + * @note: + * When you perform a lookup, out-of-memory errors are detected _within_ + * the lookup and force incremental flushes of the cache until enough + * memory is released for the lookup to succeed. + * + * If a lookup fails with `FT_Err_Out_Of_Memory` the cache has already + * been completely flushed, and still no memory is available for the + * operation. + */ + FT_EXPORT( FT_Error ) + FTC_Manager_LookupSize( FTC_Manager manager, + FTC_Scaler scaler, + FT_Size *asize ); + + + /************************************************************************** + * + * @function: + * FTC_Node_Unref + * + * @description: + * Decrement a cache node's internal reference count. When the count + * reaches 0, it is not destroyed but becomes eligible for subsequent + * cache flushes. + * + * @input: + * node :: + * The cache node handle. + * + * manager :: + * The cache manager handle. 
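+ *
+ * A typical acquire/release cycle, sketched here with the glyph image
+ * cache (`manager`, `cache`, `gindex`, and a filled-in
+ * @FTC_ImageTypeRec `image_type` are assumed to exist):
+ *
+ * ```
+ *   FT_Glyph  glyph;
+ *   FTC_Node  node;
+ *
+ *
+ *   error = FTC_ImageCache_Lookup( cache, &image_type, gindex,
+ *                                  &glyph, &node );
+ *   if ( !error )
+ *   {
+ *     // use `glyph`; the non-NULL `node` argument keeps it cached
+ *
+ *     FTC_Node_Unref( node, manager );
+ *   }
+ * ```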
+ */ + FT_EXPORT( void ) + FTC_Node_Unref( FTC_Node node, + FTC_Manager manager ); + + + /************************************************************************** + * + * @function: + * FTC_Manager_RemoveFaceID + * + * @description: + * A special function used to indicate to the cache manager that a given + * @FTC_FaceID is no longer valid, either because its content changed, or + * because it was deallocated or uninstalled. + * + * @input: + * manager :: + * The cache manager handle. + * + * face_id :: + * The @FTC_FaceID to be removed. + * + * @note: + * This function flushes all nodes from the cache corresponding to this + * `face_id`, with the exception of nodes with a non-null reference + * count. + * + * Such nodes are however modified internally so as to never appear in + * later lookups with the same `face_id` value, and to be immediately + * destroyed when released by all their users. + * + */ + FT_EXPORT( void ) + FTC_Manager_RemoveFaceID( FTC_Manager manager, + FTC_FaceID face_id ); + + + /************************************************************************** + * + * @type: + * FTC_CMapCache + * + * @description: + * An opaque handle used to model a charmap cache. This cache is to hold + * character codes -> glyph indices mappings. + * + */ + typedef struct FTC_CMapCacheRec_* FTC_CMapCache; + + + /************************************************************************** + * + * @function: + * FTC_CMapCache_New + * + * @description: + * Create a new charmap cache. + * + * @input: + * manager :: + * A handle to the cache manager. + * + * @output: + * acache :: + * A new cache handle. `NULL` in case of error. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * Like all other caches, this one will be destroyed with the cache + * manager. + * + */ + FT_EXPORT( FT_Error ) + FTC_CMapCache_New( FTC_Manager manager, + FTC_CMapCache *acache ); + + + /************************************************************************** + * + * @function: + * FTC_CMapCache_Lookup + * + * @description: + * Translate a character code into a glyph index, using the charmap + * cache. + * + * @input: + * cache :: + * A charmap cache handle. + * + * face_id :: + * The source face ID. + * + * cmap_index :: + * The index of the charmap in the source face. Any negative value + * means to use the cache @FT_Face's default charmap. + * + * char_code :: + * The character code (in the corresponding charmap). + * + * @return: + * Glyph index. 0~means 'no glyph'. + * + */ + FT_EXPORT( FT_UInt ) + FTC_CMapCache_Lookup( FTC_CMapCache cache, + FTC_FaceID face_id, + FT_Int cmap_index, + FT_UInt32 char_code ); + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** IMAGE CACHE OBJECT *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * @struct: + * FTC_ImageTypeRec + * + * @description: + * A structure used to model the type of images in a glyph cache. + * + * @fields: + * face_id :: + * The face ID. + * + * width :: + * The width in pixels. + * + * height :: + * The height in pixels. 
+ * + * flags :: + * The load flags, as in @FT_Load_Glyph. + * + */ + typedef struct FTC_ImageTypeRec_ + { + FTC_FaceID face_id; + FT_UInt width; + FT_UInt height; + FT_Int32 flags; + + } FTC_ImageTypeRec; + + + /************************************************************************** + * + * @type: + * FTC_ImageType + * + * @description: + * A handle to an @FTC_ImageTypeRec structure. + * + */ + typedef struct FTC_ImageTypeRec_* FTC_ImageType; + + + /* */ + + +#define FTC_IMAGE_TYPE_COMPARE( d1, d2 ) \ + ( (d1)->face_id == (d2)->face_id && \ + (d1)->width == (d2)->width && \ + (d1)->flags == (d2)->flags ) + + + /************************************************************************** + * + * @type: + * FTC_ImageCache + * + * @description: + * A handle to a glyph image cache object. They are designed to hold + * many distinct glyph images while not exceeding a certain memory + * threshold. + */ + typedef struct FTC_ImageCacheRec_* FTC_ImageCache; + + + /************************************************************************** + * + * @function: + * FTC_ImageCache_New + * + * @description: + * Create a new glyph image cache. + * + * @input: + * manager :: + * The parent manager for the image cache. + * + * @output: + * acache :: + * A handle to the new glyph image cache object. + * + * @return: + * FreeType error code. 0~means success. + */ + FT_EXPORT( FT_Error ) + FTC_ImageCache_New( FTC_Manager manager, + FTC_ImageCache *acache ); + + + /************************************************************************** + * + * @function: + * FTC_ImageCache_Lookup + * + * @description: + * Retrieve a given glyph image from a glyph image cache. + * + * @input: + * cache :: + * A handle to the source glyph image cache. + * + * type :: + * A pointer to a glyph image type descriptor. + * + * gindex :: + * The glyph index to retrieve. + * + * @output: + * aglyph :: + * The corresponding @FT_Glyph object. 0~in case of failure. + * + * anode :: + * Used to return the address of the corresponding cache node after + * incrementing its reference count (see note below). + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The returned glyph is owned and managed by the glyph image cache. + * Never try to transform or discard it manually! You can however create + * a copy with @FT_Glyph_Copy and modify the new one. + * + * If `anode` is _not_ `NULL`, it receives the address of the cache node + * containing the glyph image, after increasing its reference count. + * This ensures that the node (as well as the @FT_Glyph) will always be + * kept in the cache until you call @FTC_Node_Unref to 'release' it. + * + * If `anode` is `NULL`, the cache node is left unchanged, which means + * that the @FT_Glyph could be flushed out of the cache on the next call + * to one of the caching sub-system APIs. Don't assume that it is + * persistent! + */ + FT_EXPORT( FT_Error ) + FTC_ImageCache_Lookup( FTC_ImageCache cache, + FTC_ImageType type, + FT_UInt gindex, + FT_Glyph *aglyph, + FTC_Node *anode ); + + + /************************************************************************** + * + * @function: + * FTC_ImageCache_LookupScaler + * + * @description: + * A variant of @FTC_ImageCache_Lookup that uses an @FTC_ScalerRec to + * specify the face ID and its size. + * + * @input: + * cache :: + * A handle to the source glyph image cache. + * + * scaler :: + * A pointer to a scaler descriptor. + * + * load_flags :: + * The corresponding load flags. + * + * gindex :: + * The glyph index to retrieve. 
+ * + * @output: + * aglyph :: + * The corresponding @FT_Glyph object. 0~in case of failure. + * + * anode :: + * Used to return the address of the corresponding cache node after + * incrementing its reference count (see note below). + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The returned glyph is owned and managed by the glyph image cache. + * Never try to transform or discard it manually! You can however create + * a copy with @FT_Glyph_Copy and modify the new one. + * + * If `anode` is _not_ `NULL`, it receives the address of the cache node + * containing the glyph image, after increasing its reference count. + * This ensures that the node (as well as the @FT_Glyph) will always be + * kept in the cache until you call @FTC_Node_Unref to 'release' it. + * + * If `anode` is `NULL`, the cache node is left unchanged, which means + * that the @FT_Glyph could be flushed out of the cache on the next call + * to one of the caching sub-system APIs. Don't assume that it is + * persistent! + * + * Calls to @FT_Set_Char_Size and friends have no effect on cached + * glyphs; you should always use the FreeType cache API instead. + */ + FT_EXPORT( FT_Error ) + FTC_ImageCache_LookupScaler( FTC_ImageCache cache, + FTC_Scaler scaler, + FT_ULong load_flags, + FT_UInt gindex, + FT_Glyph *aglyph, + FTC_Node *anode ); + + + /************************************************************************** + * + * @type: + * FTC_SBit + * + * @description: + * A handle to a small bitmap descriptor. See the @FTC_SBitRec structure + * for details. + */ + typedef struct FTC_SBitRec_* FTC_SBit; + + + /************************************************************************** + * + * @struct: + * FTC_SBitRec + * + * @description: + * A very compact structure used to describe a small glyph bitmap. + * + * @fields: + * width :: + * The bitmap width in pixels. + * + * height :: + * The bitmap height in pixels. + * + * left :: + * The horizontal distance from the pen position to the left bitmap + * border (a.k.a. 'left side bearing', or 'lsb'). + * + * top :: + * The vertical distance from the pen position (on the baseline) to the + * upper bitmap border (a.k.a. 'top side bearing'). The distance is + * positive for upwards y~coordinates. + * + * format :: + * The format of the glyph bitmap (monochrome or gray). + * + * max_grays :: + * Maximum gray level value (in the range 1 to~255). + * + * pitch :: + * The number of bytes per bitmap line. May be positive or negative. + * + * xadvance :: + * The horizontal advance width in pixels. + * + * yadvance :: + * The vertical advance height in pixels. + * + * buffer :: + * A pointer to the bitmap pixels. + */ + typedef struct FTC_SBitRec_ + { + FT_Byte width; + FT_Byte height; + FT_Char left; + FT_Char top; + + FT_Byte format; + FT_Byte max_grays; + FT_Short pitch; + FT_Char xadvance; + FT_Char yadvance; + + FT_Byte* buffer; + + } FTC_SBitRec; + + + /************************************************************************** + * + * @type: + * FTC_SBitCache + * + * @description: + * A handle to a small bitmap cache. These are special cache objects + * used to store small glyph bitmaps (and anti-aliased pixmaps) in a much + * more efficient way than the traditional glyph image cache implemented + * by @FTC_ImageCache. 
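+ *
+ * A short usage sketch, assuming `manager`, `gindex`, and a filled-in
+ * @FTC_ImageTypeRec `image_type` already exist:
+ *
+ * ```
+ *   FTC_SBitCache  sbit_cache;
+ *   FTC_SBit       sbit;
+ *
+ *
+ *   error = FTC_SBitCache_New( manager, &sbit_cache );
+ *   if ( !error )
+ *     error = FTC_SBitCache_Lookup( sbit_cache, &image_type, gindex,
+ *                                   &sbit, NULL );
+ *
+ *   // with a NULL node argument, `sbit` may be flushed out by the
+ *   // next cache lookup, so use it immediately
+ * ```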
+ */ + typedef struct FTC_SBitCacheRec_* FTC_SBitCache; + + + /************************************************************************** + * + * @function: + * FTC_SBitCache_New + * + * @description: + * Create a new cache to store small glyph bitmaps. + * + * @input: + * manager :: + * A handle to the source cache manager. + * + * @output: + * acache :: + * A handle to the new sbit cache. `NULL` in case of error. + * + * @return: + * FreeType error code. 0~means success. + */ + FT_EXPORT( FT_Error ) + FTC_SBitCache_New( FTC_Manager manager, + FTC_SBitCache *acache ); + + + /************************************************************************** + * + * @function: + * FTC_SBitCache_Lookup + * + * @description: + * Look up a given small glyph bitmap in a given sbit cache and 'lock' it + * to prevent its flushing from the cache until needed. + * + * @input: + * cache :: + * A handle to the source sbit cache. + * + * type :: + * A pointer to the glyph image type descriptor. + * + * gindex :: + * The glyph index. + * + * @output: + * sbit :: + * A handle to a small bitmap descriptor. + * + * anode :: + * Used to return the address of the corresponding cache node after + * incrementing its reference count (see note below). + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The small bitmap descriptor and its bit buffer are owned by the cache + * and should never be freed by the application. They might as well + * disappear from memory on the next cache lookup, so don't treat them as + * persistent data. + * + * The descriptor's `buffer` field is set to~0 to indicate a missing + * glyph bitmap. + * + * If `anode` is _not_ `NULL`, it receives the address of the cache node + * containing the bitmap, after increasing its reference count. This + * ensures that the node (as well as the image) will always be kept in + * the cache until you call @FTC_Node_Unref to 'release' it. + * + * If `anode` is `NULL`, the cache node is left unchanged, which means + * that the bitmap could be flushed out of the cache on the next call to + * one of the caching sub-system APIs. Don't assume that it is + * persistent! + */ + FT_EXPORT( FT_Error ) + FTC_SBitCache_Lookup( FTC_SBitCache cache, + FTC_ImageType type, + FT_UInt gindex, + FTC_SBit *sbit, + FTC_Node *anode ); + + + /************************************************************************** + * + * @function: + * FTC_SBitCache_LookupScaler + * + * @description: + * A variant of @FTC_SBitCache_Lookup that uses an @FTC_ScalerRec to + * specify the face ID and its size. + * + * @input: + * cache :: + * A handle to the source sbit cache. + * + * scaler :: + * A pointer to the scaler descriptor. + * + * load_flags :: + * The corresponding load flags. + * + * gindex :: + * The glyph index. + * + * @output: + * sbit :: + * A handle to a small bitmap descriptor. + * + * anode :: + * Used to return the address of the corresponding cache node after + * incrementing its reference count (see note below). + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The small bitmap descriptor and its bit buffer are owned by the cache + * and should never be freed by the application. They might as well + * disappear from memory on the next cache lookup, so don't treat them as + * persistent data. + * + * The descriptor's `buffer` field is set to~0 to indicate a missing + * glyph bitmap. 
+ * + * If `anode` is _not_ `NULL`, it receives the address of the cache node + * containing the bitmap, after increasing its reference count. This + * ensures that the node (as well as the image) will always be kept in + * the cache until you call @FTC_Node_Unref to 'release' it. + * + * If `anode` is `NULL`, the cache node is left unchanged, which means + * that the bitmap could be flushed out of the cache on the next call to + * one of the caching sub-system APIs. Don't assume that it is + * persistent! + */ + FT_EXPORT( FT_Error ) + FTC_SBitCache_LookupScaler( FTC_SBitCache cache, + FTC_Scaler scaler, + FT_ULong load_flags, + FT_UInt gindex, + FTC_SBit *sbit, + FTC_Node *anode ); + + /* */ + + +FT_END_HEADER + +#endif /* FTCACHE_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftchapters.h b/Example/include/freetype/ftchapters.h new file mode 100644 index 0000000..7566fbd --- /dev/null +++ b/Example/include/freetype/ftchapters.h @@ -0,0 +1,168 @@ +/**************************************************************************** + * + * This file defines the structure of the FreeType reference. + * It is used by the python script that generates the HTML files. + * + */ + + + /************************************************************************** + * + * @chapter: + * general_remarks + * + * @title: + * General Remarks + * + * @sections: + * preamble + * header_inclusion + * user_allocation + * + */ + + + /************************************************************************** + * + * @chapter: + * core_api + * + * @title: + * Core API + * + * @sections: + * basic_types + * library_setup + * face_creation + * font_testing_macros + * sizing_and_scaling + * glyph_retrieval + * character_mapping + * information_retrieval + * other_api_data + * + */ + + + /************************************************************************** + * + * @chapter: + * extended_api + * + * @title: + * Extended API + * + * @sections: + * glyph_variants + * color_management + * layer_management + * glyph_management + * mac_specific + * sizes_management + * header_file_macros + * + */ + + + /************************************************************************** + * + * @chapter: + * format_specific + * + * @title: + * Format-Specific API + * + * @sections: + * multiple_masters + * truetype_tables + * type1_tables + * sfnt_names + * bdf_fonts + * cid_fonts + * pfr_fonts + * winfnt_fonts + * svg_fonts + * font_formats + * gasp_table + * + */ + + + /************************************************************************** + * + * @chapter: + * module_specific + * + * @title: + * Controlling FreeType Modules + * + * @sections: + * auto_hinter + * cff_driver + * t1_cid_driver + * tt_driver + * pcf_driver + * ot_svg_driver + * properties + * parameter_tags + * lcd_rendering + * + */ + + + /************************************************************************** + * + * @chapter: + * cache_subsystem + * + * @title: + * Cache Sub-System + * + * @sections: + * cache_subsystem + * + */ + + + /************************************************************************** + * + * @chapter: + * support_api + * + * @title: + * Support API + * + * @sections: + * computations + * list_processing + * outline_processing + * quick_advance + * bitmap_handling + * raster + * glyph_stroker + * system_interface + * module_management + * gzip + * lzw + * bzip2 + * debugging_apis + * + */ + + + /************************************************************************** + * + * @chapter: + * error_codes + * + * @title: + 
* Error Codes + * + * @sections: + * error_enumerations + * error_code_values + * + */ + + +/* END */ diff --git a/Example/include/freetype/ftcid.h b/Example/include/freetype/ftcid.h new file mode 100644 index 0000000..ef22939 --- /dev/null +++ b/Example/include/freetype/ftcid.h @@ -0,0 +1,167 @@ +/**************************************************************************** + * + * ftcid.h + * + * FreeType API for accessing CID font information (specification). + * + * Copyright (C) 2007-2023 by + * Dereg Clegg and Michael Toftdal. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTCID_H_ +#define FTCID_H_ + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * cid_fonts + * + * @title: + * CID Fonts + * + * @abstract: + * CID-keyed font-specific API. + * + * @description: + * This section contains the declaration of CID-keyed font-specific + * functions. + * + */ + + + /************************************************************************** + * + * @function: + * FT_Get_CID_Registry_Ordering_Supplement + * + * @description: + * Retrieve the Registry/Ordering/Supplement triple (also known as the + * "R/O/S") from a CID-keyed font. + * + * @input: + * face :: + * A handle to the input face. + * + * @output: + * registry :: + * The registry, as a C~string, owned by the face. + * + * ordering :: + * The ordering, as a C~string, owned by the face. + * + * supplement :: + * The supplement. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function only works with CID faces, returning an error + * otherwise. + * + * @since: + * 2.3.6 + */ + FT_EXPORT( FT_Error ) + FT_Get_CID_Registry_Ordering_Supplement( FT_Face face, + const char* *registry, + const char* *ordering, + FT_Int *supplement ); + + + /************************************************************************** + * + * @function: + * FT_Get_CID_Is_Internally_CID_Keyed + * + * @description: + * Retrieve the type of the input face, CID keyed or not. In contrast + * to the @FT_IS_CID_KEYED macro this function returns successfully also + * for CID-keyed fonts in an SFNT wrapper. + * + * @input: + * face :: + * A handle to the input face. + * + * @output: + * is_cid :: + * The type of the face as an @FT_Bool. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function only works with CID faces and OpenType fonts, returning + * an error otherwise. + * + * @since: + * 2.3.9 + */ + FT_EXPORT( FT_Error ) + FT_Get_CID_Is_Internally_CID_Keyed( FT_Face face, + FT_Bool *is_cid ); + + + /************************************************************************** + * + * @function: + * FT_Get_CID_From_Glyph_Index + * + * @description: + * Retrieve the CID of the input glyph index. + * + * @input: + * face :: + * A handle to the input face. + * + * glyph_index :: + * The input glyph index. + * + * @output: + * cid :: + * The CID as an @FT_UInt. + * + * @return: + * FreeType error code. 0~means success. 
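+ *
+ * For example, the R/O/S and the CID of a glyph can be retrieved
+ * together; `face` and `glyph_index` are assumed to be valid:
+ *
+ * ```
+ *   const char*  registry;
+ *   const char*  ordering;
+ *   FT_Int       supplement;
+ *   FT_UInt      cid;
+ *
+ *
+ *   if ( !FT_Get_CID_Registry_Ordering_Supplement( face, &registry,
+ *                                                  &ordering,
+ *                                                  &supplement ) &&
+ *        !FT_Get_CID_From_Glyph_Index( face, glyph_index, &cid ) )
+ *   {
+ *     // e.g., a registry/ordering pair like `Adobe-Japan1', plus
+ *     // the CID of the glyph
+ *   }
+ * ```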
+ * + * @note: + * This function only works with CID faces and OpenType fonts, returning + * an error otherwise. + * + * @since: + * 2.3.9 + */ + FT_EXPORT( FT_Error ) + FT_Get_CID_From_Glyph_Index( FT_Face face, + FT_UInt glyph_index, + FT_UInt *cid ); + + /* */ + + +FT_END_HEADER + +#endif /* FTCID_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftcolor.h b/Example/include/freetype/ftcolor.h new file mode 100644 index 0000000..eae200f --- /dev/null +++ b/Example/include/freetype/ftcolor.h @@ -0,0 +1,1667 @@ +/**************************************************************************** + * + * ftcolor.h + * + * FreeType's glyph color management (specification). + * + * Copyright (C) 2018-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTCOLOR_H_ +#define FTCOLOR_H_ + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * color_management + * + * @title: + * Glyph Color Management + * + * @abstract: + * Retrieving and manipulating OpenType's 'CPAL' table data. + * + * @description: + * The functions described here allow access and manipulation of color + * palette entries in OpenType's 'CPAL' tables. + */ + + + /************************************************************************** + * + * @struct: + * FT_Color + * + * @description: + * This structure models a BGRA color value of a 'CPAL' palette entry. + * + * The used color space is sRGB; the colors are not pre-multiplied, and + * alpha values must be explicitly set. + * + * @fields: + * blue :: + * Blue value. + * + * green :: + * Green value. + * + * red :: + * Red value. + * + * alpha :: + * Alpha value, giving the red, green, and blue color's opacity. + * + * @since: + * 2.10 + */ + typedef struct FT_Color_ + { + FT_Byte blue; + FT_Byte green; + FT_Byte red; + FT_Byte alpha; + + } FT_Color; + + + /************************************************************************** + * + * @enum: + * FT_PALETTE_XXX + * + * @description: + * A list of bit field constants used in the `palette_flags` array of the + * @FT_Palette_Data structure to indicate for which background a palette + * with a given index is usable. + * + * @values: + * FT_PALETTE_FOR_LIGHT_BACKGROUND :: + * The palette is appropriate to use when displaying the font on a + * light background such as white. + * + * FT_PALETTE_FOR_DARK_BACKGROUND :: + * The palette is appropriate to use when displaying the font on a dark + * background such as black. + * + * @since: + * 2.10 + */ +#define FT_PALETTE_FOR_LIGHT_BACKGROUND 0x01 +#define FT_PALETTE_FOR_DARK_BACKGROUND 0x02 + + + /************************************************************************** + * + * @struct: + * FT_Palette_Data + * + * @description: + * This structure holds the data of the 'CPAL' table. + * + * @fields: + * num_palettes :: + * The number of palettes. 
+ * + * palette_name_ids :: + * An optional read-only array of palette name IDs with `num_palettes` + * elements, corresponding to entries like 'dark' or 'light' in the + * font's 'name' table. + * + * An empty name ID in the 'CPAL' table gets represented as value + * 0xFFFF. + * + * `NULL` if the font's 'CPAL' table doesn't contain appropriate data. + * + * palette_flags :: + * An optional read-only array of palette flags with `num_palettes` + * elements. Possible values are an ORed combination of + * @FT_PALETTE_FOR_LIGHT_BACKGROUND and + * @FT_PALETTE_FOR_DARK_BACKGROUND. + * + * `NULL` if the font's 'CPAL' table doesn't contain appropriate data. + * + * num_palette_entries :: + * The number of entries in a single palette. All palettes have the + * same size. + * + * palette_entry_name_ids :: + * An optional read-only array of palette entry name IDs with + * `num_palette_entries`. In each palette, entries with the same index + * have the same function. For example, index~0 might correspond to + * string 'outline' in the font's 'name' table to indicate that this + * palette entry is used for outlines, index~1 might correspond to + * 'fill' to indicate the filling color palette entry, etc. + * + * An empty entry name ID in the 'CPAL' table gets represented as value + * 0xFFFF. + * + * `NULL` if the font's 'CPAL' table doesn't contain appropriate data. + * + * @note: + * Use function @FT_Get_Sfnt_Name to map name IDs and entry name IDs to + * name strings. + * + * Use function @FT_Palette_Select to get the colors associated with a + * palette entry. + * + * @since: + * 2.10 + */ + typedef struct FT_Palette_Data_ { + FT_UShort num_palettes; + const FT_UShort* palette_name_ids; + const FT_UShort* palette_flags; + + FT_UShort num_palette_entries; + const FT_UShort* palette_entry_name_ids; + + } FT_Palette_Data; + + + /************************************************************************** + * + * @function: + * FT_Palette_Data_Get + * + * @description: + * Retrieve the face's color palette data. + * + * @input: + * face :: + * The source face handle. + * + * @output: + * apalette :: + * A pointer to an @FT_Palette_Data structure. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * All arrays in the returned @FT_Palette_Data structure are read-only. + * + * This function always returns an error if the config macro + * `TT_CONFIG_OPTION_COLOR_LAYERS` is not defined in `ftoption.h`. + * + * @since: + * 2.10 + */ + FT_EXPORT( FT_Error ) + FT_Palette_Data_Get( FT_Face face, + FT_Palette_Data *apalette ); + + + /************************************************************************** + * + * @function: + * FT_Palette_Select + * + * @description: + * This function has two purposes. + * + * (1) It activates a palette for rendering color glyphs, and + * + * (2) it retrieves all (unmodified) color entries of this palette. This + * function returns a read-write array, which means that a calling + * application can modify the palette entries on demand. + * + * A corollary of (2) is that calling the function, then modifying some + * values, then calling the function again with the same arguments resets + * all color entries to the original 'CPAL' values; all user modifications + * are lost. + * + * @input: + * face :: + * The source face handle. + * + * palette_index :: + * The palette index. 
+ * + * @output: + * apalette :: + * An array of color entries for a palette with index `palette_index`, + * having `num_palette_entries` elements (as found in the + * `FT_Palette_Data` structure). If `apalette` is set to `NULL`, no + * array gets returned (and no color entries can be modified). + * + * In case the font doesn't support color palettes, `NULL` is returned. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The array pointed to by `apalette_entries` is owned and managed by + * FreeType. + * + * This function always returns an error if the config macro + * `TT_CONFIG_OPTION_COLOR_LAYERS` is not defined in `ftoption.h`. + * + * @since: + * 2.10 + */ + FT_EXPORT( FT_Error ) + FT_Palette_Select( FT_Face face, + FT_UShort palette_index, + FT_Color* *apalette ); + + + /************************************************************************** + * + * @function: + * FT_Palette_Set_Foreground_Color + * + * @description: + * 'COLR' uses palette index 0xFFFF to indicate a 'text foreground + * color'. This function sets this value. + * + * @input: + * face :: + * The source face handle. + * + * foreground_color :: + * An `FT_Color` structure to define the text foreground color. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * If this function isn't called, the text foreground color is set to + * white opaque (BGRA value 0xFFFFFFFF) if + * @FT_PALETTE_FOR_DARK_BACKGROUND is present for the current palette, + * and black opaque (BGRA value 0x000000FF) otherwise, including the case + * that no palette types are available in the 'CPAL' table. + * + * This function always returns an error if the config macro + * `TT_CONFIG_OPTION_COLOR_LAYERS` is not defined in `ftoption.h`. + * + * @since: + * 2.10 + */ + FT_EXPORT( FT_Error ) + FT_Palette_Set_Foreground_Color( FT_Face face, + FT_Color foreground_color ); + + + /************************************************************************** + * + * @section: + * layer_management + * + * @title: + * Glyph Layer Management + * + * @abstract: + * Retrieving and manipulating OpenType's 'COLR' table data. + * + * @description: + * The functions described here allow access of colored glyph layer data + * in OpenType's 'COLR' tables. + */ + + + /************************************************************************** + * + * @struct: + * FT_LayerIterator + * + * @description: + * This iterator object is needed for @FT_Get_Color_Glyph_Layer. + * + * @fields: + * num_layers :: + * The number of glyph layers for the requested glyph index. Will be + * set by @FT_Get_Color_Glyph_Layer. + * + * layer :: + * The current layer. Will be set by @FT_Get_Color_Glyph_Layer. + * + * p :: + * An opaque pointer into 'COLR' table data. The caller must set this + * to `NULL` before the first call of @FT_Get_Color_Glyph_Layer. + */ + typedef struct FT_LayerIterator_ + { + FT_UInt num_layers; + FT_UInt layer; + FT_Byte* p; + + } FT_LayerIterator; + + + /************************************************************************** + * + * @function: + * FT_Get_Color_Glyph_Layer + * + * @description: + * This is an interface to the 'COLR' table in OpenType fonts to + * iteratively retrieve the colored glyph layers associated with the + * current glyph slot. 
+ * + * https://docs.microsoft.com/en-us/typography/opentype/spec/colr + * + * The glyph layer data for a given glyph index, if present, provides an + * alternative, multi-color glyph representation: Instead of rendering + * the outline or bitmap with the given glyph index, glyphs with the + * indices and colors returned by this function are rendered layer by + * layer. + * + * The returned elements are ordered in the z~direction from bottom to + * top; the 'n'th element should be rendered with the associated palette + * color and blended on top of the already rendered layers (elements 0, + * 1, ..., n-1). + * + * @input: + * face :: + * A handle to the parent face object. + * + * base_glyph :: + * The glyph index the colored glyph layers are associated with. + * + * @inout: + * iterator :: + * An @FT_LayerIterator object. For the first call you should set + * `iterator->p` to `NULL`. For all following calls, simply use the + * same object again. + * + * @output: + * aglyph_index :: + * The glyph index of the current layer. + * + * acolor_index :: + * The color index into the font face's color palette of the current + * layer. The value 0xFFFF is special; it doesn't reference a palette + * entry but indicates that the text foreground color should be used + * instead (to be set up by the application outside of FreeType). + * + * The color palette can be retrieved with @FT_Palette_Select. + * + * @return: + * Value~1 if everything is OK. If there are no more layers (or if there + * are no layers at all), value~0 gets returned. In case of an error, + * value~0 is returned also. + * + * @note: + * This function is necessary if you want to handle glyph layers by + * yourself. In particular, functions that operate with @FT_GlyphRec + * objects (like @FT_Get_Glyph or @FT_Glyph_To_Bitmap) don't have access + * to this information. + * + * Note that @FT_Render_Glyph is able to handle colored glyph layers + * automatically if the @FT_LOAD_COLOR flag is passed to a previous call + * to @FT_Load_Glyph. [This is an experimental feature.] + * + * @example: + * ``` + * FT_Color* palette; + * FT_LayerIterator iterator; + * + * FT_Bool have_layers; + * FT_UInt layer_glyph_index; + * FT_UInt layer_color_index; + * + * + * error = FT_Palette_Select( face, palette_index, &palette ); + * if ( error ) + * palette = NULL; + * + * iterator.p = NULL; + * have_layers = FT_Get_Color_Glyph_Layer( face, + * glyph_index, + * &layer_glyph_index, + * &layer_color_index, + * &iterator ); + * + * if ( palette && have_layers ) + * { + * do + * { + * FT_Color layer_color; + * + * + * if ( layer_color_index == 0xFFFF ) + * layer_color = text_foreground_color; + * else + * layer_color = palette[layer_color_index]; + * + * // Load and render glyph `layer_glyph_index', then + * // blend resulting pixmap (using color `layer_color') + * // with previously created pixmaps. + * + * } while ( FT_Get_Color_Glyph_Layer( face, + * glyph_index, + * &layer_glyph_index, + * &layer_color_index, + * &iterator ) ); + * } + * ``` + * + * @since: + * 2.10 + */ + FT_EXPORT( FT_Bool ) + FT_Get_Color_Glyph_Layer( FT_Face face, + FT_UInt base_glyph, + FT_UInt *aglyph_index, + FT_UInt *acolor_index, + FT_LayerIterator* iterator ); + + + /************************************************************************** + * + * @enum: + * FT_PaintFormat + * + * @description: + * Enumeration describing the different paint format types of the v1 + * extensions to the 'COLR' table, see + * 'https://github.com/googlefonts/colr-gradients-spec'. 
+ * + * The enumeration values loosely correspond with the format numbers of + * the specification: FreeType always returns a fully specified 'Paint' + * structure for the 'Transform', 'Translate', 'Scale', 'Rotate', and + * 'Skew' table types even though the specification has different formats + * depending on whether or not a center is specified, whether the scale + * is uniform in x and y~direction or not, etc. Also, only non-variable + * format identifiers are listed in this enumeration; as soon as support + * for variable 'COLR' v1 fonts is implemented, interpolation is + * performed dependent on axis coordinates, which are configured on the + * @FT_Face through @FT_Set_Var_Design_Coordinates. This implies that + * always static, readily interpolated values are returned in the 'Paint' + * structures. + * + * @since: + * 2.13 + */ + typedef enum FT_PaintFormat_ + { + FT_COLR_PAINTFORMAT_COLR_LAYERS = 1, + FT_COLR_PAINTFORMAT_SOLID = 2, + FT_COLR_PAINTFORMAT_LINEAR_GRADIENT = 4, + FT_COLR_PAINTFORMAT_RADIAL_GRADIENT = 6, + FT_COLR_PAINTFORMAT_SWEEP_GRADIENT = 8, + FT_COLR_PAINTFORMAT_GLYPH = 10, + FT_COLR_PAINTFORMAT_COLR_GLYPH = 11, + FT_COLR_PAINTFORMAT_TRANSFORM = 12, + FT_COLR_PAINTFORMAT_TRANSLATE = 14, + FT_COLR_PAINTFORMAT_SCALE = 16, + FT_COLR_PAINTFORMAT_ROTATE = 24, + FT_COLR_PAINTFORMAT_SKEW = 28, + FT_COLR_PAINTFORMAT_COMPOSITE = 32, + FT_COLR_PAINT_FORMAT_MAX = 33, + FT_COLR_PAINTFORMAT_UNSUPPORTED = 255 + + } FT_PaintFormat; + + + /************************************************************************** + * + * @struct: + * FT_ColorStopIterator + * + * @description: + * This iterator object is needed for @FT_Get_Colorline_Stops. It keeps + * state while iterating over the stops of an @FT_ColorLine, representing + * the `ColorLine` struct of the v1 extensions to 'COLR', see + * 'https://github.com/googlefonts/colr-gradients-spec'. Do not manually + * modify fields of this iterator. + * + * @fields: + * num_color_stops :: + * The number of color stops for the requested glyph index. Set by + * @FT_Get_Paint. + * + * current_color_stop :: + * The current color stop. Set by @FT_Get_Colorline_Stops. + * + * p :: + * An opaque pointer into 'COLR' table data. Set by @FT_Get_Paint. + * Updated by @FT_Get_Colorline_Stops. + * + * read_variable :: + * A boolean keeping track of whether variable color lines are to be + * read. Set by @FT_Get_Paint. + * + * @since: + * 2.13 + */ + typedef struct FT_ColorStopIterator_ + { + FT_UInt num_color_stops; + FT_UInt current_color_stop; + + FT_Byte* p; + + FT_Bool read_variable; + + } FT_ColorStopIterator; + + + /************************************************************************** + * + * @struct: + * FT_ColorIndex + * + * @description: + * A structure representing a `ColorIndex` value of the 'COLR' v1 + * extensions, see 'https://github.com/googlefonts/colr-gradients-spec'. + * + * @fields: + * palette_index :: + * The palette index into a 'CPAL' palette. + * + * alpha :: + * Alpha transparency value multiplied with the value from 'CPAL'. + * + * @since: + * 2.13 + */ + typedef struct FT_ColorIndex_ + { + FT_UInt16 palette_index; + FT_F2Dot14 alpha; + + } FT_ColorIndex; + + + /************************************************************************** + * + * @struct: + * FT_ColorStop + * + * @description: + * A structure representing a `ColorStop` value of the 'COLR' v1 + * extensions, see 'https://github.com/googlefonts/colr-gradients-spec'. 
+ * + * @fields: + * stop_offset :: + * The stop offset along the gradient, expressed as a 16.16 fixed-point + * coordinate. + * + * color :: + * The color information for this stop, see @FT_ColorIndex. + * + * @since: + * 2.13 + */ + typedef struct FT_ColorStop_ + { + FT_Fixed stop_offset; + FT_ColorIndex color; + + } FT_ColorStop; + + + /************************************************************************** + * + * @enum: + * FT_PaintExtend + * + * @description: + * An enumeration representing the 'Extend' mode of the 'COLR' v1 + * extensions, see 'https://github.com/googlefonts/colr-gradients-spec'. + * It describes how the gradient fill continues at the other boundaries. + * + * @since: + * 2.13 + */ + typedef enum FT_PaintExtend_ + { + FT_COLR_PAINT_EXTEND_PAD = 0, + FT_COLR_PAINT_EXTEND_REPEAT = 1, + FT_COLR_PAINT_EXTEND_REFLECT = 2 + + } FT_PaintExtend; + + + /************************************************************************** + * + * @struct: + * FT_ColorLine + * + * @description: + * A structure representing a `ColorLine` value of the 'COLR' v1 + * extensions, see 'https://github.com/googlefonts/colr-gradients-spec'. + * It describes a list of color stops along the defined gradient. + * + * @fields: + * extend :: + * The extend mode at the outer boundaries, see @FT_PaintExtend. + * + * color_stop_iterator :: + * The @FT_ColorStopIterator used to enumerate and retrieve the + * actual @FT_ColorStop's. + * + * @since: + * 2.13 + */ + typedef struct FT_ColorLine_ + { + FT_PaintExtend extend; + FT_ColorStopIterator color_stop_iterator; + + } FT_ColorLine; + + + /************************************************************************** + * + * @struct: + * FT_Affine23 + * + * @description: + * A structure used to store a 2x3 matrix. Coefficients are in + * 16.16 fixed-point format. The computation performed is + * + * ``` + * x' = x*xx + y*xy + dx + * y' = x*yx + y*yy + dy + * ``` + * + * @fields: + * xx :: + * Matrix coefficient. + * + * xy :: + * Matrix coefficient. + * + * dx :: + * x translation. + * + * yx :: + * Matrix coefficient. + * + * yy :: + * Matrix coefficient. + * + * dy :: + * y translation. + * + * @since: + * 2.13 + */ + typedef struct FT_Affine_23_ + { + FT_Fixed xx, xy, dx; + FT_Fixed yx, yy, dy; + + } FT_Affine23; + + + /************************************************************************** + * + * @enum: + * FT_Composite_Mode + * + * @description: + * An enumeration listing the 'COLR' v1 composite modes used in + * @FT_PaintComposite. For more details on each paint mode, see + * 'https://www.w3.org/TR/compositing-1/#porterduffcompositingoperators'. 
+ * + * @since: + * 2.13 + */ + typedef enum FT_Composite_Mode_ + { + FT_COLR_COMPOSITE_CLEAR = 0, + FT_COLR_COMPOSITE_SRC = 1, + FT_COLR_COMPOSITE_DEST = 2, + FT_COLR_COMPOSITE_SRC_OVER = 3, + FT_COLR_COMPOSITE_DEST_OVER = 4, + FT_COLR_COMPOSITE_SRC_IN = 5, + FT_COLR_COMPOSITE_DEST_IN = 6, + FT_COLR_COMPOSITE_SRC_OUT = 7, + FT_COLR_COMPOSITE_DEST_OUT = 8, + FT_COLR_COMPOSITE_SRC_ATOP = 9, + FT_COLR_COMPOSITE_DEST_ATOP = 10, + FT_COLR_COMPOSITE_XOR = 11, + FT_COLR_COMPOSITE_PLUS = 12, + FT_COLR_COMPOSITE_SCREEN = 13, + FT_COLR_COMPOSITE_OVERLAY = 14, + FT_COLR_COMPOSITE_DARKEN = 15, + FT_COLR_COMPOSITE_LIGHTEN = 16, + FT_COLR_COMPOSITE_COLOR_DODGE = 17, + FT_COLR_COMPOSITE_COLOR_BURN = 18, + FT_COLR_COMPOSITE_HARD_LIGHT = 19, + FT_COLR_COMPOSITE_SOFT_LIGHT = 20, + FT_COLR_COMPOSITE_DIFFERENCE = 21, + FT_COLR_COMPOSITE_EXCLUSION = 22, + FT_COLR_COMPOSITE_MULTIPLY = 23, + FT_COLR_COMPOSITE_HSL_HUE = 24, + FT_COLR_COMPOSITE_HSL_SATURATION = 25, + FT_COLR_COMPOSITE_HSL_COLOR = 26, + FT_COLR_COMPOSITE_HSL_LUMINOSITY = 27, + FT_COLR_COMPOSITE_MAX = 28 + + } FT_Composite_Mode; + + + /************************************************************************** + * + * @struct: + * FT_OpaquePaint + * + * @description: + * A structure representing an offset to a `Paint` value stored in any + * of the paint tables of a 'COLR' v1 font. Compare Offset<24> there. + * When 'COLR' v1 paint tables represented by FreeType objects such as + * @FT_PaintColrLayers, @FT_PaintComposite, or @FT_PaintTransform + * reference downstream nested paint tables, we do not immediately + * retrieve them but encapsulate their location in this type. Use + * @FT_Get_Paint to retrieve the actual @FT_COLR_Paint object that + * describes the details of the respective paint table. + * + * @fields: + * p :: + * An internal offset to a Paint table, needs to be set to NULL before + * passing this struct as an argument to @FT_Get_Paint. + * + * insert_root_transform :: + * An internal boolean to track whether an initial root transform is + * to be provided. Do not set this value. + * + * @since: + * 2.13 + */ + typedef struct FT_Opaque_Paint_ + { + FT_Byte* p; + FT_Bool insert_root_transform; + } FT_OpaquePaint; + + + /************************************************************************** + * + * @struct: + * FT_PaintColrLayers + * + * @description: + * A structure representing a `PaintColrLayers` table of a 'COLR' v1 + * font. This table describes a set of layers that are to be composited + * with composite mode `FT_COLR_COMPOSITE_SRC_OVER`. The return value + * of this function is an @FT_LayerIterator initialized so that it can + * be used with @FT_Get_Paint_Layers to retrieve the @FT_OpaquePaint + * objects as references to each layer. + * + * @fields: + * layer_iterator :: + * The layer iterator that describes the layers of this paint. + * + * @since: + * 2.13 + */ + typedef struct FT_PaintColrLayers_ + { + FT_LayerIterator layer_iterator; + + } FT_PaintColrLayers; + + + /************************************************************************** + * + * @struct: + * FT_PaintSolid + * + * @description: + * A structure representing a `PaintSolid` value of the 'COLR' v1 + * extensions, see 'https://github.com/googlefonts/colr-gradients-spec'. + * Using a `PaintSolid` value means that the glyph layer filled with + * this paint is solid-colored and does not contain a gradient. + * + * @fields: + * color :: + * The color information for this solid paint, see @FT_ColorIndex. 
+ * + * @since: + * 2.13 + */ + typedef struct FT_PaintSolid_ + { + FT_ColorIndex color; + + } FT_PaintSolid; + + + /************************************************************************** + * + * @struct: + * FT_PaintLinearGradient + * + * @description: + * A structure representing a `PaintLinearGradient` value of the 'COLR' + * v1 extensions, see + * 'https://github.com/googlefonts/colr-gradients-spec'. The glyph + * layer filled with this paint is drawn filled with a linear gradient. + * + * @fields: + * colorline :: + * The @FT_ColorLine information for this paint, i.e., the list of + * color stops along the gradient. + * + * p0 :: + * The starting point of the gradient definition in font units + * represented as a 16.16 fixed-point `FT_Vector`. + * + * p1 :: + * The end point of the gradient definition in font units + * represented as a 16.16 fixed-point `FT_Vector`. + * + * p2 :: + * Optional point~p2 to rotate the gradient in font units + * represented as a 16.16 fixed-point `FT_Vector`. + * Otherwise equal to~p0. + * + * @since: + * 2.13 + */ + typedef struct FT_PaintLinearGradient_ + { + FT_ColorLine colorline; + + /* TODO: Potentially expose those as x0, y0 etc. */ + FT_Vector p0; + FT_Vector p1; + FT_Vector p2; + + } FT_PaintLinearGradient; + + + /************************************************************************** + * + * @struct: + * FT_PaintRadialGradient + * + * @description: + * A structure representing a `PaintRadialGradient` value of the 'COLR' + * v1 extensions, see + * 'https://github.com/googlefonts/colr-gradients-spec'. The glyph + * layer filled with this paint is drawn filled with a radial gradient. + * + * @fields: + * colorline :: + * The @FT_ColorLine information for this paint, i.e., the list of + * color stops along the gradient. + * + * c0 :: + * The center of the starting point of the radial gradient in font + * units represented as a 16.16 fixed-point `FT_Vector`. + * + * r0 :: + * The radius of the starting circle of the radial gradient in font + * units represented as a 16.16 fixed-point value. + * + * c1 :: + * The center of the end point of the radial gradient in font units + * represented as a 16.16 fixed-point `FT_Vector`. + * + * r1 :: + * The radius of the end circle of the radial gradient in font + * units represented as a 16.16 fixed-point value. + * + * @since: + * 2.13 + */ + typedef struct FT_PaintRadialGradient_ + { + FT_ColorLine colorline; + + FT_Vector c0; + FT_Pos r0; + FT_Vector c1; + FT_Pos r1; + + } FT_PaintRadialGradient; + + + /************************************************************************** + * + * @struct: + * FT_PaintSweepGradient + * + * @description: + * A structure representing a `PaintSweepGradient` value of the 'COLR' + * v1 extensions, see + * 'https://github.com/googlefonts/colr-gradients-spec'. The glyph + * layer filled with this paint is drawn filled with a sweep gradient + * from `start_angle` to `end_angle`. + * + * @fields: + * colorline :: + * The @FT_ColorLine information for this paint, i.e., the list of + * color stops along the gradient. + * + * center :: + * The center of the sweep gradient in font units represented as a + * vector of 16.16 fixed-point values. + * + * start_angle :: + * The start angle of the sweep gradient in 16.16 fixed-point + * format specifying degrees divided by 180.0 (as in the + * spec). Multiply by 180.0f to receive degrees value. Values are + * given counter-clockwise, starting from the (positive) y~axis. 
+ * + * end_angle :: + * The end angle of the sweep gradient in 16.16 fixed-point + * format specifying degrees divided by 180.0 (as in the + * spec). Multiply by 180.0f to receive degrees value. Values are + * given counter-clockwise, starting from the (positive) y~axis. + * + * @since: + * 2.13 + */ + typedef struct FT_PaintSweepGradient_ + { + FT_ColorLine colorline; + + FT_Vector center; + FT_Fixed start_angle; + FT_Fixed end_angle; + + } FT_PaintSweepGradient; + + + /************************************************************************** + * + * @struct: + * FT_PaintGlyph + * + * @description: + * A structure representing a 'COLR' v1 `PaintGlyph` paint table. + * + * @fields: + * paint :: + * An opaque paint object pointing to a `Paint` table that serves as + * the fill for the glyph ID. + * + * glyphID :: + * The glyph ID from the 'glyf' table, which serves as the contour + * information that is filled with paint. + * + * @since: + * 2.13 + */ + typedef struct FT_PaintGlyph_ + { + FT_OpaquePaint paint; + FT_UInt glyphID; + + } FT_PaintGlyph; + + + /************************************************************************** + * + * @struct: + * FT_PaintColrGlyph + * + * @description: + * A structure representing a 'COLR' v1 `PaintColorGlyph` paint table. + * + * @fields: + * glyphID :: + * The glyph ID from the `BaseGlyphV1List` table that is drawn for + * this paint. + * + * @since: + * 2.13 + */ + typedef struct FT_PaintColrGlyph_ + { + FT_UInt glyphID; + + } FT_PaintColrGlyph; + + + /************************************************************************** + * + * @struct: + * FT_PaintTransform + * + * @description: + * A structure representing a 'COLR' v1 `PaintTransform` paint table. + * + * @fields: + * paint :: + * An opaque paint that is subject to being transformed. + * + * affine :: + * A 2x3 transformation matrix in @FT_Affine23 format containing + * 16.16 fixed-point values. + * + * @since: + * 2.13 + */ + typedef struct FT_PaintTransform_ + { + FT_OpaquePaint paint; + FT_Affine23 affine; + + } FT_PaintTransform; + + + /************************************************************************** + * + * @struct: + * FT_PaintTranslate + * + * @description: + * A structure representing a 'COLR' v1 `PaintTranslate` paint table. + * Used for translating downstream paints by a given x and y~delta. + * + * @fields: + * paint :: + * An @FT_OpaquePaint object referencing the paint that is to be + * rotated. + * + * dx :: + * Translation in x~direction in font units represented as a + * 16.16 fixed-point value. + * + * dy :: + * Translation in y~direction in font units represented as a + * 16.16 fixed-point value. + * + * @since: + * 2.13 + */ + typedef struct FT_PaintTranslate_ + { + FT_OpaquePaint paint; + + FT_Fixed dx; + FT_Fixed dy; + + } FT_PaintTranslate; + + + /************************************************************************** + * + * @struct: + * FT_PaintScale + * + * @description: + * A structure representing all of the 'COLR' v1 'PaintScale*' paint + * tables. Used for scaling downstream paints by a given x and y~scale, + * with a given center. This structure is used for all 'PaintScale*' + * types that are part of specification; fields of this structure are + * filled accordingly. If there is a center, the center values are set, + * otherwise they are set to the zero coordinate. If the source font + * file has 'PaintScaleUniform*' set, the scale values are set + * accordingly to the same value. 
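The sweep-gradient, rotate, and skew angles are all encoded the same way: 16.16 fixed-point values holding degrees divided by 180. A minimal sketch of the conversion the descriptions ask for; `colr_angle_to_degrees` is a hypothetical helper name.

```c
#include <ft2build.h>
#include FT_FREETYPE_H

/* Convert a 'COLR' v1 angle (16.16 fixed-point, degrees / 180.0)   */
/* to plain degrees, e.g., for FT_PaintSweepGradient.start_angle,   */
/* FT_PaintRotate.angle, or FT_PaintSkew.x_skew_angle.              */
static float
colr_angle_to_degrees( FT_Fixed  angle )
{
  return ( angle / 65536.0f ) * 180.0f;
}
```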
+ * + * @fields: + * paint :: + * An @FT_OpaquePaint object referencing the paint that is to be + * scaled. + * + * scale_x :: + * Scale factor in x~direction represented as a + * 16.16 fixed-point value. + * + * scale_y :: + * Scale factor in y~direction represented as a + * 16.16 fixed-point value. + * + * center_x :: + * x~coordinate of center point to scale from represented as a + * 16.16 fixed-point value. + * + * center_y :: + * y~coordinate of center point to scale from represented as a + * 16.16 fixed-point value. + * + * @since: + * 2.13 + */ + typedef struct FT_PaintScale_ + { + FT_OpaquePaint paint; + + FT_Fixed scale_x; + FT_Fixed scale_y; + + FT_Fixed center_x; + FT_Fixed center_y; + + } FT_PaintScale; + + + /************************************************************************** + * + * @struct: + * FT_PaintRotate + * + * @description: + * A structure representing a 'COLR' v1 `PaintRotate` paint table. Used + * for rotating downstream paints with a given center and angle. + * + * @fields: + * paint :: + * An @FT_OpaquePaint object referencing the paint that is to be + * rotated. + * + * angle :: + * The rotation angle that is to be applied in degrees divided by + * 180.0 (as in the spec) represented as a 16.16 fixed-point + * value. Multiply by 180.0f to receive degrees value. + * + * center_x :: + * The x~coordinate of the pivot point of the rotation in font + * units represented as a 16.16 fixed-point value. + * + * center_y :: + * The y~coordinate of the pivot point of the rotation in font + * units represented as a 16.16 fixed-point value. + * + * @since: + * 2.13 + */ + + typedef struct FT_PaintRotate_ + { + FT_OpaquePaint paint; + + FT_Fixed angle; + + FT_Fixed center_x; + FT_Fixed center_y; + + } FT_PaintRotate; + + + /************************************************************************** + * + * @struct: + * FT_PaintSkew + * + * @description: + * A structure representing a 'COLR' v1 `PaintSkew` paint table. Used + * for skewing or shearing downstream paints by a given center and + * angle. + * + * @fields: + * paint :: + * An @FT_OpaquePaint object referencing the paint that is to be + * skewed. + * + * x_skew_angle :: + * The skewing angle in x~direction in degrees divided by 180.0 + * (as in the spec) represented as a 16.16 fixed-point + * value. Multiply by 180.0f to receive degrees. + * + * y_skew_angle :: + * The skewing angle in y~direction in degrees divided by 180.0 + * (as in the spec) represented as a 16.16 fixed-point + * value. Multiply by 180.0f to receive degrees. + * + * center_x :: + * The x~coordinate of the pivot point of the skew in font units + * represented as a 16.16 fixed-point value. + * + * center_y :: + * The y~coordinate of the pivot point of the skew in font units + * represented as a 16.16 fixed-point value. + * + * @since: + * 2.13 + */ + typedef struct FT_PaintSkew_ + { + FT_OpaquePaint paint; + + FT_Fixed x_skew_angle; + FT_Fixed y_skew_angle; + + FT_Fixed center_x; + FT_Fixed center_y; + + } FT_PaintSkew; + + + /************************************************************************** + * + * @struct: + * FT_PaintComposite + * + * @description: + * A structure representing a 'COLR' v1 `PaintComposite` paint table. + * Used for compositing two paints in a 'COLR' v1 directed acyclic graph. + * + * @fields: + * source_paint :: + * An @FT_OpaquePaint object referencing the source that is to be + * composited. + * + * composite_mode :: + * An @FT_Composite_Mode enum value determining the composition + * operation. 
+ * + * backdrop_paint :: + * An @FT_OpaquePaint object referencing the backdrop paint that + * `source_paint` is composited onto. + * + * @since: + * 2.13 + */ + typedef struct FT_PaintComposite_ + { + FT_OpaquePaint source_paint; + FT_Composite_Mode composite_mode; + FT_OpaquePaint backdrop_paint; + + } FT_PaintComposite; + + + /************************************************************************** + * + * @union: + * FT_COLR_Paint + * + * @description: + * A union object representing format and details of a paint table of a + * 'COLR' v1 font, see + * 'https://github.com/googlefonts/colr-gradients-spec'. Use + * @FT_Get_Paint to retrieve a @FT_COLR_Paint for an @FT_OpaquePaint + * object. + * + * @fields: + * format :: + * The gradient format for this Paint structure. + * + * u :: + * Union of all paint table types: + * + * * @FT_PaintColrLayers + * * @FT_PaintGlyph + * * @FT_PaintSolid + * * @FT_PaintLinearGradient + * * @FT_PaintRadialGradient + * * @FT_PaintSweepGradient + * * @FT_PaintTransform + * * @FT_PaintTranslate + * * @FT_PaintRotate + * * @FT_PaintSkew + * * @FT_PaintComposite + * * @FT_PaintColrGlyph + * + * @since: + * 2.13 + */ + typedef struct FT_COLR_Paint_ + { + FT_PaintFormat format; + + union + { + FT_PaintColrLayers colr_layers; + FT_PaintGlyph glyph; + FT_PaintSolid solid; + FT_PaintLinearGradient linear_gradient; + FT_PaintRadialGradient radial_gradient; + FT_PaintSweepGradient sweep_gradient; + FT_PaintTransform transform; + FT_PaintTranslate translate; + FT_PaintScale scale; + FT_PaintRotate rotate; + FT_PaintSkew skew; + FT_PaintComposite composite; + FT_PaintColrGlyph colr_glyph; + + } u; + + } FT_COLR_Paint; + + + /************************************************************************** + * + * @enum: + * FT_Color_Root_Transform + * + * @description: + * An enumeration to specify whether @FT_Get_Color_Glyph_Paint is to + * return a root transform to configure the client's graphics context + * matrix. + * + * @values: + * FT_COLOR_INCLUDE_ROOT_TRANSFORM :: + * Do include the root transform as the initial @FT_COLR_Paint object. + * + * FT_COLOR_NO_ROOT_TRANSFORM :: + * Do not output an initial root transform. + * + * @since: + * 2.13 + */ + typedef enum FT_Color_Root_Transform_ + { + FT_COLOR_INCLUDE_ROOT_TRANSFORM, + FT_COLOR_NO_ROOT_TRANSFORM, + + FT_COLOR_ROOT_TRANSFORM_MAX + + } FT_Color_Root_Transform; + + + /************************************************************************** + * + * @struct: + * FT_ClipBox + * + * @description: + * A structure representing a 'COLR' v1 'ClipBox' table. 'COLR' v1 + * glyphs may optionally define a clip box for aiding allocation or + * defining a maximum drawable region. Use @FT_Get_Color_Glyph_ClipBox + * to retrieve it. + * + * @fields: + * bottom_left :: + * The bottom left corner of the clip box as an @FT_Vector with + * fixed-point coordinates in 26.6 format. + * + * top_left :: + * The top left corner of the clip box as an @FT_Vector with + * fixed-point coordinates in 26.6 format. + * + * top_right :: + * The top right corner of the clip box as an @FT_Vector with + * fixed-point coordinates in 26.6 format. + * + * bottom_right :: + * The bottom right corner of the clip box as an @FT_Vector with + * fixed-point coordinates in 26.6 format. 
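Once @FT_Get_Paint has filled an `FT_COLR_Paint`, the `format` field selects which member of the union `u` is valid. A minimal dispatch sketch under that assumption; only a few formats are shown, and a real client would handle all of them and recurse into nested `FT_OpaquePaint` fields via @FT_Get_Paint.

```c
#include <stdio.h>

#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_COLOR_H

/* Inspect an FT_COLR_Paint: `format` tells which union member of   */
/* `u` is valid.  Only a few cases are sketched.                    */
static void
describe_paint( const FT_COLR_Paint*  paint )
{
  switch ( paint->format )
  {
  case FT_COLR_PAINTFORMAT_SOLID:
    printf( "solid fill, CPAL entry %u\n",
            paint->u.solid.color.palette_index );
    break;

  case FT_COLR_PAINTFORMAT_GLYPH:
    printf( "fill outline of glyph %u\n", paint->u.glyph.glyphID );
    break;

  case FT_COLR_PAINTFORMAT_COLR_LAYERS:
    printf( "%u layers to composite\n",
            paint->u.colr_layers.layer_iterator.num_layers );
    break;

  default:
    printf( "paint format %d (not handled here)\n", paint->format );
    break;
  }
}
```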
+ * + * @since: + * 2.13 + */ + typedef struct FT_ClipBox_ + { + FT_Vector bottom_left; + FT_Vector top_left; + FT_Vector top_right; + FT_Vector bottom_right; + + } FT_ClipBox; + + + /************************************************************************** + * + * @function: + * FT_Get_Color_Glyph_Paint + * + * @description: + * This is the starting point and interface to color gradient + * information in a 'COLR' v1 table in OpenType fonts to recursively + * retrieve the paint tables for the directed acyclic graph of a colored + * glyph, given a glyph ID. + * + * https://github.com/googlefonts/colr-gradients-spec + * + * In a 'COLR' v1 font, each color glyph defines a directed acyclic + * graph of nested paint tables, such as `PaintGlyph`, `PaintSolid`, + * `PaintLinearGradient`, `PaintRadialGradient`, and so on. Using this + * function and specifying a glyph ID, one retrieves the root paint + * table for this glyph ID. + * + * This function allows control whether an initial root transform is + * returned to configure scaling, transform, and translation correctly + * on the client's graphics context. The initial root transform is + * computed and returned according to the values configured for @FT_Size + * and @FT_Set_Transform on the @FT_Face object, see below for details + * of the `root_transform` parameter. This has implications for a + * client 'COLR' v1 implementation: When this function returns an + * initially computed root transform, at the time of executing the + * @FT_PaintGlyph operation, the contours should be retrieved using + * @FT_Load_Glyph at unscaled, untransformed size. This is because the + * root transform applied to the graphics context will take care of + * correct scaling. + * + * Alternatively, to allow hinting of contours, at the time of executing + * @FT_Load_Glyph, the current graphics context transformation matrix + * can be decomposed into a scaling matrix and a remainder, and + * @FT_Load_Glyph can be used to retrieve the contours at scaled size. + * Care must then be taken to blit or clip to the graphics context with + * taking this remainder transformation into account. + * + * @input: + * face :: + * A handle to the parent face object. + * + * base_glyph :: + * The glyph index for which to retrieve the root paint table. + * + * root_transform :: + * Specifies whether an initially computed root is returned by the + * @FT_PaintTransform operation to account for the activated size + * (see @FT_Activate_Size) and the configured transform and translate + * (see @FT_Set_Transform). + * + * This root transform is returned before nodes of the glyph graph of + * the font are returned. Subsequent @FT_COLR_Paint structures + * contain unscaled and untransformed values. The inserted root + * transform enables the client application to apply an initial + * transform to its graphics context. When executing subsequent + * FT_COLR_Paint operations, values from @FT_COLR_Paint operations + * will ultimately be correctly scaled because of the root transform + * applied to the graphics context. Use + * @FT_COLOR_INCLUDE_ROOT_TRANSFORM to include the root transform, use + * @FT_COLOR_NO_ROOT_TRANSFORM to not include it. The latter may be + * useful when traversing the 'COLR' v1 glyph graph and reaching a + * @FT_PaintColrGlyph. When recursing into @FT_PaintColrGlyph and + * painting that inline, no additional root transform is needed as it + * has already been applied to the graphics context at the beginning + * of drawing this glyph. 
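A traversal therefore typically starts by requesting the root paint with the root transform included and resolving it with @FT_Get_Paint. A minimal sketch of that first step; `load_root_paint` is a hypothetical helper, `glyph_index` is assumed to be a 'COLR' v1 base glyph, and error handling is reduced to early returns.

```c
#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_COLOR_H

/* Fetch and resolve the root paint of a 'COLR' v1 base glyph.      */
/* Returns 1 on success, 0 if the glyph has no v1 paint graph or an */
/* error occurred.                                                  */
static int
load_root_paint( FT_Face         face,
                 FT_UInt         glyph_index,
                 FT_COLR_Paint*  root )
{
  FT_OpaquePaint  opaque;


  opaque.p = NULL;

  /* Ask for the initial root transform so that the client's      */
  /* graphics context can be configured from it; subsequent paint */
  /* values are then unscaled and untransformed.                  */
  if ( !FT_Get_Color_Glyph_Paint( face, glyph_index,
                                  FT_COLOR_INCLUDE_ROOT_TRANSFORM,
                                  &opaque ) )
    return 0;

  return FT_Get_Paint( face, opaque, root ) ? 1 : 0;
}
```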
+ * + * @output: + * paint :: + * The @FT_OpaquePaint object that references the actual paint table. + * + * The respective actual @FT_COLR_Paint object is retrieved via + * @FT_Get_Paint. + * + * @return: + * Value~1 if everything is OK. If no color glyph is found, or the root + * paint could not be retrieved, value~0 gets returned. In case of an + * error, value~0 is returned also. + * + * @since: + * 2.13 + */ + FT_EXPORT( FT_Bool ) + FT_Get_Color_Glyph_Paint( FT_Face face, + FT_UInt base_glyph, + FT_Color_Root_Transform root_transform, + FT_OpaquePaint* paint ); + + + /************************************************************************** + * + * @function: + * FT_Get_Color_Glyph_ClipBox + * + * @description: + * Search for a 'COLR' v1 clip box for the specified `base_glyph` and + * fill the `clip_box` parameter with the 'COLR' v1 'ClipBox' information + * if one is found. + * + * @input: + * face :: + * A handle to the parent face object. + * + * base_glyph :: + * The glyph index for which to retrieve the clip box. + * + * @output: + * clip_box :: + * The clip box for the requested `base_glyph` if one is found. The + * clip box is computed taking scale and transformations configured on + * the @FT_Face into account. @FT_ClipBox contains @FT_Vector values + * in 26.6 format. + * + * @return: + * Value~1 if a clip box is found. If no clip box is found or an error + * occured, value~0 is returned. + * + * @note: + * To retrieve the clip box in font units, reset scale to units-per-em + * and remove transforms configured using @FT_Set_Transform. + * + * @since: + * 2.13 + */ + FT_EXPORT( FT_Bool ) + FT_Get_Color_Glyph_ClipBox( FT_Face face, + FT_UInt base_glyph, + FT_ClipBox* clip_box ); + + + /************************************************************************** + * + * @function: + * FT_Get_Paint_Layers + * + * @description: + * Access the layers of a `PaintColrLayers` table. + * + * If the root paint of a color glyph, or a nested paint of a 'COLR' + * glyph is a `PaintColrLayers` table, this function retrieves the + * layers of the `PaintColrLayers` table. + * + * The @FT_PaintColrLayers object contains an @FT_LayerIterator, which + * is used here to iterate over the layers. Each layer is returned as + * an @FT_OpaquePaint object, which then can be used with @FT_Get_Paint + * to retrieve the actual paint object. + * + * @input: + * face :: + * A handle to the parent face object. + * + * @inout: + * iterator :: + * The @FT_LayerIterator from an @FT_PaintColrLayers object, for which + * the layers are to be retrieved. The internal state of the iterator + * is incremented after one call to this function for retrieving one + * layer. + * + * @output: + * paint :: + * The @FT_OpaquePaint object that references the actual paint table. + * The respective actual @FT_COLR_Paint object is retrieved via + * @FT_Get_Paint. + * + * @return: + * Value~1 if everything is OK. Value~0 gets returned when the paint + * object can not be retrieved or any other error occurs. + * + * @since: + * 2.13 + */ + FT_EXPORT( FT_Bool ) + FT_Get_Paint_Layers( FT_Face face, + FT_LayerIterator* iterator, + FT_OpaquePaint* paint ); + + + /************************************************************************** + * + * @function: + * FT_Get_Colorline_Stops + * + * @description: + * This is an interface to color gradient information in a 'COLR' v1 + * table in OpenType fonts to iteratively retrieve the gradient and + * solid fill information for colored glyph layers for a specified glyph + * ID. 
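Iterating the layers of an @FT_PaintColrLayers value then looks roughly like the sketch below: the iterator is taken from the paint, and every call to @FT_Get_Paint_Layers yields one @FT_OpaquePaint that is resolved with @FT_Get_Paint. The `handle_layer` callback and `walk_layers` are hypothetical names standing in for client code.

```c
#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_COLOR_H

/* Walk the layers of an FT_PaintColrLayers value in the order they */
/* are stored in the font; the layers are meant to be composited    */
/* with FT_COLR_COMPOSITE_SRC_OVER.                                 */
static void
walk_layers( FT_Face              face,
             FT_PaintColrLayers*  colr_layers,
             void               (*handle_layer)( FT_Face,
                                                  const FT_COLR_Paint* ) )
{
  FT_LayerIterator  iterator = colr_layers->layer_iterator;
  FT_OpaquePaint    opaque;
  FT_COLR_Paint     layer_paint;


  while ( FT_Get_Paint_Layers( face, &iterator, &opaque ) )
  {
    if ( FT_Get_Paint( face, opaque, &layer_paint ) )
      handle_layer( face, &layer_paint );
  }
}
```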
+ * + * https://github.com/googlefonts/colr-gradients-spec + * + * @input: + * face :: + * A handle to the parent face object. + * + * @inout: + * iterator :: + * The retrieved @FT_ColorStopIterator, configured on an @FT_ColorLine, + * which in turn got retrieved via paint information in + * @FT_PaintLinearGradient or @FT_PaintRadialGradient. + * + * @output: + * color_stop :: + * Color index and alpha value for the retrieved color stop. + * + * @return: + * Value~1 if everything is OK. If there are no more color stops, + * value~0 gets returned. In case of an error, value~0 is returned + * also. + * + * @since: + * 2.13 + */ + FT_EXPORT( FT_Bool ) + FT_Get_Colorline_Stops( FT_Face face, + FT_ColorStop* color_stop, + FT_ColorStopIterator* iterator ); + + + /************************************************************************** + * + * @function: + * FT_Get_Paint + * + * @description: + * Access the details of a paint using an @FT_OpaquePaint opaque paint + * object, which internally stores the offset to the respective `Paint` + * object in the 'COLR' table. + * + * @input: + * face :: + * A handle to the parent face object. + * + * opaque_paint :: + * The opaque paint object for which the underlying @FT_COLR_Paint + * data is to be retrieved. + * + * @output: + * paint :: + * The specific @FT_COLR_Paint object containing information coming + * from one of the font's `Paint*` tables. + * + * @return: + * Value~1 if everything is OK. Value~0 if no details can be found for + * this paint or any other error occured. + * + * @since: + * 2.13 + */ + FT_EXPORT( FT_Bool ) + FT_Get_Paint( FT_Face face, + FT_OpaquePaint opaque_paint, + FT_COLR_Paint* paint ); + + /* */ + + +FT_END_HEADER + +#endif /* FTCOLOR_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftdriver.h b/Example/include/freetype/ftdriver.h new file mode 100644 index 0000000..7af7465 --- /dev/null +++ b/Example/include/freetype/ftdriver.h @@ -0,0 +1,1246 @@ +/**************************************************************************** + * + * ftdriver.h + * + * FreeType API for controlling driver modules (specification only). + * + * Copyright (C) 2017-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTDRIVER_H_ +#define FTDRIVER_H_ + +#include +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * auto_hinter + * + * @title: + * The auto-hinter + * + * @abstract: + * Controlling the auto-hinting module. + * + * @description: + * While FreeType's auto-hinter doesn't expose API functions by itself, + * it is possible to control its behaviour with @FT_Property_Set and + * @FT_Property_Get. The following lists the available properties + * together with the necessary macros and structures. + * + * Note that the auto-hinter's module name is 'autofitter' for historical + * reasons. 
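Putting @FT_ColorLine, @FT_ColorStopIterator, and @FT_Get_Colorline_Stops together, enumerating the stops of a gradient paint might look like the following sketch; `for_each_stop` and the `handle_stop` callback are hypothetical names, and error handling is omitted.

```c
#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_COLOR_H

/* Enumerate the color stops of a gradient's color line, e.g., of   */
/* FT_PaintLinearGradient.colorline.  Each FT_ColorStop delivers a  */
/* 16.16 stop offset plus an FT_ColorIndex (palette index, alpha).  */
static void
for_each_stop( FT_Face        face,
               FT_ColorLine*  colorline,
               void         (*handle_stop)( const FT_ColorStop* ) )
{
  FT_ColorStopIterator  iterator = colorline->color_stop_iterator;
  FT_ColorStop          stop;


  while ( FT_Get_Colorline_Stops( face, &stop, &iterator ) )
    handle_stop( &stop );
}
```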
+ * + * Available properties are @increase-x-height, @no-stem-darkening + * (experimental), @darkening-parameters (experimental), + * @glyph-to-script-map (experimental), @fallback-script (experimental), + * and @default-script (experimental), as documented in the @properties + * section. + * + */ + + + /************************************************************************** + * + * @section: + * cff_driver + * + * @title: + * The CFF driver + * + * @abstract: + * Controlling the CFF driver module. + * + * @description: + * While FreeType's CFF driver doesn't expose API functions by itself, it + * is possible to control its behaviour with @FT_Property_Set and + * @FT_Property_Get. + * + * The CFF driver's module name is 'cff'. + * + * Available properties are @hinting-engine, @no-stem-darkening, + * @darkening-parameters, and @random-seed, as documented in the + * @properties section. + * + * + * **Hinting and anti-aliasing principles of the new engine** + * + * The rasterizer is positioning horizontal features (e.g., ascender + * height & x-height, or crossbars) on the pixel grid and minimizing the + * amount of anti-aliasing applied to them, while placing vertical + * features (vertical stems) on the pixel grid without hinting, thus + * representing the stem position and weight accurately. Sometimes the + * vertical stems may be only partially black. In this context, + * 'anti-aliasing' means that stems are not positioned exactly on pixel + * borders, causing a fuzzy appearance. + * + * There are two principles behind this approach. + * + * 1) No hinting in the horizontal direction: Unlike 'superhinted' + * TrueType, which changes glyph widths to accommodate regular + * inter-glyph spacing, Adobe's approach is 'faithful to the design' in + * representing both the glyph width and the inter-glyph spacing designed + * for the font. This makes the screen display as close as it can be to + * the result one would get with infinite resolution, while preserving + * what is considered the key characteristics of each glyph. Note that + * the distances between unhinted and grid-fitted positions at small + * sizes are comparable to kerning values and thus would be noticeable + * (and distracting) while reading if hinting were applied. + * + * One of the reasons to not hint horizontally is anti-aliasing for LCD + * screens: The pixel geometry of modern displays supplies three vertical + * subpixels as the eye moves horizontally across each visible pixel. On + * devices where we can be certain this characteristic is present a + * rasterizer can take advantage of the subpixels to add increments of + * weight. In Western writing systems this turns out to be the more + * critical direction anyway; the weights and spacing of vertical stems + * (see above) are central to Armenian, Cyrillic, Greek, and Latin type + * designs. Even when the rasterizer uses greyscale anti-aliasing instead + * of color (a necessary compromise when one doesn't know the screen + * characteristics), the unhinted vertical features preserve the design's + * weight and spacing much better than aliased type would. + * + * 2) Alignment in the vertical direction: Weights and spacing along the + * y~axis are less critical; what is much more important is the visual + * alignment of related features (like cap-height and x-height). The + * sense of alignment for these is enhanced by the sharpness of grid-fit + * edges, while the cruder vertical resolution (full pixels instead of + * 1/3 pixels) is less of a problem. 
+ * + * On the technical side, horizontal alignment zones for ascender, + * x-height, and other important height values (traditionally called + * 'blue zones') as defined in the font are positioned independently, + * each being rounded to the nearest pixel edge, taking care of overshoot + * suppression at small sizes, stem darkening, and scaling. + * + * Hstems (that is, hint values defined in the font to help align + * horizontal features) that fall within a blue zone are said to be + * 'captured' and are aligned to that zone. Uncaptured stems are moved + * in one of four ways, top edge up or down, bottom edge up or down. + * Unless there are conflicting hstems, the smallest movement is taken to + * minimize distortion. + * + */ + + + /************************************************************************** + * + * @section: + * pcf_driver + * + * @title: + * The PCF driver + * + * @abstract: + * Controlling the PCF driver module. + * + * @description: + * While FreeType's PCF driver doesn't expose API functions by itself, it + * is possible to control its behaviour with @FT_Property_Set and + * @FT_Property_Get. Right now, there is a single property + * @no-long-family-names available if FreeType is compiled with + * PCF_CONFIG_OPTION_LONG_FAMILY_NAMES. + * + * The PCF driver's module name is 'pcf'. + * + */ + + + /************************************************************************** + * + * @section: + * t1_cid_driver + * + * @title: + * The Type 1 and CID drivers + * + * @abstract: + * Controlling the Type~1 and CID driver modules. + * + * @description: + * It is possible to control the behaviour of FreeType's Type~1 and + * Type~1 CID drivers with @FT_Property_Set and @FT_Property_Get. + * + * Behind the scenes, both drivers use the Adobe CFF engine for hinting; + * however, the used properties must be specified separately. + * + * The Type~1 driver's module name is 'type1'; the CID driver's module + * name is 't1cid'. + * + * Available properties are @hinting-engine, @no-stem-darkening, + * @darkening-parameters, and @random-seed, as documented in the + * @properties section. + * + * Please see the @cff_driver section for more details on the new hinting + * engine. + * + */ + + + /************************************************************************** + * + * @section: + * tt_driver + * + * @title: + * The TrueType driver + * + * @abstract: + * Controlling the TrueType driver module. + * + * @description: + * While FreeType's TrueType driver doesn't expose API functions by + * itself, it is possible to control its behaviour with @FT_Property_Set + * and @FT_Property_Get. + * + * The TrueType driver's module name is 'truetype'; a single property + * @interpreter-version is available, as documented in the @properties + * section. + * + * To help understand the differences between interpreter versions, we + * introduce a list of definitions, kindly provided by Greg Hitchcock. + * + * _Bi-Level Rendering_ + * + * Monochromatic rendering, exclusively used in the early days of + * TrueType by both Apple and Microsoft. Microsoft's GDI interface + * supported hinting of the right-side bearing point, such that the + * advance width could be non-linear. Most often this was done to + * achieve some level of glyph symmetry. 
To enable reasonable + * performance (e.g., not having to run hinting on all glyphs just to get + * the widths) there was a bit in the head table indicating if the side + * bearing was hinted, and additional tables, 'hdmx' and 'LTSH', to cache + * hinting widths across multiple sizes and device aspect ratios. + * + * _Font Smoothing_ + * + * Microsoft's GDI implementation of anti-aliasing. Not traditional + * anti-aliasing as the outlines were hinted before the sampling. The + * widths matched the bi-level rendering. + * + * _ClearType Rendering_ + * + * Technique that uses physical subpixels to improve rendering on LCD + * (and other) displays. Because of the higher resolution, many methods + * of improving symmetry in glyphs through hinting the right-side bearing + * were no longer necessary. This lead to what GDI calls 'natural + * widths' ClearType, see + * http://rastertragedy.com/RTRCh4.htm#Sec21. Since hinting + * has extra resolution, most non-linearity went away, but it is still + * possible for hints to change the advance widths in this mode. + * + * _ClearType Compatible Widths_ + * + * One of the earliest challenges with ClearType was allowing the + * implementation in GDI to be selected without requiring all UI and + * documents to reflow. To address this, a compatible method of + * rendering ClearType was added where the font hints are executed once + * to determine the width in bi-level rendering, and then re-run in + * ClearType, with the difference in widths being absorbed in the font + * hints for ClearType (mostly in the white space of hints); see + * http://rastertragedy.com/RTRCh4.htm#Sec20. Somewhat by + * definition, compatible width ClearType allows for non-linear widths, + * but only when the bi-level version has non-linear widths. + * + * _ClearType Subpixel Positioning_ + * + * One of the nice benefits of ClearType is the ability to more crisply + * display fractional widths; unfortunately, the GDI model of integer + * bitmaps did not support this. However, the WPF and Direct Write + * frameworks do support fractional widths. DWrite calls this 'natural + * mode', not to be confused with GDI's 'natural widths'. Subpixel + * positioning, in the current implementation of Direct Write, + * unfortunately does not support hinted advance widths, see + * http://rastertragedy.com/RTRCh4.htm#Sec22. Note that the + * TrueType interpreter fully allows the advance width to be adjusted in + * this mode, just the DWrite client will ignore those changes. + * + * _ClearType Backward Compatibility_ + * + * This is a set of exceptions made in the TrueType interpreter to + * minimize hinting techniques that were problematic with the extra + * resolution of ClearType; see + * http://rastertragedy.com/RTRCh4.htm#Sec1 and + * https://www.microsoft.com/typography/cleartype/truetypecleartype.aspx. + * This technique is not to be confused with ClearType compatible widths. + * ClearType backward compatibility has no direct impact on changing + * advance widths, but there might be an indirect impact on disabling + * some deltas. This could be worked around in backward compatibility + * mode. + * + * _Native ClearType Mode_ + * + * (Not to be confused with 'natural widths'.) This mode removes all the + * exceptions in the TrueType interpreter when running with ClearType. + * Any issues on widths would still apply, though. 
+ * + */ + + + /************************************************************************** + * + * @section: + * ot_svg_driver + * + * @title: + * The SVG driver + * + * @abstract: + * Controlling the external rendering of OT-SVG glyphs. + * + * @description: + * By default, FreeType can only load the 'SVG~' table of OpenType fonts + * if configuration macro `FT_CONFIG_OPTION_SVG` is defined. To make it + * render SVG glyphs, an external SVG rendering library is needed. All + * details on the interface between FreeType and the external library + * via function hooks can be found in section @svg_fonts. + * + * The OT-SVG driver's module name is 'ot-svg'; it supports a single + * property called @svg-hooks, documented below in the @properties + * section. + * + */ + + + /************************************************************************** + * + * @section: + * properties + * + * @title: + * Driver properties + * + * @abstract: + * Controlling driver modules. + * + * @description: + * Driver modules can be controlled by setting and unsetting properties, + * using the functions @FT_Property_Set and @FT_Property_Get. This + * section documents the available properties, together with auxiliary + * macros and structures. + * + */ + + + /************************************************************************** + * + * @enum: + * FT_HINTING_XXX + * + * @description: + * A list of constants used for the @hinting-engine property to select + * the hinting engine for CFF, Type~1, and CID fonts. + * + * @values: + * FT_HINTING_FREETYPE :: + * Use the old FreeType hinting engine. + * + * FT_HINTING_ADOBE :: + * Use the hinting engine contributed by Adobe. + * + * @since: + * 2.9 + * + */ +#define FT_HINTING_FREETYPE 0 +#define FT_HINTING_ADOBE 1 + + /* these constants (introduced in 2.4.12) are deprecated */ +#define FT_CFF_HINTING_FREETYPE FT_HINTING_FREETYPE +#define FT_CFF_HINTING_ADOBE FT_HINTING_ADOBE + + + /************************************************************************** + * + * @property: + * hinting-engine + * + * @description: + * Thanks to Adobe, which contributed a new hinting (and parsing) engine, + * an application can select between 'freetype' and 'adobe' if compiled + * with `CFF_CONFIG_OPTION_OLD_ENGINE`. If this configuration macro + * isn't defined, 'hinting-engine' does nothing. + * + * The same holds for the Type~1 and CID modules if compiled with + * `T1_CONFIG_OPTION_OLD_ENGINE`. + * + * For the 'cff' module, the default engine is 'adobe'. For both the + * 'type1' and 't1cid' modules, the default engine is 'adobe', too. + * + * @note: + * This property can be used with @FT_Property_Get also. + * + * This property can be set via the `FREETYPE_PROPERTIES` environment + * variable (using values 'adobe' or 'freetype'). + * + * @example: + * The following example code demonstrates how to select Adobe's hinting + * engine for the 'cff' module (omitting the error handling). + * + * ``` + * FT_Library library; + * FT_UInt hinting_engine = FT_HINTING_ADOBE; + * + * + * FT_Init_FreeType( &library ); + * + * FT_Property_Set( library, "cff", + * "hinting-engine", &hinting_engine ); + * ``` + * + * @since: + * 2.4.12 (for 'cff' module) + * + * 2.9 (for 'type1' and 't1cid' modules) + * + */ + + + /************************************************************************** + * + * @property: + * no-stem-darkening + * + * @description: + * All glyphs that pass through the auto-hinter will be emboldened unless + * this property is set to TRUE. 
The same is true for the CFF, Type~1, + * and CID font modules if the 'Adobe' engine is selected (which is the + * default). + * + * Stem darkening emboldens glyphs at smaller sizes to make them more + * readable on common low-DPI screens when using linear alpha blending + * and gamma correction, see @FT_Render_Glyph. When not using linear + * alpha blending and gamma correction, glyphs will appear heavy and + * fuzzy! + * + * Gamma correction essentially lightens fonts since shades of grey are + * shifted to higher pixel values (=~higher brightness) to match the + * original intention to the reality of our screens. The side-effect is + * that glyphs 'thin out'. Mac OS~X and Adobe's proprietary font + * rendering library implement a counter-measure: stem darkening at + * smaller sizes where shades of gray dominate. By emboldening a glyph + * slightly in relation to its pixel size, individual pixels get higher + * coverage of filled-in outlines and are therefore 'blacker'. This + * counteracts the 'thinning out' of glyphs, making text remain readable + * at smaller sizes. + * + * For the auto-hinter, stem-darkening is experimental currently and thus + * switched off by default (that is, `no-stem-darkening` is set to TRUE + * by default). Total consistency with the CFF driver is not achieved + * right now because the emboldening method differs and glyphs must be + * scaled down on the Y-axis to keep outline points inside their + * precomputed blue zones. The smaller the size (especially 9ppem and + * down), the higher the loss of emboldening versus the CFF driver. + * + * Note that stem darkening is never applied if @FT_LOAD_NO_SCALE is set. + * + * @note: + * This property can be used with @FT_Property_Get also. + * + * This property can be set via the `FREETYPE_PROPERTIES` environment + * variable (using values 1 and 0 for 'on' and 'off', respectively). It + * can also be set per face using @FT_Face_Properties with + * @FT_PARAM_TAG_STEM_DARKENING. + * + * @example: + * ``` + * FT_Library library; + * FT_Bool no_stem_darkening = TRUE; + * + * + * FT_Init_FreeType( &library ); + * + * FT_Property_Set( library, "cff", + * "no-stem-darkening", &no_stem_darkening ); + * ``` + * + * @since: + * 2.4.12 (for 'cff' module) + * + * 2.6.2 (for 'autofitter' module) + * + * 2.9 (for 'type1' and 't1cid' modules) + * + */ + + + /************************************************************************** + * + * @property: + * darkening-parameters + * + * @description: + * By default, the Adobe hinting engine, as used by the CFF, Type~1, and + * CID font drivers, darkens stems as follows (if the `no-stem-darkening` + * property isn't set): + * + * ``` + * stem width <= 0.5px: darkening amount = 0.4px + * stem width = 1px: darkening amount = 0.275px + * stem width = 1.667px: darkening amount = 0.275px + * stem width >= 2.333px: darkening amount = 0px + * ``` + * + * and piecewise linear in-between. At configuration time, these four + * control points can be set with the macro + * `CFF_CONFIG_OPTION_DARKENING_PARAMETERS`; the CFF, Type~1, and CID + * drivers share these values. At runtime, the control points can be + * changed using the `darkening-parameters` property (see the example + * below that demonstrates this for the Type~1 driver). + * + * The x~values give the stem width, and the y~values the darkening + * amount. The unit is 1000th of pixels. 
All coordinate values must be + * positive; the x~values must be monotonically increasing; the y~values + * must be monotonically decreasing and smaller than or equal to 500 + * (corresponding to half a pixel); the slope of each linear piece must + * be shallower than -1 (e.g., -.4). + * + * The auto-hinter provides this property, too, as an experimental + * feature. See @no-stem-darkening for more. + * + * @note: + * This property can be used with @FT_Property_Get also. + * + * This property can be set via the `FREETYPE_PROPERTIES` environment + * variable, using eight comma-separated integers without spaces. Here + * the above example, using `\` to break the line for readability. + * + * ``` + * FREETYPE_PROPERTIES=\ + * type1:darkening-parameters=500,300,1000,200,1500,100,2000,0 + * ``` + * + * @example: + * ``` + * FT_Library library; + * FT_Int darken_params[8] = { 500, 300, // x1, y1 + * 1000, 200, // x2, y2 + * 1500, 100, // x3, y3 + * 2000, 0 }; // x4, y4 + * + * + * FT_Init_FreeType( &library ); + * + * FT_Property_Set( library, "type1", + * "darkening-parameters", darken_params ); + * ``` + * + * @since: + * 2.5.1 (for 'cff' module) + * + * 2.6.2 (for 'autofitter' module) + * + * 2.9 (for 'type1' and 't1cid' modules) + * + */ + + + /************************************************************************** + * + * @property: + * random-seed + * + * @description: + * By default, the seed value for the CFF 'random' operator and the + * similar '0 28 callothersubr pop' command for the Type~1 and CID + * drivers is set to a random value. However, mainly for debugging + * purposes, it is often necessary to use a known value as a seed so that + * the pseudo-random number sequences generated by 'random' are + * repeatable. + * + * The `random-seed` property does that. Its argument is a signed 32bit + * integer; if the value is zero or negative, the seed given by the + * `intitialRandomSeed` private DICT operator in a CFF file gets used (or + * a default value if there is no such operator). If the value is + * positive, use it instead of `initialRandomSeed`, which is consequently + * ignored. + * + * @note: + * This property can be set via the `FREETYPE_PROPERTIES` environment + * variable. It can also be set per face using @FT_Face_Properties with + * @FT_PARAM_TAG_RANDOM_SEED. + * + * @since: + * 2.8 (for 'cff' module) + * + * 2.9 (for 'type1' and 't1cid' modules) + * + */ + + + /************************************************************************** + * + * @property: + * no-long-family-names + * + * @description: + * If `PCF_CONFIG_OPTION_LONG_FAMILY_NAMES` is active while compiling + * FreeType, the PCF driver constructs long family names. + * + * There are many PCF fonts just called 'Fixed' which look completely + * different, and which have nothing to do with each other. When + * selecting 'Fixed' in KDE or Gnome one gets results that appear rather + * random, the style changes often if one changes the size and one cannot + * select some fonts at all. The improve this situation, the PCF module + * prepends the foundry name (plus a space) to the family name. It also + * checks whether there are 'wide' characters; all put together, family + * names like 'Sony Fixed' or 'Misc Fixed Wide' are constructed. + * + * If `no-long-family-names` is set, this feature gets switched off. + * + * @note: + * This property can be used with @FT_Property_Get also. 
+ * + * This property can be set via the `FREETYPE_PROPERTIES` environment + * variable (using values 1 and 0 for 'on' and 'off', respectively). + * + * @example: + * ``` + * FT_Library library; + * FT_Bool no_long_family_names = TRUE; + * + * + * FT_Init_FreeType( &library ); + * + * FT_Property_Set( library, "pcf", + * "no-long-family-names", + * &no_long_family_names ); + * ``` + * + * @since: + * 2.8 + */ + + + /************************************************************************** + * + * @enum: + * TT_INTERPRETER_VERSION_XXX + * + * @description: + * A list of constants used for the @interpreter-version property to + * select the hinting engine for Truetype fonts. + * + * The numeric value in the constant names represents the version number + * as returned by the 'GETINFO' bytecode instruction. + * + * @values: + * TT_INTERPRETER_VERSION_35 :: + * Version~35 corresponds to MS rasterizer v.1.7 as used e.g. in + * Windows~98; only grayscale and B/W rasterizing is supported. + * + * TT_INTERPRETER_VERSION_38 :: + * Version~38 is the same Version~40. The original 'Infinality' code is + * no longer available. + * + * TT_INTERPRETER_VERSION_40 :: + * Version~40 corresponds to MS rasterizer v.2.1; it is roughly + * equivalent to the hinting provided by DirectWrite ClearType (as can + * be found, for example, in Microsoft's Edge Browser on Windows~10). + * It is used in FreeType to select the 'minimal' subpixel hinting + * code, a stripped-down and higher performance version of the + * 'Infinality' code. + * + * @note: + * This property controls the behaviour of the bytecode interpreter and + * thus how outlines get hinted. It does **not** control how glyph get + * rasterized! In particular, it does not control subpixel color + * filtering. + * + * If FreeType has not been compiled with the configuration option + * `TT_CONFIG_OPTION_SUBPIXEL_HINTING`, selecting version~38 or~40 causes + * an `FT_Err_Unimplemented_Feature` error. + * + * Depending on the graphics framework, Microsoft uses different bytecode + * and rendering engines. As a consequence, the version numbers returned + * by a call to the 'GETINFO' bytecode instruction are more convoluted + * than desired. + * + * Here are two tables that try to shed some light on the possible values + * for the MS rasterizer engine, together with the additional features + * introduced by it. + * + * ``` + * GETINFO framework version feature + * ------------------------------------------------------------------- + * 3 GDI (Win 3.1), v1.0 16-bit, first version + * TrueImage + * 33 GDI (Win NT 3.1), v1.5 32-bit + * HP Laserjet + * 34 GDI (Win 95) v1.6 font smoothing, + * new SCANTYPE opcode + * 35 GDI (Win 98/2000) v1.7 (UN)SCALED_COMPONENT_OFFSET + * bits in composite glyphs + * 36 MGDI (Win CE 2) v1.6+ classic ClearType + * 37 GDI (XP and later), v1.8 ClearType + * GDI+ old (before Vista) + * 38 GDI+ old (Vista, Win 7), v1.9 subpixel ClearType, + * WPF Y-direction ClearType, + * additional error checking + * 39 DWrite (before Win 8) v2.0 subpixel ClearType flags + * in GETINFO opcode, + * bug fixes + * 40 GDI+ (after Win 7), v2.1 Y-direction ClearType flag + * DWrite (Win 8) in GETINFO opcode, + * Gray ClearType + * ``` + * + * The 'version' field gives a rough orientation only, since some + * applications provided certain features much earlier (as an example, + * Microsoft Reader used subpixel and Y-direction ClearType already in + * Windows 2000). Similarly, updates to a given framework might include + * improved hinting support. 
+ * + * ``` + * version sampling rendering comment + * x y x y + * -------------------------------------------------------------- + * v1.0 normal normal B/W B/W bi-level + * v1.6 high high gray gray grayscale + * v1.8 high normal color-filter B/W (GDI) ClearType + * v1.9 high high color-filter gray Color ClearType + * v2.1 high normal gray B/W Gray ClearType + * v2.1 high high gray gray Gray ClearType + * ``` + * + * Color and Gray ClearType are the two available variants of + * 'Y-direction ClearType', meaning grayscale rasterization along the + * Y-direction; the name used in the TrueType specification for this + * feature is 'symmetric smoothing'. 'Classic ClearType' is the original + * algorithm used before introducing a modified version in Win~XP. + * Another name for v1.6's grayscale rendering is 'font smoothing', and + * 'Color ClearType' is sometimes also called 'DWrite ClearType'. To + * differentiate between today's Color ClearType and the earlier + * ClearType variant with B/W rendering along the vertical axis, the + * latter is sometimes called 'GDI ClearType'. + * + * 'Normal' and 'high' sampling describe the (virtual) resolution to + * access the rasterized outline after the hinting process. 'Normal' + * means 1 sample per grid line (i.e., B/W). In the current Microsoft + * implementation, 'high' means an extra virtual resolution of 16x16 (or + * 16x1) grid lines per pixel for bytecode instructions like 'MIRP'. + * After hinting, these 16 grid lines are mapped to 6x5 (or 6x1) grid + * lines for color filtering if Color ClearType is activated. + * + * Note that 'Gray ClearType' is essentially the same as v1.6's grayscale + * rendering. However, the GETINFO instruction handles it differently: + * v1.6 returns bit~12 (hinting for grayscale), while v2.1 returns + * bits~13 (hinting for ClearType), 18 (symmetrical smoothing), and~19 + * (Gray ClearType). Also, this mode respects bits 2 and~3 for the + * version~1 gasp table exclusively (like Color ClearType), while v1.6 + * only respects the values of version~0 (bits 0 and~1). + * + * Keep in mind that the features of the above interpreter versions might + * not map exactly to FreeType features or behavior because it is a + * fundamentally different library with different internals. + * + */ +#define TT_INTERPRETER_VERSION_35 35 +#define TT_INTERPRETER_VERSION_38 38 +#define TT_INTERPRETER_VERSION_40 40 + + + /************************************************************************** + * + * @property: + * interpreter-version + * + * @description: + * Currently, three versions are available, two representing the bytecode + * interpreter with subpixel hinting support (old 'Infinality' code and + * new stripped-down and higher performance 'minimal' code) and one + * without, respectively. The default is subpixel support if + * `TT_CONFIG_OPTION_SUBPIXEL_HINTING` is defined, and no subpixel + * support otherwise (since it isn't available then). + * + * If subpixel hinting is on, many TrueType bytecode instructions behave + * differently compared to B/W or grayscale rendering (except if 'native + * ClearType' is selected by the font). Microsoft's main idea is to + * render at a much increased horizontal resolution, then sampling down + * the created output to subpixel precision. However, many older fonts + * are not suited to this and must be specially taken care of by applying + * (hardcoded) tweaks in Microsoft's interpreter. 
+ * + * Details on subpixel hinting and some of the necessary tweaks can be + * found in Greg Hitchcock's whitepaper at + * 'https://www.microsoft.com/typography/cleartype/truetypecleartype.aspx'. + * Note that FreeType currently doesn't really 'subpixel hint' (6x1, 6x2, + * or 6x5 supersampling) like discussed in the paper. Depending on the + * chosen interpreter, it simply ignores instructions on vertical stems + * to arrive at very similar results. + * + * @note: + * This property can be used with @FT_Property_Get also. + * + * This property can be set via the `FREETYPE_PROPERTIES` environment + * variable (using values '35', '38', or '40'). + * + * @example: + * The following example code demonstrates how to deactivate subpixel + * hinting (omitting the error handling). + * + * ``` + * FT_Library library; + * FT_Face face; + * FT_UInt interpreter_version = TT_INTERPRETER_VERSION_35; + * + * + * FT_Init_FreeType( &library ); + * + * FT_Property_Set( library, "truetype", + * "interpreter-version", + * &interpreter_version ); + * ``` + * + * @since: + * 2.5 + */ + + /************************************************************************** + * + * @property: + * svg-hooks + * + * @description: + * Set up the interface between FreeType and an extern SVG rendering + * library like 'librsvg'. All details on the function hooks can be + * found in section @svg_fonts. + * + * @example: + * The following example code expects that the four hook functions + * `svg_*` are defined elsewhere. Error handling is omitted, too. + * + * ``` + * FT_Library library; + * SVG_RendererHooks hooks = { + * (SVG_Lib_Init_Func)svg_init, + * (SVG_Lib_Free_Func)svg_free, + * (SVG_Lib_Render_Func)svg_render, + * (SVG_Lib_Preset_Slot_Func)svg_preset_slot }; + * + * + * FT_Init_FreeType( &library ); + * + * FT_Property_Set( library, "ot-svg", + * "svg-hooks", &hooks ); + * ``` + * + * @since: + * 2.12 + */ + + + /************************************************************************** + * + * @property: + * glyph-to-script-map + * + * @description: + * **Experimental only** + * + * The auto-hinter provides various script modules to hint glyphs. + * Examples of supported scripts are Latin or CJK. Before a glyph is + * auto-hinted, the Unicode character map of the font gets examined, and + * the script is then determined based on Unicode character ranges, see + * below. + * + * OpenType fonts, however, often provide much more glyphs than character + * codes (small caps, superscripts, ligatures, swashes, etc.), to be + * controlled by so-called 'features'. Handling OpenType features can be + * quite complicated and thus needs a separate library on top of + * FreeType. + * + * The mapping between glyph indices and scripts (in the auto-hinter + * sense, see the @FT_AUTOHINTER_SCRIPT_XXX values) is stored as an array + * with `num_glyphs` elements, as found in the font's @FT_Face structure. + * The `glyph-to-script-map` property returns a pointer to this array, + * which can be modified as needed. Note that the modification should + * happen before the first glyph gets processed by the auto-hinter so + * that the global analysis of the font shapes actually uses the modified + * mapping. + * + * @example: + * The following example code demonstrates how to access it (omitting the + * error handling). 
+ * + * ``` + * FT_Library library; + * FT_Face face; + * FT_Prop_GlyphToScriptMap prop; + * + * + * FT_Init_FreeType( &library ); + * FT_New_Face( library, "foo.ttf", 0, &face ); + * + * prop.face = face; + * + * FT_Property_Get( library, "autofitter", + * "glyph-to-script-map", &prop ); + * + * // adjust `prop.map' as needed right here + * + * FT_Load_Glyph( face, ..., FT_LOAD_FORCE_AUTOHINT ); + * ``` + * + * @since: + * 2.4.11 + * + */ + + + /************************************************************************** + * + * @enum: + * FT_AUTOHINTER_SCRIPT_XXX + * + * @description: + * **Experimental only** + * + * A list of constants used for the @glyph-to-script-map property to + * specify the script submodule the auto-hinter should use for hinting a + * particular glyph. + * + * @values: + * FT_AUTOHINTER_SCRIPT_NONE :: + * Don't auto-hint this glyph. + * + * FT_AUTOHINTER_SCRIPT_LATIN :: + * Apply the latin auto-hinter. For the auto-hinter, 'latin' is a very + * broad term, including Cyrillic and Greek also since characters from + * those scripts share the same design constraints. + * + * By default, characters from the following Unicode ranges are + * assigned to this submodule. + * + * ``` + * U+0020 - U+007F // Basic Latin (no control characters) + * U+00A0 - U+00FF // Latin-1 Supplement (no control characters) + * U+0100 - U+017F // Latin Extended-A + * U+0180 - U+024F // Latin Extended-B + * U+0250 - U+02AF // IPA Extensions + * U+02B0 - U+02FF // Spacing Modifier Letters + * U+0300 - U+036F // Combining Diacritical Marks + * U+0370 - U+03FF // Greek and Coptic + * U+0400 - U+04FF // Cyrillic + * U+0500 - U+052F // Cyrillic Supplement + * U+1D00 - U+1D7F // Phonetic Extensions + * U+1D80 - U+1DBF // Phonetic Extensions Supplement + * U+1DC0 - U+1DFF // Combining Diacritical Marks Supplement + * U+1E00 - U+1EFF // Latin Extended Additional + * U+1F00 - U+1FFF // Greek Extended + * U+2000 - U+206F // General Punctuation + * U+2070 - U+209F // Superscripts and Subscripts + * U+20A0 - U+20CF // Currency Symbols + * U+2150 - U+218F // Number Forms + * U+2460 - U+24FF // Enclosed Alphanumerics + * U+2C60 - U+2C7F // Latin Extended-C + * U+2DE0 - U+2DFF // Cyrillic Extended-A + * U+2E00 - U+2E7F // Supplemental Punctuation + * U+A640 - U+A69F // Cyrillic Extended-B + * U+A720 - U+A7FF // Latin Extended-D + * U+FB00 - U+FB06 // Alphab. Present. Forms (Latin Ligatures) + * U+1D400 - U+1D7FF // Mathematical Alphanumeric Symbols + * U+1F100 - U+1F1FF // Enclosed Alphanumeric Supplement + * ``` + * + * FT_AUTOHINTER_SCRIPT_CJK :: + * Apply the CJK auto-hinter, covering Chinese, Japanese, Korean, old + * Vietnamese, and some other scripts. + * + * By default, characters from the following Unicode ranges are + * assigned to this submodule. 
+ * + * ``` + * U+1100 - U+11FF // Hangul Jamo + * U+2E80 - U+2EFF // CJK Radicals Supplement + * U+2F00 - U+2FDF // Kangxi Radicals + * U+2FF0 - U+2FFF // Ideographic Description Characters + * U+3000 - U+303F // CJK Symbols and Punctuation + * U+3040 - U+309F // Hiragana + * U+30A0 - U+30FF // Katakana + * U+3100 - U+312F // Bopomofo + * U+3130 - U+318F // Hangul Compatibility Jamo + * U+3190 - U+319F // Kanbun + * U+31A0 - U+31BF // Bopomofo Extended + * U+31C0 - U+31EF // CJK Strokes + * U+31F0 - U+31FF // Katakana Phonetic Extensions + * U+3200 - U+32FF // Enclosed CJK Letters and Months + * U+3300 - U+33FF // CJK Compatibility + * U+3400 - U+4DBF // CJK Unified Ideographs Extension A + * U+4DC0 - U+4DFF // Yijing Hexagram Symbols + * U+4E00 - U+9FFF // CJK Unified Ideographs + * U+A960 - U+A97F // Hangul Jamo Extended-A + * U+AC00 - U+D7AF // Hangul Syllables + * U+D7B0 - U+D7FF // Hangul Jamo Extended-B + * U+F900 - U+FAFF // CJK Compatibility Ideographs + * U+FE10 - U+FE1F // Vertical forms + * U+FE30 - U+FE4F // CJK Compatibility Forms + * U+FF00 - U+FFEF // Halfwidth and Fullwidth Forms + * U+1B000 - U+1B0FF // Kana Supplement + * U+1D300 - U+1D35F // Tai Xuan Hing Symbols + * U+1F200 - U+1F2FF // Enclosed Ideographic Supplement + * U+20000 - U+2A6DF // CJK Unified Ideographs Extension B + * U+2A700 - U+2B73F // CJK Unified Ideographs Extension C + * U+2B740 - U+2B81F // CJK Unified Ideographs Extension D + * U+2F800 - U+2FA1F // CJK Compatibility Ideographs Supplement + * ``` + * + * FT_AUTOHINTER_SCRIPT_INDIC :: + * Apply the indic auto-hinter, covering all major scripts from the + * Indian sub-continent and some other related scripts like Thai, Lao, + * or Tibetan. + * + * By default, characters from the following Unicode ranges are + * assigned to this submodule. + * + * ``` + * U+0900 - U+0DFF // Indic Range + * U+0F00 - U+0FFF // Tibetan + * U+1900 - U+194F // Limbu + * U+1B80 - U+1BBF // Sundanese + * U+A800 - U+A82F // Syloti Nagri + * U+ABC0 - U+ABFF // Meetei Mayek + * U+11800 - U+118DF // Sharada + * ``` + * + * Note that currently Indic support is rudimentary only, missing blue + * zone support. + * + * @since: + * 2.4.11 + * + */ +#define FT_AUTOHINTER_SCRIPT_NONE 0 +#define FT_AUTOHINTER_SCRIPT_LATIN 1 +#define FT_AUTOHINTER_SCRIPT_CJK 2 +#define FT_AUTOHINTER_SCRIPT_INDIC 3 + + + /************************************************************************** + * + * @struct: + * FT_Prop_GlyphToScriptMap + * + * @description: + * **Experimental only** + * + * The data exchange structure for the @glyph-to-script-map property. + * + * @since: + * 2.4.11 + * + */ + typedef struct FT_Prop_GlyphToScriptMap_ + { + FT_Face face; + FT_UShort* map; + + } FT_Prop_GlyphToScriptMap; + + + /************************************************************************** + * + * @property: + * fallback-script + * + * @description: + * **Experimental only** + * + * If no auto-hinter script module can be assigned to a glyph, a fallback + * script gets assigned to it (see also the @glyph-to-script-map + * property). By default, this is @FT_AUTOHINTER_SCRIPT_CJK. Using the + * `fallback-script` property, this fallback value can be changed. + * + * @note: + * This property can be used with @FT_Property_Get also. 
+ * + * It's important to use the right timing for changing this value: The + * creation of the glyph-to-script map that eventually uses the fallback + * script value gets triggered either by setting or reading a + * face-specific property like @glyph-to-script-map, or by auto-hinting + * any glyph from that face. In particular, if you have already created + * an @FT_Face structure but not loaded any glyph (using the + * auto-hinter), a change of the fallback script will affect this face. + * + * @example: + * ``` + * FT_Library library; + * FT_UInt fallback_script = FT_AUTOHINTER_SCRIPT_NONE; + * + * + * FT_Init_FreeType( &library ); + * + * FT_Property_Set( library, "autofitter", + * "fallback-script", &fallback_script ); + * ``` + * + * @since: + * 2.4.11 + * + */ + + + /************************************************************************** + * + * @property: + * default-script + * + * @description: + * **Experimental only** + * + * If FreeType gets compiled with `FT_CONFIG_OPTION_USE_HARFBUZZ` to make + * the HarfBuzz library access OpenType features for getting better glyph + * coverages, this property sets the (auto-fitter) script to be used for + * the default (OpenType) script data of a font's GSUB table. Features + * for the default script are intended for all scripts not explicitly + * handled in GSUB; an example is a 'dlig' feature, containing the + * combination of the characters 'T', 'E', and 'L' to form a 'TEL' + * ligature. + * + * By default, this is @FT_AUTOHINTER_SCRIPT_LATIN. Using the + * `default-script` property, this default value can be changed. + * + * @note: + * This property can be used with @FT_Property_Get also. + * + * It's important to use the right timing for changing this value: The + * creation of the glyph-to-script map that eventually uses the default + * script value gets triggered either by setting or reading a + * face-specific property like @glyph-to-script-map, or by auto-hinting + * any glyph from that face. In particular, if you have already created + * an @FT_Face structure but not loaded any glyph (using the + * auto-hinter), a change of the default script will affect this face. + * + * @example: + * ``` + * FT_Library library; + * FT_UInt default_script = FT_AUTOHINTER_SCRIPT_NONE; + * + * + * FT_Init_FreeType( &library ); + * + * FT_Property_Set( library, "autofitter", + * "default-script", &default_script ); + * ``` + * + * @since: + * 2.5.3 + * + */ + + + /************************************************************************** + * + * @property: + * increase-x-height + * + * @description: + * For ppem values in the range 6~<= ppem <= `increase-x-height`, round + * up the font's x~height much more often than normally. If the value is + * set to~0, which is the default, this feature is switched off. Use + * this property to improve the legibility of small font sizes if + * necessary. + * + * @note: + * This property can be used with @FT_Property_Get also. + * + * Set this value right after calling @FT_Set_Char_Size, but before + * loading any glyph (using the auto-hinter). 
+ * + * @example: + * ``` + * FT_Library library; + * FT_Face face; + * FT_Prop_IncreaseXHeight prop; + * + * + * FT_Init_FreeType( &library ); + * FT_New_Face( library, "foo.ttf", 0, &face ); + * FT_Set_Char_Size( face, 10 * 64, 0, 72, 0 ); + * + * prop.face = face; + * prop.limit = 14; + * + * FT_Property_Set( library, "autofitter", + * "increase-x-height", &prop ); + * ``` + * + * @since: + * 2.4.11 + * + */ + + + /************************************************************************** + * + * @struct: + * FT_Prop_IncreaseXHeight + * + * @description: + * The data exchange structure for the @increase-x-height property. + * + */ + typedef struct FT_Prop_IncreaseXHeight_ + { + FT_Face face; + FT_UInt limit; + + } FT_Prop_IncreaseXHeight; + + + /************************************************************************** + * + * @property: + * warping + * + * @description: + * **Obsolete** + * + * This property was always experimental and probably never worked + * correctly. It was entirely removed from the FreeType~2 sources. This + * entry is only here for historical reference. + * + * Warping only worked in 'normal' auto-hinting mode replacing it. The + * idea of the code was to slightly scale and shift a glyph along the + * non-hinted dimension (which is usually the horizontal axis) so that as + * much of its segments were aligned (more or less) to the grid. To find + * out a glyph's optimal scaling and shifting value, various parameter + * combinations were tried and scored. + * + * @since: + * 2.6 + * + */ + + + /* */ + + +FT_END_HEADER + + +#endif /* FTDRIVER_H_ */ + + +/* END */ diff --git a/Example/include/freetype/fterrdef.h b/Example/include/freetype/fterrdef.h new file mode 100644 index 0000000..d59b3cc --- /dev/null +++ b/Example/include/freetype/fterrdef.h @@ -0,0 +1,283 @@ +/**************************************************************************** + * + * fterrdef.h + * + * FreeType error codes (specification). + * + * Copyright (C) 2002-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + + /************************************************************************** + * + * @section: + * error_code_values + * + * @title: + * Error Code Values + * + * @abstract: + * All possible error codes returned by FreeType functions. + * + * @description: + * The list below is taken verbatim from the file `fterrdef.h` (loaded + * automatically by including `FT_FREETYPE_H`). The first argument of the + * `FT_ERROR_DEF_` macro is the error label; by default, the prefix + * `FT_Err_` gets added so that you get error names like + * `FT_Err_Cannot_Open_Resource`. The second argument is the error code, + * and the last argument an error string, which is not used by FreeType. + * + * Within your application you should **only** use error names and + * **never** its numeric values! The latter might (and actually do) + * change in forthcoming FreeType versions. + * + * Macro `FT_NOERRORDEF_` defines `FT_Err_Ok`, which is always zero. See + * the 'Error Enumerations' subsection how to automatically generate a + * list of error strings. 
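+   *
+   *   For instance (a sketch only; `library', `face', and the font file
+   *   name are assumed to be provided by the application), test against
+   *   the error names rather than raw numbers:
+   *
+   *   ```
+   *     FT_Error  error = FT_New_Face( library, "foo.ttf", 0, &face );
+   *
+   *
+   *     if ( error == FT_Err_Unknown_File_Format )
+   *     {
+   *       // the file could be opened but is not a supported font format
+   *     }
+   *     else if ( error )
+   *     {
+   *       // another error occurred
+   *     }
+   *   ```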
+ * + */ + + + /************************************************************************** + * + * @enum: + * FT_Err_XXX + * + */ + + /* generic errors */ + + FT_NOERRORDEF_( Ok, 0x00, + "no error" ) + + FT_ERRORDEF_( Cannot_Open_Resource, 0x01, + "cannot open resource" ) + FT_ERRORDEF_( Unknown_File_Format, 0x02, + "unknown file format" ) + FT_ERRORDEF_( Invalid_File_Format, 0x03, + "broken file" ) + FT_ERRORDEF_( Invalid_Version, 0x04, + "invalid FreeType version" ) + FT_ERRORDEF_( Lower_Module_Version, 0x05, + "module version is too low" ) + FT_ERRORDEF_( Invalid_Argument, 0x06, + "invalid argument" ) + FT_ERRORDEF_( Unimplemented_Feature, 0x07, + "unimplemented feature" ) + FT_ERRORDEF_( Invalid_Table, 0x08, + "broken table" ) + FT_ERRORDEF_( Invalid_Offset, 0x09, + "broken offset within table" ) + FT_ERRORDEF_( Array_Too_Large, 0x0A, + "array allocation size too large" ) + FT_ERRORDEF_( Missing_Module, 0x0B, + "missing module" ) + FT_ERRORDEF_( Missing_Property, 0x0C, + "missing property" ) + + /* glyph/character errors */ + + FT_ERRORDEF_( Invalid_Glyph_Index, 0x10, + "invalid glyph index" ) + FT_ERRORDEF_( Invalid_Character_Code, 0x11, + "invalid character code" ) + FT_ERRORDEF_( Invalid_Glyph_Format, 0x12, + "unsupported glyph image format" ) + FT_ERRORDEF_( Cannot_Render_Glyph, 0x13, + "cannot render this glyph format" ) + FT_ERRORDEF_( Invalid_Outline, 0x14, + "invalid outline" ) + FT_ERRORDEF_( Invalid_Composite, 0x15, + "invalid composite glyph" ) + FT_ERRORDEF_( Too_Many_Hints, 0x16, + "too many hints" ) + FT_ERRORDEF_( Invalid_Pixel_Size, 0x17, + "invalid pixel size" ) + FT_ERRORDEF_( Invalid_SVG_Document, 0x18, + "invalid SVG document" ) + + /* handle errors */ + + FT_ERRORDEF_( Invalid_Handle, 0x20, + "invalid object handle" ) + FT_ERRORDEF_( Invalid_Library_Handle, 0x21, + "invalid library handle" ) + FT_ERRORDEF_( Invalid_Driver_Handle, 0x22, + "invalid module handle" ) + FT_ERRORDEF_( Invalid_Face_Handle, 0x23, + "invalid face handle" ) + FT_ERRORDEF_( Invalid_Size_Handle, 0x24, + "invalid size handle" ) + FT_ERRORDEF_( Invalid_Slot_Handle, 0x25, + "invalid glyph slot handle" ) + FT_ERRORDEF_( Invalid_CharMap_Handle, 0x26, + "invalid charmap handle" ) + FT_ERRORDEF_( Invalid_Cache_Handle, 0x27, + "invalid cache manager handle" ) + FT_ERRORDEF_( Invalid_Stream_Handle, 0x28, + "invalid stream handle" ) + + /* driver errors */ + + FT_ERRORDEF_( Too_Many_Drivers, 0x30, + "too many modules" ) + FT_ERRORDEF_( Too_Many_Extensions, 0x31, + "too many extensions" ) + + /* memory errors */ + + FT_ERRORDEF_( Out_Of_Memory, 0x40, + "out of memory" ) + FT_ERRORDEF_( Unlisted_Object, 0x41, + "unlisted object" ) + + /* stream errors */ + + FT_ERRORDEF_( Cannot_Open_Stream, 0x51, + "cannot open stream" ) + FT_ERRORDEF_( Invalid_Stream_Seek, 0x52, + "invalid stream seek" ) + FT_ERRORDEF_( Invalid_Stream_Skip, 0x53, + "invalid stream skip" ) + FT_ERRORDEF_( Invalid_Stream_Read, 0x54, + "invalid stream read" ) + FT_ERRORDEF_( Invalid_Stream_Operation, 0x55, + "invalid stream operation" ) + FT_ERRORDEF_( Invalid_Frame_Operation, 0x56, + "invalid frame operation" ) + FT_ERRORDEF_( Nested_Frame_Access, 0x57, + "nested frame access" ) + FT_ERRORDEF_( Invalid_Frame_Read, 0x58, + "invalid frame read" ) + + /* raster errors */ + + FT_ERRORDEF_( Raster_Uninitialized, 0x60, + "raster uninitialized" ) + FT_ERRORDEF_( Raster_Corrupted, 0x61, + "raster corrupted" ) + FT_ERRORDEF_( Raster_Overflow, 0x62, + "raster overflow" ) + FT_ERRORDEF_( Raster_Negative_Height, 0x63, + "negative height while 
rastering" ) + + /* cache errors */ + + FT_ERRORDEF_( Too_Many_Caches, 0x70, + "too many registered caches" ) + + /* TrueType and SFNT errors */ + + FT_ERRORDEF_( Invalid_Opcode, 0x80, + "invalid opcode" ) + FT_ERRORDEF_( Too_Few_Arguments, 0x81, + "too few arguments" ) + FT_ERRORDEF_( Stack_Overflow, 0x82, + "stack overflow" ) + FT_ERRORDEF_( Code_Overflow, 0x83, + "code overflow" ) + FT_ERRORDEF_( Bad_Argument, 0x84, + "bad argument" ) + FT_ERRORDEF_( Divide_By_Zero, 0x85, + "division by zero" ) + FT_ERRORDEF_( Invalid_Reference, 0x86, + "invalid reference" ) + FT_ERRORDEF_( Debug_OpCode, 0x87, + "found debug opcode" ) + FT_ERRORDEF_( ENDF_In_Exec_Stream, 0x88, + "found ENDF opcode in execution stream" ) + FT_ERRORDEF_( Nested_DEFS, 0x89, + "nested DEFS" ) + FT_ERRORDEF_( Invalid_CodeRange, 0x8A, + "invalid code range" ) + FT_ERRORDEF_( Execution_Too_Long, 0x8B, + "execution context too long" ) + FT_ERRORDEF_( Too_Many_Function_Defs, 0x8C, + "too many function definitions" ) + FT_ERRORDEF_( Too_Many_Instruction_Defs, 0x8D, + "too many instruction definitions" ) + FT_ERRORDEF_( Table_Missing, 0x8E, + "SFNT font table missing" ) + FT_ERRORDEF_( Horiz_Header_Missing, 0x8F, + "horizontal header (hhea) table missing" ) + FT_ERRORDEF_( Locations_Missing, 0x90, + "locations (loca) table missing" ) + FT_ERRORDEF_( Name_Table_Missing, 0x91, + "name table missing" ) + FT_ERRORDEF_( CMap_Table_Missing, 0x92, + "character map (cmap) table missing" ) + FT_ERRORDEF_( Hmtx_Table_Missing, 0x93, + "horizontal metrics (hmtx) table missing" ) + FT_ERRORDEF_( Post_Table_Missing, 0x94, + "PostScript (post) table missing" ) + FT_ERRORDEF_( Invalid_Horiz_Metrics, 0x95, + "invalid horizontal metrics" ) + FT_ERRORDEF_( Invalid_CharMap_Format, 0x96, + "invalid character map (cmap) format" ) + FT_ERRORDEF_( Invalid_PPem, 0x97, + "invalid ppem value" ) + FT_ERRORDEF_( Invalid_Vert_Metrics, 0x98, + "invalid vertical metrics" ) + FT_ERRORDEF_( Could_Not_Find_Context, 0x99, + "could not find context" ) + FT_ERRORDEF_( Invalid_Post_Table_Format, 0x9A, + "invalid PostScript (post) table format" ) + FT_ERRORDEF_( Invalid_Post_Table, 0x9B, + "invalid PostScript (post) table" ) + FT_ERRORDEF_( DEF_In_Glyf_Bytecode, 0x9C, + "found FDEF or IDEF opcode in glyf bytecode" ) + FT_ERRORDEF_( Missing_Bitmap, 0x9D, + "missing bitmap in strike" ) + FT_ERRORDEF_( Missing_SVG_Hooks, 0x9E, + "SVG hooks have not been set" ) + + /* CFF, CID, and Type 1 errors */ + + FT_ERRORDEF_( Syntax_Error, 0xA0, + "opcode syntax error" ) + FT_ERRORDEF_( Stack_Underflow, 0xA1, + "argument stack underflow" ) + FT_ERRORDEF_( Ignore, 0xA2, + "ignore" ) + FT_ERRORDEF_( No_Unicode_Glyph_Name, 0xA3, + "no Unicode glyph name found" ) + FT_ERRORDEF_( Glyph_Too_Big, 0xA4, + "glyph too big for hinting" ) + + /* BDF errors */ + + FT_ERRORDEF_( Missing_Startfont_Field, 0xB0, + "`STARTFONT' field missing" ) + FT_ERRORDEF_( Missing_Font_Field, 0xB1, + "`FONT' field missing" ) + FT_ERRORDEF_( Missing_Size_Field, 0xB2, + "`SIZE' field missing" ) + FT_ERRORDEF_( Missing_Fontboundingbox_Field, 0xB3, + "`FONTBOUNDINGBOX' field missing" ) + FT_ERRORDEF_( Missing_Chars_Field, 0xB4, + "`CHARS' field missing" ) + FT_ERRORDEF_( Missing_Startchar_Field, 0xB5, + "`STARTCHAR' field missing" ) + FT_ERRORDEF_( Missing_Encoding_Field, 0xB6, + "`ENCODING' field missing" ) + FT_ERRORDEF_( Missing_Bbx_Field, 0xB7, + "`BBX' field missing" ) + FT_ERRORDEF_( Bbx_Too_Big, 0xB8, + "`BBX' too big" ) + FT_ERRORDEF_( Corrupted_Font_Header, 0xB9, + "Font header corrupted or missing fields" ) 
+ FT_ERRORDEF_( Corrupted_Font_Glyphs, 0xBA, + "Font glyphs corrupted or missing fields" ) + + /* */ + + +/* END */ diff --git a/Example/include/freetype/fterrors.h b/Example/include/freetype/fterrors.h new file mode 100644 index 0000000..15ef3f7 --- /dev/null +++ b/Example/include/freetype/fterrors.h @@ -0,0 +1,296 @@ +/**************************************************************************** + * + * fterrors.h + * + * FreeType error code handling (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + + /************************************************************************** + * + * @section: + * error_enumerations + * + * @title: + * Error Enumerations + * + * @abstract: + * How to handle errors and error strings. + * + * @description: + * The header file `fterrors.h` (which is automatically included by + * `freetype.h`) defines the handling of FreeType's enumeration + * constants. It can also be used to generate error message strings + * with a small macro trick explained below. + * + * **Error Formats** + * + * The configuration macro `FT_CONFIG_OPTION_USE_MODULE_ERRORS` can be + * defined in `ftoption.h` in order to make the higher byte indicate the + * module where the error has happened (this is not compatible with + * standard builds of FreeType~2, however). See the file `ftmoderr.h` + * for more details. + * + * **Error Message Strings** + * + * Error definitions are set up with special macros that allow client + * applications to build a table of error message strings. The strings + * are not included in a normal build of FreeType~2 to save space (most + * client applications do not use them). + * + * To do so, you have to define the following macros before including + * this file. + * + * ``` + * FT_ERROR_START_LIST + * ``` + * + * This macro is called before anything else to define the start of the + * error list. It is followed by several `FT_ERROR_DEF` calls. + * + * ``` + * FT_ERROR_DEF( e, v, s ) + * ``` + * + * This macro is called to define one single error. 'e' is the error + * code identifier (e.g., `Invalid_Argument`), 'v' is the error's + * numerical value, and 's' is the corresponding error string. + * + * ``` + * FT_ERROR_END_LIST + * ``` + * + * This macro ends the list. + * + * Additionally, you have to undefine `FTERRORS_H_` before #including + * this file. + * + * Here is a simple example. + * + * ``` + * #undef FTERRORS_H_ + * #define FT_ERRORDEF( e, v, s ) { e, s }, + * #define FT_ERROR_START_LIST { + * #define FT_ERROR_END_LIST { 0, NULL } }; + * + * const struct + * { + * int err_code; + * const char* err_msg; + * } ft_errors[] = + * + * #include + * ``` + * + * An alternative to using an array is a switch statement. + * + * ``` + * #undef FTERRORS_H_ + * #define FT_ERROR_START_LIST switch ( error_code ) { + * #define FT_ERRORDEF( e, v, s ) case v: return s; + * #define FT_ERROR_END_LIST } + * ``` + * + * If you use `FT_CONFIG_OPTION_USE_MODULE_ERRORS`, `error_code` should + * be replaced with `FT_ERROR_BASE(error_code)` in the last example. + */ + + /* */ + + /* In previous FreeType versions we used `__FTERRORS_H__`. 
However, */ + /* using two successive underscores in a non-system symbol name */ + /* violates the C (and C++) standard, so it was changed to the */ + /* current form. In spite of this, we have to make */ + /* */ + /* ``` */ + /* #undefine __FTERRORS_H__ */ + /* ``` */ + /* */ + /* work for backward compatibility. */ + /* */ +#if !( defined( FTERRORS_H_ ) && defined ( __FTERRORS_H__ ) ) +#define FTERRORS_H_ +#define __FTERRORS_H__ + + + /* include module base error codes */ +#include + + + /*******************************************************************/ + /*******************************************************************/ + /***** *****/ + /***** SETUP MACROS *****/ + /***** *****/ + /*******************************************************************/ + /*******************************************************************/ + + +#undef FT_NEED_EXTERN_C + + + /* FT_ERR_PREFIX is used as a prefix for error identifiers. */ + /* By default, we use `FT_Err_`. */ + /* */ +#ifndef FT_ERR_PREFIX +#define FT_ERR_PREFIX FT_Err_ +#endif + + + /* FT_ERR_BASE is used as the base for module-specific errors. */ + /* */ +#ifdef FT_CONFIG_OPTION_USE_MODULE_ERRORS + +#ifndef FT_ERR_BASE +#define FT_ERR_BASE FT_Mod_Err_Base +#endif + +#else + +#undef FT_ERR_BASE +#define FT_ERR_BASE 0 + +#endif /* FT_CONFIG_OPTION_USE_MODULE_ERRORS */ + + + /* If FT_ERRORDEF is not defined, we need to define a simple */ + /* enumeration type. */ + /* */ +#ifndef FT_ERRORDEF + +#define FT_INCLUDE_ERR_PROTOS + +#define FT_ERRORDEF( e, v, s ) e = v, +#define FT_ERROR_START_LIST enum { +#define FT_ERROR_END_LIST FT_ERR_CAT( FT_ERR_PREFIX, Max ) }; + +#ifdef __cplusplus +#define FT_NEED_EXTERN_C + extern "C" { +#endif + +#endif /* !FT_ERRORDEF */ + + + /* this macro is used to define an error */ +#define FT_ERRORDEF_( e, v, s ) \ + FT_ERRORDEF( FT_ERR_CAT( FT_ERR_PREFIX, e ), v + FT_ERR_BASE, s ) + + /* this is only used for _Err_Ok, which must be 0! */ +#define FT_NOERRORDEF_( e, v, s ) \ + FT_ERRORDEF( FT_ERR_CAT( FT_ERR_PREFIX, e ), v, s ) + + +#ifdef FT_ERROR_START_LIST + FT_ERROR_START_LIST +#endif + + + /* now include the error codes */ +#include + + +#ifdef FT_ERROR_END_LIST + FT_ERROR_END_LIST +#endif + + + /*******************************************************************/ + /*******************************************************************/ + /***** *****/ + /***** SIMPLE CLEANUP *****/ + /***** *****/ + /*******************************************************************/ + /*******************************************************************/ + +#ifdef FT_NEED_EXTERN_C + } +#endif + +#undef FT_ERROR_START_LIST +#undef FT_ERROR_END_LIST + +#undef FT_ERRORDEF +#undef FT_ERRORDEF_ +#undef FT_NOERRORDEF_ + +#undef FT_NEED_EXTERN_C +#undef FT_ERR_BASE + + /* FT_ERR_PREFIX is needed internally */ +#ifndef FT2_BUILD_LIBRARY +#undef FT_ERR_PREFIX +#endif + + /* FT_INCLUDE_ERR_PROTOS: Control whether function prototypes should be */ + /* included with */ + /* */ + /* #include */ + /* */ + /* This is only true where `FT_ERRORDEF` is */ + /* undefined. */ + /* */ + /* FT_ERR_PROTOS_DEFINED: Actual multiple-inclusion protection of */ + /* `fterrors.h`. */ +#ifdef FT_INCLUDE_ERR_PROTOS +#undef FT_INCLUDE_ERR_PROTOS + +#ifndef FT_ERR_PROTOS_DEFINED +#define FT_ERR_PROTOS_DEFINED + + +FT_BEGIN_HEADER + + /************************************************************************** + * + * @function: + * FT_Error_String + * + * @description: + * Retrieve the description of a valid FreeType error code. 
+ * + * @input: + * error_code :: + * A valid FreeType error code. + * + * @return: + * A C~string or `NULL`, if any error occurred. + * + * @note: + * FreeType has to be compiled with `FT_CONFIG_OPTION_ERROR_STRINGS` or + * `FT_DEBUG_LEVEL_ERROR` to get meaningful descriptions. + * 'error_string' will be `NULL` otherwise. + * + * Module identification will be ignored: + * + * ```c + * strcmp( FT_Error_String( FT_Err_Unknown_File_Format ), + * FT_Error_String( BDF_Err_Unknown_File_Format ) ) == 0; + * ``` + */ + FT_EXPORT( const char* ) + FT_Error_String( FT_Error error_code ); + + /* */ + +FT_END_HEADER + + +#endif /* FT_ERR_PROTOS_DEFINED */ + +#endif /* FT_INCLUDE_ERR_PROTOS */ + +#endif /* !(FTERRORS_H_ && __FTERRORS_H__) */ + + +/* END */ diff --git a/Example/include/freetype/ftfntfmt.h b/Example/include/freetype/ftfntfmt.h new file mode 100644 index 0000000..c0018fc --- /dev/null +++ b/Example/include/freetype/ftfntfmt.h @@ -0,0 +1,93 @@ +/**************************************************************************** + * + * ftfntfmt.h + * + * Support functions for font formats. + * + * Copyright (C) 2002-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTFNTFMT_H_ +#define FTFNTFMT_H_ + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * font_formats + * + * @title: + * Font Formats + * + * @abstract: + * Getting the font format. + * + * @description: + * The single function in this section can be used to get the font format. + * Note that this information is not needed normally; however, there are + * special cases (like in PDF devices) where it is important to + * differentiate, in spite of FreeType's uniform API. + * + */ + + + /************************************************************************** + * + * @function: + * FT_Get_Font_Format + * + * @description: + * Return a string describing the format of a given face. Possible values + * are 'TrueType', 'Type~1', 'BDF', 'PCF', 'Type~42', 'CID~Type~1', 'CFF', + * 'PFR', and 'Windows~FNT'. + * + * The return value is suitable to be used as an X11 FONT_PROPERTY. + * + * @input: + * face :: + * Input face handle. + * + * @return: + * Font format string. `NULL` in case of error. + * + * @note: + * A deprecated name for the same function is `FT_Get_X11_Font_Format`. + */ + FT_EXPORT( const char* ) + FT_Get_Font_Format( FT_Face face ); + + + /* deprecated */ + FT_EXPORT( const char* ) + FT_Get_X11_Font_Format( FT_Face face ); + + + /* */ + + +FT_END_HEADER + +#endif /* FTFNTFMT_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftgasp.h b/Example/include/freetype/ftgasp.h new file mode 100644 index 0000000..d5f19ad --- /dev/null +++ b/Example/include/freetype/ftgasp.h @@ -0,0 +1,143 @@ +/**************************************************************************** + * + * ftgasp.h + * + * Access of TrueType's 'gasp' table (specification). 
+ * + * Copyright (C) 2007-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTGASP_H_ +#define FTGASP_H_ + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * gasp_table + * + * @title: + * Gasp Table + * + * @abstract: + * Retrieving TrueType 'gasp' table entries. + * + * @description: + * The function @FT_Get_Gasp can be used to query a TrueType or OpenType + * font for specific entries in its 'gasp' table, if any. This is mainly + * useful when implementing native TrueType hinting with the bytecode + * interpreter to duplicate the Windows text rendering results. + */ + + /************************************************************************** + * + * @enum: + * FT_GASP_XXX + * + * @description: + * A list of values and/or bit-flags returned by the @FT_Get_Gasp + * function. + * + * @values: + * FT_GASP_NO_TABLE :: + * This special value means that there is no GASP table in this face. + * It is up to the client to decide what to do. + * + * FT_GASP_DO_GRIDFIT :: + * Grid-fitting and hinting should be performed at the specified ppem. + * This **really** means TrueType bytecode interpretation. If this bit + * is not set, no hinting gets applied. + * + * FT_GASP_DO_GRAY :: + * Anti-aliased rendering should be performed at the specified ppem. + * If not set, do monochrome rendering. + * + * FT_GASP_SYMMETRIC_SMOOTHING :: + * If set, smoothing along multiple axes must be used with ClearType. + * + * FT_GASP_SYMMETRIC_GRIDFIT :: + * Grid-fitting must be used with ClearType's symmetric smoothing. + * + * @note: + * The bit-flags `FT_GASP_DO_GRIDFIT` and `FT_GASP_DO_GRAY` are to be + * used for standard font rasterization only. Independently of that, + * `FT_GASP_SYMMETRIC_SMOOTHING` and `FT_GASP_SYMMETRIC_GRIDFIT` are to + * be used if ClearType is enabled (and `FT_GASP_DO_GRIDFIT` and + * `FT_GASP_DO_GRAY` are consequently ignored). + * + * 'ClearType' is Microsoft's implementation of LCD rendering, partly + * protected by patents. + * + * @since: + * 2.3.0 + */ +#define FT_GASP_NO_TABLE -1 +#define FT_GASP_DO_GRIDFIT 0x01 +#define FT_GASP_DO_GRAY 0x02 +#define FT_GASP_SYMMETRIC_GRIDFIT 0x04 +#define FT_GASP_SYMMETRIC_SMOOTHING 0x08 + + + /************************************************************************** + * + * @function: + * FT_Get_Gasp + * + * @description: + * For a TrueType or OpenType font file, return the rasterizer behaviour + * flags from the font's 'gasp' table corresponding to a given character + * pixel size. + * + * @input: + * face :: + * The source face handle. + * + * ppem :: + * The vertical character pixel size. + * + * @return: + * Bit flags (see @FT_GASP_XXX), or @FT_GASP_NO_TABLE if there is no + * 'gasp' table in the face. 
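+   *
+   *   For illustration only, a minimal sketch of interpreting the return
+   *   value (`face', `ppem', `use_hinting', and `use_grayscale' are
+   *   assumed to be set up by the application; error handling omitted):
+   *
+   *   ```
+   *     FT_Int  flags = FT_Get_Gasp( face, ppem );
+   *
+   *
+   *     if ( flags == FT_GASP_NO_TABLE )
+   *     {
+   *       // no `gasp' table: the client decides how to render
+   *     }
+   *     else
+   *     {
+   *       use_hinting   = ( flags & FT_GASP_DO_GRIDFIT ) != 0;
+   *       use_grayscale = ( flags & FT_GASP_DO_GRAY    ) != 0;
+   *     }
+   *   ```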
+ * + * @note: + * If you want to use the MM functionality of OpenType variation fonts + * (i.e., using @FT_Set_Var_Design_Coordinates and friends), call this + * function **after** setting an instance since the return values can + * change. + * + * @since: + * 2.3.0 + */ + FT_EXPORT( FT_Int ) + FT_Get_Gasp( FT_Face face, + FT_UInt ppem ); + + /* */ + + +FT_END_HEADER + +#endif /* FTGASP_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftglyph.h b/Example/include/freetype/ftglyph.h new file mode 100644 index 0000000..4658895 --- /dev/null +++ b/Example/include/freetype/ftglyph.h @@ -0,0 +1,750 @@ +/**************************************************************************** + * + * ftglyph.h + * + * FreeType convenience functions to handle glyphs (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + + /************************************************************************** + * + * This file contains the definition of several convenience functions that + * can be used by client applications to easily retrieve glyph bitmaps and + * outlines from a given face. + * + * These functions should be optional if you are writing a font server or + * text layout engine on top of FreeType. However, they are pretty handy + * for many other simple uses of the library. + * + */ + + +#ifndef FTGLYPH_H_ +#define FTGLYPH_H_ + + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * glyph_management + * + * @title: + * Glyph Management + * + * @abstract: + * Generic interface to manage individual glyph data. + * + * @description: + * This section contains definitions used to manage glyph data through + * generic @FT_Glyph objects. Each of them can contain a bitmap, + * a vector outline, or even images in other formats. These objects are + * detached from @FT_Face, contrary to @FT_GlyphSlot. + * + */ + + + /* forward declaration to a private type */ + typedef struct FT_Glyph_Class_ FT_Glyph_Class; + + + /************************************************************************** + * + * @type: + * FT_Glyph + * + * @description: + * Handle to an object used to model generic glyph images. It is a + * pointer to the @FT_GlyphRec structure and can contain a glyph bitmap + * or pointer. + * + * @note: + * Glyph objects are not owned by the library. You must thus release + * them manually (through @FT_Done_Glyph) _before_ calling + * @FT_Done_FreeType. + */ + typedef struct FT_GlyphRec_* FT_Glyph; + + + /************************************************************************** + * + * @struct: + * FT_GlyphRec + * + * @description: + * The root glyph structure contains a given glyph image plus its advance + * width in 16.16 fixed-point format. + * + * @fields: + * library :: + * A handle to the FreeType library object. + * + * clazz :: + * A pointer to the glyph's class. Private. + * + * format :: + * The format of the glyph's image. 
+ * + * advance :: + * A 16.16 vector that gives the glyph's advance width. + */ + typedef struct FT_GlyphRec_ + { + FT_Library library; + const FT_Glyph_Class* clazz; + FT_Glyph_Format format; + FT_Vector advance; + + } FT_GlyphRec; + + + /************************************************************************** + * + * @type: + * FT_BitmapGlyph + * + * @description: + * A handle to an object used to model a bitmap glyph image. This is a + * 'sub-class' of @FT_Glyph, and a pointer to @FT_BitmapGlyphRec. + */ + typedef struct FT_BitmapGlyphRec_* FT_BitmapGlyph; + + + /************************************************************************** + * + * @struct: + * FT_BitmapGlyphRec + * + * @description: + * A structure used for bitmap glyph images. This really is a + * 'sub-class' of @FT_GlyphRec. + * + * @fields: + * root :: + * The root fields of @FT_Glyph. + * + * left :: + * The left-side bearing, i.e., the horizontal distance from the + * current pen position to the left border of the glyph bitmap. + * + * top :: + * The top-side bearing, i.e., the vertical distance from the current + * pen position to the top border of the glyph bitmap. This distance + * is positive for upwards~y! + * + * bitmap :: + * A descriptor for the bitmap. + * + * @note: + * You can typecast an @FT_Glyph to @FT_BitmapGlyph if you have + * `glyph->format == FT_GLYPH_FORMAT_BITMAP`. This lets you access the + * bitmap's contents easily. + * + * The corresponding pixel buffer is always owned by @FT_BitmapGlyph and + * is thus created and destroyed with it. + */ + typedef struct FT_BitmapGlyphRec_ + { + FT_GlyphRec root; + FT_Int left; + FT_Int top; + FT_Bitmap bitmap; + + } FT_BitmapGlyphRec; + + + /************************************************************************** + * + * @type: + * FT_OutlineGlyph + * + * @description: + * A handle to an object used to model an outline glyph image. This is a + * 'sub-class' of @FT_Glyph, and a pointer to @FT_OutlineGlyphRec. + */ + typedef struct FT_OutlineGlyphRec_* FT_OutlineGlyph; + + + /************************************************************************** + * + * @struct: + * FT_OutlineGlyphRec + * + * @description: + * A structure used for outline (vectorial) glyph images. This really is + * a 'sub-class' of @FT_GlyphRec. + * + * @fields: + * root :: + * The root @FT_Glyph fields. + * + * outline :: + * A descriptor for the outline. + * + * @note: + * You can typecast an @FT_Glyph to @FT_OutlineGlyph if you have + * `glyph->format == FT_GLYPH_FORMAT_OUTLINE`. This lets you access the + * outline's content easily. + * + * As the outline is extracted from a glyph slot, its coordinates are + * expressed normally in 26.6 pixels, unless the flag @FT_LOAD_NO_SCALE + * was used in @FT_Load_Glyph or @FT_Load_Char. + * + * The outline's tables are always owned by the object and are destroyed + * with it. + */ + typedef struct FT_OutlineGlyphRec_ + { + FT_GlyphRec root; + FT_Outline outline; + + } FT_OutlineGlyphRec; + + + /************************************************************************** + * + * @type: + * FT_SvgGlyph + * + * @description: + * A handle to an object used to model an SVG glyph. This is a + * 'sub-class' of @FT_Glyph, and a pointer to @FT_SvgGlyphRec. + * + * @since: + * 2.12 + */ + typedef struct FT_SvgGlyphRec_* FT_SvgGlyph; + + + /************************************************************************** + * + * @struct: + * FT_SvgGlyphRec + * + * @description: + * A structure used for OT-SVG glyphs. This is a 'sub-class' of + * @FT_GlyphRec. 
+ * + * @fields: + * root :: + * The root @FT_GlyphRec fields. + * + * svg_document :: + * A pointer to the SVG document. + * + * svg_document_length :: + * The length of `svg_document`. + * + * glyph_index :: + * The index of the glyph to be rendered. + * + * metrics :: + * A metrics object storing the size information. + * + * units_per_EM :: + * The size of the EM square. + * + * start_glyph_id :: + * The first glyph ID in the glyph range covered by this document. + * + * end_glyph_id :: + * The last glyph ID in the glyph range covered by this document. + * + * transform :: + * A 2x2 transformation matrix to apply to the glyph while rendering + * it. + * + * delta :: + * Translation to apply to the glyph while rendering. + * + * @note: + * The Glyph Management API requires @FT_Glyph or its 'sub-class' to have + * all the information needed to completely define the glyph's rendering. + * Outline-based glyphs can directly apply transformations to the outline + * but this is not possible for an SVG document that hasn't been parsed. + * Therefore, the transformation is stored along with the document. In + * the absence of a 'ViewBox' or 'Width'/'Height' attribute, the size of + * the ViewPort should be assumed to be 'units_per_EM'. + */ + typedef struct FT_SvgGlyphRec_ + { + FT_GlyphRec root; + + FT_Byte* svg_document; + FT_ULong svg_document_length; + + FT_UInt glyph_index; + + FT_Size_Metrics metrics; + FT_UShort units_per_EM; + + FT_UShort start_glyph_id; + FT_UShort end_glyph_id; + + FT_Matrix transform; + FT_Vector delta; + + } FT_SvgGlyphRec; + + + /************************************************************************** + * + * @function: + * FT_New_Glyph + * + * @description: + * A function used to create a new empty glyph image. Note that the + * created @FT_Glyph object must be released with @FT_Done_Glyph. + * + * @input: + * library :: + * A handle to the FreeType library object. + * + * format :: + * The format of the glyph's image. + * + * @output: + * aglyph :: + * A handle to the glyph object. + * + * @return: + * FreeType error code. 0~means success. + * + * @since: + * 2.10 + */ + FT_EXPORT( FT_Error ) + FT_New_Glyph( FT_Library library, + FT_Glyph_Format format, + FT_Glyph *aglyph ); + + + /************************************************************************** + * + * @function: + * FT_Get_Glyph + * + * @description: + * A function used to extract a glyph image from a slot. Note that the + * created @FT_Glyph object must be released with @FT_Done_Glyph. + * + * @input: + * slot :: + * A handle to the source glyph slot. + * + * @output: + * aglyph :: + * A handle to the glyph object. `NULL` in case of error. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * Because `*aglyph->advance.x` and `*aglyph->advance.y` are 16.16 + * fixed-point numbers, `slot->advance.x` and `slot->advance.y` (which + * are in 26.6 fixed-point format) must be in the range ]-32768;32768[. + */ + FT_EXPORT( FT_Error ) + FT_Get_Glyph( FT_GlyphSlot slot, + FT_Glyph *aglyph ); + + + /************************************************************************** + * + * @function: + * FT_Glyph_Copy + * + * @description: + * A function used to copy a glyph image. Note that the created + * @FT_Glyph object must be released with @FT_Done_Glyph. + * + * @input: + * source :: + * A handle to the source glyph object. + * + * @output: + * target :: + * A handle to the target glyph object. `NULL` in case of error. + * + * @return: + * FreeType error code. 0~means success. 
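+   *
+   *   A minimal usage sketch (assuming `source' already holds a glyph
+   *   image, e.g., one obtained with @FT_Get_Glyph; error handling
+   *   omitted):
+   *
+   *   ```
+   *     FT_Glyph  copy = NULL;
+   *
+   *
+   *     FT_Glyph_Copy( source, &copy );
+   *
+   *     // use `copy' independently of `source' ...
+   *
+   *     FT_Done_Glyph( copy );
+   *   ```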
+ */ + FT_EXPORT( FT_Error ) + FT_Glyph_Copy( FT_Glyph source, + FT_Glyph *target ); + + + /************************************************************************** + * + * @function: + * FT_Glyph_Transform + * + * @description: + * Transform a glyph image if its format is scalable. + * + * @inout: + * glyph :: + * A handle to the target glyph object. + * + * @input: + * matrix :: + * A pointer to a 2x2 matrix to apply. + * + * delta :: + * A pointer to a 2d vector to apply. Coordinates are expressed in + * 1/64 of a pixel. + * + * @return: + * FreeType error code (if not 0, the glyph format is not scalable). + * + * @note: + * The 2x2 transformation matrix is also applied to the glyph's advance + * vector. + */ + FT_EXPORT( FT_Error ) + FT_Glyph_Transform( FT_Glyph glyph, + const FT_Matrix* matrix, + const FT_Vector* delta ); + + + /************************************************************************** + * + * @enum: + * FT_Glyph_BBox_Mode + * + * @description: + * The mode how the values of @FT_Glyph_Get_CBox are returned. + * + * @values: + * FT_GLYPH_BBOX_UNSCALED :: + * Return unscaled font units. + * + * FT_GLYPH_BBOX_SUBPIXELS :: + * Return unfitted 26.6 coordinates. + * + * FT_GLYPH_BBOX_GRIDFIT :: + * Return grid-fitted 26.6 coordinates. + * + * FT_GLYPH_BBOX_TRUNCATE :: + * Return coordinates in integer pixels. + * + * FT_GLYPH_BBOX_PIXELS :: + * Return grid-fitted pixel coordinates. + */ + typedef enum FT_Glyph_BBox_Mode_ + { + FT_GLYPH_BBOX_UNSCALED = 0, + FT_GLYPH_BBOX_SUBPIXELS = 0, + FT_GLYPH_BBOX_GRIDFIT = 1, + FT_GLYPH_BBOX_TRUNCATE = 2, + FT_GLYPH_BBOX_PIXELS = 3 + + } FT_Glyph_BBox_Mode; + + + /* these constants are deprecated; use the corresponding */ + /* `FT_Glyph_BBox_Mode` values instead */ +#define ft_glyph_bbox_unscaled FT_GLYPH_BBOX_UNSCALED +#define ft_glyph_bbox_subpixels FT_GLYPH_BBOX_SUBPIXELS +#define ft_glyph_bbox_gridfit FT_GLYPH_BBOX_GRIDFIT +#define ft_glyph_bbox_truncate FT_GLYPH_BBOX_TRUNCATE +#define ft_glyph_bbox_pixels FT_GLYPH_BBOX_PIXELS + + + /************************************************************************** + * + * @function: + * FT_Glyph_Get_CBox + * + * @description: + * Return a glyph's 'control box'. The control box encloses all the + * outline's points, including Bezier control points. Though it + * coincides with the exact bounding box for most glyphs, it can be + * slightly larger in some situations (like when rotating an outline that + * contains Bezier outside arcs). + * + * Computing the control box is very fast, while getting the bounding box + * can take much more time as it needs to walk over all segments and arcs + * in the outline. To get the latter, you can use the 'ftbbox' + * component, which is dedicated to this single task. + * + * @input: + * glyph :: + * A handle to the source glyph object. + * + * mode :: + * The mode that indicates how to interpret the returned bounding box + * values. + * + * @output: + * acbox :: + * The glyph coordinate bounding box. Coordinates are expressed in + * 1/64 of pixels if it is grid-fitted. + * + * @note: + * Coordinates are relative to the glyph origin, using the y~upwards + * convention. + * + * If the glyph has been loaded with @FT_LOAD_NO_SCALE, `bbox_mode` must + * be set to @FT_GLYPH_BBOX_UNSCALED to get unscaled font units in 26.6 + * pixel format. The value @FT_GLYPH_BBOX_SUBPIXELS is another name for + * this constant. + * + * If the font is tricky and the glyph has been loaded with + * @FT_LOAD_NO_SCALE, the resulting CBox is meaningless. 
To get + * reasonable values for the CBox it is necessary to load the glyph at a + * large ppem value (so that the hinting instructions can properly shift + * and scale the subglyphs), then extracting the CBox, which can be + * eventually converted back to font units. + * + * Note that the maximum coordinates are exclusive, which means that one + * can compute the width and height of the glyph image (be it in integer + * or 26.6 pixels) as: + * + * ``` + * width = bbox.xMax - bbox.xMin; + * height = bbox.yMax - bbox.yMin; + * ``` + * + * Note also that for 26.6 coordinates, if `bbox_mode` is set to + * @FT_GLYPH_BBOX_GRIDFIT, the coordinates will also be grid-fitted, + * which corresponds to: + * + * ``` + * bbox.xMin = FLOOR(bbox.xMin); + * bbox.yMin = FLOOR(bbox.yMin); + * bbox.xMax = CEILING(bbox.xMax); + * bbox.yMax = CEILING(bbox.yMax); + * ``` + * + * To get the bbox in pixel coordinates, set `bbox_mode` to + * @FT_GLYPH_BBOX_TRUNCATE. + * + * To get the bbox in grid-fitted pixel coordinates, set `bbox_mode` to + * @FT_GLYPH_BBOX_PIXELS. + */ + FT_EXPORT( void ) + FT_Glyph_Get_CBox( FT_Glyph glyph, + FT_UInt bbox_mode, + FT_BBox *acbox ); + + + /************************************************************************** + * + * @function: + * FT_Glyph_To_Bitmap + * + * @description: + * Convert a given glyph object to a bitmap glyph object. + * + * @inout: + * the_glyph :: + * A pointer to a handle to the target glyph. + * + * @input: + * render_mode :: + * An enumeration that describes how the data is rendered. + * + * origin :: + * A pointer to a vector used to translate the glyph image before + * rendering. Can be~0 (if no translation). The origin is expressed + * in 26.6 pixels. + * + * destroy :: + * A boolean that indicates that the original glyph image should be + * destroyed by this function. It is never destroyed in case of error. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function does nothing if the glyph format isn't scalable. + * + * The glyph image is translated with the `origin` vector before + * rendering. + * + * The first parameter is a pointer to an @FT_Glyph handle that will be + * _replaced_ by this function (with newly allocated data). Typically, + * you would do something like the following (omitting error handling). + * + * ``` + * FT_Glyph glyph; + * FT_BitmapGlyph glyph_bitmap; + * + * + * // load glyph + * error = FT_Load_Char( face, glyph_index, FT_LOAD_DEFAULT ); + * + * // extract glyph image + * error = FT_Get_Glyph( face->glyph, &glyph ); + * + * // convert to a bitmap (default render mode + destroying old) + * if ( glyph->format != FT_GLYPH_FORMAT_BITMAP ) + * { + * error = FT_Glyph_To_Bitmap( &glyph, FT_RENDER_MODE_NORMAL, + * 0, 1 ); + * if ( error ) // `glyph' unchanged + * ... + * } + * + * // access bitmap content by typecasting + * glyph_bitmap = (FT_BitmapGlyph)glyph; + * + * // do funny stuff with it, like blitting/drawing + * ... + * + * // discard glyph image (bitmap or not) + * FT_Done_Glyph( glyph ); + * ``` + * + * Here is another example, again without error handling. + * + * ``` + * FT_Glyph glyphs[MAX_GLYPHS] + * + * + * ... + * + * for ( idx = 0; i < MAX_GLYPHS; i++ ) + * error = FT_Load_Glyph( face, idx, FT_LOAD_DEFAULT ) || + * FT_Get_Glyph ( face->glyph, &glyphs[idx] ); + * + * ... + * + * for ( idx = 0; i < MAX_GLYPHS; i++ ) + * { + * FT_Glyph bitmap = glyphs[idx]; + * + * + * ... 
+ * + * // after this call, `bitmap' no longer points into + * // the `glyphs' array (and the old value isn't destroyed) + * FT_Glyph_To_Bitmap( &bitmap, FT_RENDER_MODE_MONO, 0, 0 ); + * + * ... + * + * FT_Done_Glyph( bitmap ); + * } + * + * ... + * + * for ( idx = 0; i < MAX_GLYPHS; i++ ) + * FT_Done_Glyph( glyphs[idx] ); + * ``` + */ + FT_EXPORT( FT_Error ) + FT_Glyph_To_Bitmap( FT_Glyph* the_glyph, + FT_Render_Mode render_mode, + const FT_Vector* origin, + FT_Bool destroy ); + + + /************************************************************************** + * + * @function: + * FT_Done_Glyph + * + * @description: + * Destroy a given glyph. + * + * @input: + * glyph :: + * A handle to the target glyph object. Can be `NULL`. + */ + FT_EXPORT( void ) + FT_Done_Glyph( FT_Glyph glyph ); + + /* */ + + + /* other helpful functions */ + + /************************************************************************** + * + * @section: + * computations + * + */ + + + /************************************************************************** + * + * @function: + * FT_Matrix_Multiply + * + * @description: + * Perform the matrix operation `b = a*b`. + * + * @input: + * a :: + * A pointer to matrix `a`. + * + * @inout: + * b :: + * A pointer to matrix `b`. + * + * @note: + * The result is undefined if either `a` or `b` is zero. + * + * Since the function uses wrap-around arithmetic, results become + * meaningless if the arguments are very large. + */ + FT_EXPORT( void ) + FT_Matrix_Multiply( const FT_Matrix* a, + FT_Matrix* b ); + + + /************************************************************************** + * + * @function: + * FT_Matrix_Invert + * + * @description: + * Invert a 2x2 matrix. Return an error if it can't be inverted. + * + * @inout: + * matrix :: + * A pointer to the target matrix. Remains untouched in case of error. + * + * @return: + * FreeType error code. 0~means success. + */ + FT_EXPORT( FT_Error ) + FT_Matrix_Invert( FT_Matrix* matrix ); + + /* */ + + +FT_END_HEADER + +#endif /* FTGLYPH_H_ */ + + +/* END */ + + +/* Local Variables: */ +/* coding: utf-8 */ +/* End: */ diff --git a/Example/include/freetype/ftgxval.h b/Example/include/freetype/ftgxval.h new file mode 100644 index 0000000..e8de9a6 --- /dev/null +++ b/Example/include/freetype/ftgxval.h @@ -0,0 +1,354 @@ +/**************************************************************************** + * + * ftgxval.h + * + * FreeType API for validating TrueTypeGX/AAT tables (specification). + * + * Copyright (C) 2004-2023 by + * Masatake YAMATO, Redhat K.K, + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + +/**************************************************************************** + * + * gxvalid is derived from both gxlayout module and otvalid module. + * Development of gxlayout is supported by the Information-technology + * Promotion Agency(IPA), Japan. + * + */ + + +#ifndef FTGXVAL_H_ +#define FTGXVAL_H_ + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." 
+#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * gx_validation + * + * @title: + * TrueTypeGX/AAT Validation + * + * @abstract: + * An API to validate TrueTypeGX/AAT tables. + * + * @description: + * This section contains the declaration of functions to validate some + * TrueTypeGX tables (feat, mort, morx, bsln, just, kern, opbd, trak, + * prop, lcar). + * + * @order: + * FT_TrueTypeGX_Validate + * FT_TrueTypeGX_Free + * + * FT_ClassicKern_Validate + * FT_ClassicKern_Free + * + * FT_VALIDATE_GX_LENGTH + * FT_VALIDATE_GXXXX + * FT_VALIDATE_CKERNXXX + * + */ + + /************************************************************************** + * + * + * Warning: Use `FT_VALIDATE_XXX` to validate a table. + * Following definitions are for gxvalid developers. + * + * + */ + +#define FT_VALIDATE_feat_INDEX 0 +#define FT_VALIDATE_mort_INDEX 1 +#define FT_VALIDATE_morx_INDEX 2 +#define FT_VALIDATE_bsln_INDEX 3 +#define FT_VALIDATE_just_INDEX 4 +#define FT_VALIDATE_kern_INDEX 5 +#define FT_VALIDATE_opbd_INDEX 6 +#define FT_VALIDATE_trak_INDEX 7 +#define FT_VALIDATE_prop_INDEX 8 +#define FT_VALIDATE_lcar_INDEX 9 +#define FT_VALIDATE_GX_LAST_INDEX FT_VALIDATE_lcar_INDEX + + + /************************************************************************** + * + * @macro: + * FT_VALIDATE_GX_LENGTH + * + * @description: + * The number of tables checked in this module. Use it as a parameter + * for the `table-length` argument of function @FT_TrueTypeGX_Validate. + */ +#define FT_VALIDATE_GX_LENGTH ( FT_VALIDATE_GX_LAST_INDEX + 1 ) + + /* */ + + /* Up to 0x1000 is used by otvalid. + Ox2xxx is reserved for feature OT extension. */ +#define FT_VALIDATE_GX_START 0x4000 +#define FT_VALIDATE_GX_BITFIELD( tag ) \ + ( FT_VALIDATE_GX_START << FT_VALIDATE_##tag##_INDEX ) + + + /************************************************************************** + * + * @enum: + * FT_VALIDATE_GXXXX + * + * @description: + * A list of bit-field constants used with @FT_TrueTypeGX_Validate to + * indicate which TrueTypeGX/AAT Type tables should be validated. + * + * @values: + * FT_VALIDATE_feat :: + * Validate 'feat' table. + * + * FT_VALIDATE_mort :: + * Validate 'mort' table. + * + * FT_VALIDATE_morx :: + * Validate 'morx' table. + * + * FT_VALIDATE_bsln :: + * Validate 'bsln' table. + * + * FT_VALIDATE_just :: + * Validate 'just' table. + * + * FT_VALIDATE_kern :: + * Validate 'kern' table. + * + * FT_VALIDATE_opbd :: + * Validate 'opbd' table. + * + * FT_VALIDATE_trak :: + * Validate 'trak' table. + * + * FT_VALIDATE_prop :: + * Validate 'prop' table. + * + * FT_VALIDATE_lcar :: + * Validate 'lcar' table. + * + * FT_VALIDATE_GX :: + * Validate all TrueTypeGX tables (feat, mort, morx, bsln, just, kern, + * opbd, trak, prop and lcar). 
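+   *
+   *     For illustration, a minimal sketch of passing these flags to
+   *     @FT_TrueTypeGX_Validate, declared below (`face' is assumed to be
+   *     a valid TrueTypeGX face handle; error handling omitted):
+   *
+   *     ```
+   *       FT_Bytes  tables[FT_VALIDATE_GX_LENGTH];
+   *       FT_UInt   i;
+   *
+   *
+   *       FT_TrueTypeGX_Validate( face,
+   *                               FT_VALIDATE_feat | FT_VALIDATE_mort,
+   *                               tables,
+   *                               FT_VALIDATE_GX_LENGTH );
+   *
+   *       // ... inspect the validated `feat' and `mort' data ...
+   *
+   *       for ( i = 0; i < FT_VALIDATE_GX_LENGTH; i++ )
+   *         if ( tables[i] )
+   *           FT_TrueTypeGX_Free( face, tables[i] );
+   *     ```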
+ * + */ + +#define FT_VALIDATE_feat FT_VALIDATE_GX_BITFIELD( feat ) +#define FT_VALIDATE_mort FT_VALIDATE_GX_BITFIELD( mort ) +#define FT_VALIDATE_morx FT_VALIDATE_GX_BITFIELD( morx ) +#define FT_VALIDATE_bsln FT_VALIDATE_GX_BITFIELD( bsln ) +#define FT_VALIDATE_just FT_VALIDATE_GX_BITFIELD( just ) +#define FT_VALIDATE_kern FT_VALIDATE_GX_BITFIELD( kern ) +#define FT_VALIDATE_opbd FT_VALIDATE_GX_BITFIELD( opbd ) +#define FT_VALIDATE_trak FT_VALIDATE_GX_BITFIELD( trak ) +#define FT_VALIDATE_prop FT_VALIDATE_GX_BITFIELD( prop ) +#define FT_VALIDATE_lcar FT_VALIDATE_GX_BITFIELD( lcar ) + +#define FT_VALIDATE_GX ( FT_VALIDATE_feat | \ + FT_VALIDATE_mort | \ + FT_VALIDATE_morx | \ + FT_VALIDATE_bsln | \ + FT_VALIDATE_just | \ + FT_VALIDATE_kern | \ + FT_VALIDATE_opbd | \ + FT_VALIDATE_trak | \ + FT_VALIDATE_prop | \ + FT_VALIDATE_lcar ) + + + /************************************************************************** + * + * @function: + * FT_TrueTypeGX_Validate + * + * @description: + * Validate various TrueTypeGX tables to assure that all offsets and + * indices are valid. The idea is that a higher-level library that + * actually does the text layout can access those tables without error + * checking (which can be quite time consuming). + * + * @input: + * face :: + * A handle to the input face. + * + * validation_flags :: + * A bit field that specifies the tables to be validated. See + * @FT_VALIDATE_GXXXX for possible values. + * + * table_length :: + * The size of the `tables` array. Normally, @FT_VALIDATE_GX_LENGTH + * should be passed. + * + * @output: + * tables :: + * The array where all validated sfnt tables are stored. The array + * itself must be allocated by a client. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function only works with TrueTypeGX fonts, returning an error + * otherwise. + * + * After use, the application should deallocate the buffers pointed to by + * each `tables` element, by calling @FT_TrueTypeGX_Free. A `NULL` value + * indicates that the table either doesn't exist in the font, the + * application hasn't asked for validation, or the validator doesn't have + * the ability to validate the sfnt table. + */ + FT_EXPORT( FT_Error ) + FT_TrueTypeGX_Validate( FT_Face face, + FT_UInt validation_flags, + FT_Bytes tables[FT_VALIDATE_GX_LENGTH], + FT_UInt table_length ); + + + /************************************************************************** + * + * @function: + * FT_TrueTypeGX_Free + * + * @description: + * Free the buffer allocated by TrueTypeGX validator. + * + * @input: + * face :: + * A handle to the input face. + * + * table :: + * The pointer to the buffer allocated by @FT_TrueTypeGX_Validate. + * + * @note: + * This function must be used to free the buffer allocated by + * @FT_TrueTypeGX_Validate only. + */ + FT_EXPORT( void ) + FT_TrueTypeGX_Free( FT_Face face, + FT_Bytes table ); + + + /************************************************************************** + * + * @enum: + * FT_VALIDATE_CKERNXXX + * + * @description: + * A list of bit-field constants used with @FT_ClassicKern_Validate to + * indicate the classic kern dialect or dialects. If the selected type + * doesn't fit, @FT_ClassicKern_Validate regards the table as invalid. + * + * @values: + * FT_VALIDATE_MS :: + * Handle the 'kern' table as a classic Microsoft kern table. + * + * FT_VALIDATE_APPLE :: + * Handle the 'kern' table as a classic Apple kern table. 
+ * + * FT_VALIDATE_CKERN :: + * Handle the 'kern' as either classic Apple or Microsoft kern table. + */ +#define FT_VALIDATE_MS ( FT_VALIDATE_GX_START << 0 ) +#define FT_VALIDATE_APPLE ( FT_VALIDATE_GX_START << 1 ) + +#define FT_VALIDATE_CKERN ( FT_VALIDATE_MS | FT_VALIDATE_APPLE ) + + + /************************************************************************** + * + * @function: + * FT_ClassicKern_Validate + * + * @description: + * Validate classic (16-bit format) kern table to assure that the + * offsets and indices are valid. The idea is that a higher-level + * library that actually does the text layout can access those tables + * without error checking (which can be quite time consuming). + * + * The 'kern' table validator in @FT_TrueTypeGX_Validate deals with both + * the new 32-bit format and the classic 16-bit format, while + * FT_ClassicKern_Validate only supports the classic 16-bit format. + * + * @input: + * face :: + * A handle to the input face. + * + * validation_flags :: + * A bit field that specifies the dialect to be validated. See + * @FT_VALIDATE_CKERNXXX for possible values. + * + * @output: + * ckern_table :: + * A pointer to the kern table. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * After use, the application should deallocate the buffers pointed to by + * `ckern_table`, by calling @FT_ClassicKern_Free. A `NULL` value + * indicates that the table doesn't exist in the font. + */ + FT_EXPORT( FT_Error ) + FT_ClassicKern_Validate( FT_Face face, + FT_UInt validation_flags, + FT_Bytes *ckern_table ); + + + /************************************************************************** + * + * @function: + * FT_ClassicKern_Free + * + * @description: + * Free the buffer allocated by classic Kern validator. + * + * @input: + * face :: + * A handle to the input face. + * + * table :: + * The pointer to the buffer that is allocated by + * @FT_ClassicKern_Validate. + * + * @note: + * This function must be used to free the buffer allocated by + * @FT_ClassicKern_Validate only. + */ + FT_EXPORT( void ) + FT_ClassicKern_Free( FT_Face face, + FT_Bytes table ); + + /* */ + + +FT_END_HEADER + +#endif /* FTGXVAL_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftgzip.h b/Example/include/freetype/ftgzip.h new file mode 100644 index 0000000..443ec29 --- /dev/null +++ b/Example/include/freetype/ftgzip.h @@ -0,0 +1,151 @@ +/**************************************************************************** + * + * ftgzip.h + * + * Gzip-compressed stream support. + * + * Copyright (C) 2002-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTGZIP_H_ +#define FTGZIP_H_ + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + /************************************************************************** + * + * @section: + * gzip + * + * @title: + * GZIP Streams + * + * @abstract: + * Using gzip-compressed font files. 
+ * + * @description: + * In certain builds of the library, gzip compression recognition is + * automatically handled when calling @FT_New_Face or @FT_Open_Face. + * This means that if no font driver is capable of handling the raw + * compressed file, the library will try to open a gzipped stream from it + * and re-open the face with it. + * + * The stream implementation is very basic and resets the decompression + * process each time seeking backwards is needed within the stream, + * which significantly undermines the performance. + * + * This section contains the declaration of Gzip-specific functions. + * + */ + + + /************************************************************************** + * + * @function: + * FT_Stream_OpenGzip + * + * @description: + * Open a new stream to parse gzip-compressed font files. This is mainly + * used to support the compressed `*.pcf.gz` fonts that come with + * XFree86. + * + * @input: + * stream :: + * The target embedding stream. + * + * source :: + * The source stream. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The source stream must be opened _before_ calling this function. + * + * Calling the internal function `FT_Stream_Close` on the new stream will + * **not** call `FT_Stream_Close` on the source stream. None of the + * stream objects will be released to the heap. + * + * This function may return `FT_Err_Unimplemented_Feature` if your build + * of FreeType was not compiled with zlib support. + */ + FT_EXPORT( FT_Error ) + FT_Stream_OpenGzip( FT_Stream stream, + FT_Stream source ); + + + /************************************************************************** + * + * @function: + * FT_Gzip_Uncompress + * + * @description: + * Decompress a zipped input buffer into an output buffer. This function + * is modeled after zlib's `uncompress` function. + * + * @input: + * memory :: + * A FreeType memory handle. + * + * input :: + * The input buffer. + * + * input_len :: + * The length of the input buffer. + * + * @output: + * output :: + * The output buffer. + * + * @inout: + * output_len :: + * Before calling the function, this is the total size of the output + * buffer, which must be large enough to hold the entire uncompressed + * data (so the size of the uncompressed data must be known in + * advance). After calling the function, `output_len` is the size of + * the used data in `output`. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function may return `FT_Err_Unimplemented_Feature` if your build + * of FreeType was not compiled with zlib support. + * + * @since: + * 2.5.1 + */ + FT_EXPORT( FT_Error ) + FT_Gzip_Uncompress( FT_Memory memory, + FT_Byte* output, + FT_ULong* output_len, + const FT_Byte* input, + FT_ULong input_len ); + + /* */ + + +FT_END_HEADER + +#endif /* FTGZIP_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftimage.h b/Example/include/freetype/ftimage.h new file mode 100644 index 0000000..6baa812 --- /dev/null +++ b/Example/include/freetype/ftimage.h @@ -0,0 +1,1284 @@ +/**************************************************************************** + * + * ftimage.h + * + * FreeType glyph image formats and default raster interface + * (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. 
By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + /************************************************************************** + * + * Note: A 'raster' is simply a scan-line converter, used to render + * `FT_Outline`s into `FT_Bitmap`s. + * + */ + + +#ifndef FTIMAGE_H_ +#define FTIMAGE_H_ + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * basic_types + * + */ + + + /************************************************************************** + * + * @type: + * FT_Pos + * + * @description: + * The type FT_Pos is used to store vectorial coordinates. Depending on + * the context, these can represent distances in integer font units, or + * 16.16, or 26.6 fixed-point pixel coordinates. + */ + typedef signed long FT_Pos; + + + /************************************************************************** + * + * @struct: + * FT_Vector + * + * @description: + * A simple structure used to store a 2D vector; coordinates are of the + * FT_Pos type. + * + * @fields: + * x :: + * The horizontal coordinate. + * y :: + * The vertical coordinate. + */ + typedef struct FT_Vector_ + { + FT_Pos x; + FT_Pos y; + + } FT_Vector; + + + /************************************************************************** + * + * @struct: + * FT_BBox + * + * @description: + * A structure used to hold an outline's bounding box, i.e., the + * coordinates of its extrema in the horizontal and vertical directions. + * + * @fields: + * xMin :: + * The horizontal minimum (left-most). + * + * yMin :: + * The vertical minimum (bottom-most). + * + * xMax :: + * The horizontal maximum (right-most). + * + * yMax :: + * The vertical maximum (top-most). + * + * @note: + * The bounding box is specified with the coordinates of the lower left + * and the upper right corner. In PostScript, those values are often + * called (llx,lly) and (urx,ury), respectively. + * + * If `yMin` is negative, this value gives the glyph's descender. + * Otherwise, the glyph doesn't descend below the baseline. Similarly, + * if `ymax` is positive, this value gives the glyph's ascender. + * + * `xMin` gives the horizontal distance from the glyph's origin to the + * left edge of the glyph's bounding box. If `xMin` is negative, the + * glyph extends to the left of the origin. + */ + typedef struct FT_BBox_ + { + FT_Pos xMin, yMin; + FT_Pos xMax, yMax; + + } FT_BBox; + + + /************************************************************************** + * + * @enum: + * FT_Pixel_Mode + * + * @description: + * An enumeration type used to describe the format of pixels in a given + * bitmap. Note that additional formats may be added in the future. + * + * @values: + * FT_PIXEL_MODE_NONE :: + * Value~0 is reserved. + * + * FT_PIXEL_MODE_MONO :: + * A monochrome bitmap, using 1~bit per pixel. Note that pixels are + * stored in most-significant order (MSB), which means that the + * left-most pixel in a byte has value 128. + * + * FT_PIXEL_MODE_GRAY :: + * An 8-bit bitmap, generally used to represent anti-aliased glyph + * images. Each pixel is stored in one byte. Note that the number of + * 'gray' levels is stored in the `num_grays` field of the @FT_Bitmap + * structure (it generally is 256). + * + * FT_PIXEL_MODE_GRAY2 :: + * A 2-bit per pixel bitmap, used to represent embedded anti-aliased + * bitmaps in font files according to the OpenType specification. 
We + * haven't found a single font using this format, however. + * + * FT_PIXEL_MODE_GRAY4 :: + * A 4-bit per pixel bitmap, representing embedded anti-aliased bitmaps + * in font files according to the OpenType specification. We haven't + * found a single font using this format, however. + * + * FT_PIXEL_MODE_LCD :: + * An 8-bit bitmap, representing RGB or BGR decimated glyph images used + * for display on LCD displays; the bitmap is three times wider than + * the original glyph image. See also @FT_RENDER_MODE_LCD. + * + * FT_PIXEL_MODE_LCD_V :: + * An 8-bit bitmap, representing RGB or BGR decimated glyph images used + * for display on rotated LCD displays; the bitmap is three times + * taller than the original glyph image. See also + * @FT_RENDER_MODE_LCD_V. + * + * FT_PIXEL_MODE_BGRA :: + * [Since 2.5] An image with four 8-bit channels per pixel, + * representing a color image (such as emoticons) with alpha channel. + * For each pixel, the format is BGRA, which means, the blue channel + * comes first in memory. The color channels are pre-multiplied and in + * the sRGB colorspace. For example, full red at half-translucent + * opacity will be represented as '00,00,80,80', not '00,00,FF,80'. + * See also @FT_LOAD_COLOR. + */ + typedef enum FT_Pixel_Mode_ + { + FT_PIXEL_MODE_NONE = 0, + FT_PIXEL_MODE_MONO, + FT_PIXEL_MODE_GRAY, + FT_PIXEL_MODE_GRAY2, + FT_PIXEL_MODE_GRAY4, + FT_PIXEL_MODE_LCD, + FT_PIXEL_MODE_LCD_V, + FT_PIXEL_MODE_BGRA, + + FT_PIXEL_MODE_MAX /* do not remove */ + + } FT_Pixel_Mode; + + + /* these constants are deprecated; use the corresponding `FT_Pixel_Mode` */ + /* values instead. */ +#define ft_pixel_mode_none FT_PIXEL_MODE_NONE +#define ft_pixel_mode_mono FT_PIXEL_MODE_MONO +#define ft_pixel_mode_grays FT_PIXEL_MODE_GRAY +#define ft_pixel_mode_pal2 FT_PIXEL_MODE_GRAY2 +#define ft_pixel_mode_pal4 FT_PIXEL_MODE_GRAY4 + + /* */ + + /* For debugging, the @FT_Pixel_Mode enumeration must stay in sync */ + /* with the `pixel_modes` array in file `ftobjs.c`. */ + + + /************************************************************************** + * + * @struct: + * FT_Bitmap + * + * @description: + * A structure used to describe a bitmap or pixmap to the raster. Note + * that we now manage pixmaps of various depths through the `pixel_mode` + * field. + * + * @fields: + * rows :: + * The number of bitmap rows. + * + * width :: + * The number of pixels in bitmap row. + * + * pitch :: + * The pitch's absolute value is the number of bytes taken by one + * bitmap row, including padding. However, the pitch is positive when + * the bitmap has a 'down' flow, and negative when it has an 'up' flow. + * In all cases, the pitch is an offset to add to a bitmap pointer in + * order to go down one row. + * + * Note that 'padding' means the alignment of a bitmap to a byte + * border, and FreeType functions normally align to the smallest + * possible integer value. + * + * For the B/W rasterizer, `pitch` is always an even number. + * + * To change the pitch of a bitmap (say, to make it a multiple of 4), + * use @FT_Bitmap_Convert. Alternatively, you might use callback + * functions to directly render to the application's surface; see the + * file `example2.cpp` in the tutorial for a demonstration. + * + * buffer :: + * A typeless pointer to the bitmap buffer. This value should be + * aligned on 32-bit boundaries in most cases. + * + * num_grays :: + * This field is only used with @FT_PIXEL_MODE_GRAY; it gives the + * number of gray levels used in the bitmap. 
+ * + * pixel_mode :: + * The pixel mode, i.e., how pixel bits are stored. See @FT_Pixel_Mode + * for possible values. + * + * palette_mode :: + * This field is intended for paletted pixel modes; it indicates how + * the palette is stored. Not used currently. + * + * palette :: + * A typeless pointer to the bitmap palette; this field is intended for + * paletted pixel modes. Not used currently. + * + * @note: + * `width` and `rows` refer to the *physical* size of the bitmap, not the + * *logical* one. For example, if @FT_Pixel_Mode is set to + * `FT_PIXEL_MODE_LCD`, the logical width is a just a third of the + * physical one. + */ + typedef struct FT_Bitmap_ + { + unsigned int rows; + unsigned int width; + int pitch; + unsigned char* buffer; + unsigned short num_grays; + unsigned char pixel_mode; + unsigned char palette_mode; + void* palette; + + } FT_Bitmap; + + + /************************************************************************** + * + * @section: + * outline_processing + * + */ + + + /************************************************************************** + * + * @struct: + * FT_Outline + * + * @description: + * This structure is used to describe an outline to the scan-line + * converter. + * + * @fields: + * n_contours :: + * The number of contours in the outline. + * + * n_points :: + * The number of points in the outline. + * + * points :: + * A pointer to an array of `n_points` @FT_Vector elements, giving the + * outline's point coordinates. + * + * tags :: + * A pointer to an array of `n_points` chars, giving each outline + * point's type. + * + * If bit~0 is unset, the point is 'off' the curve, i.e., a Bezier + * control point, while it is 'on' if set. + * + * Bit~1 is meaningful for 'off' points only. If set, it indicates a + * third-order Bezier arc control point; and a second-order control + * point if unset. + * + * If bit~2 is set, bits 5-7 contain the drop-out mode (as defined in + * the OpenType specification; the value is the same as the argument to + * the 'SCANMODE' instruction). + * + * Bits 3 and~4 are reserved for internal purposes. + * + * contours :: + * An array of `n_contours` shorts, giving the end point of each + * contour within the outline. For example, the first contour is + * defined by the points '0' to `contours[0]`, the second one is + * defined by the points `contours[0]+1` to `contours[1]`, etc. + * + * flags :: + * A set of bit flags used to characterize the outline and give hints + * to the scan-converter and hinter on how to convert/grid-fit it. See + * @FT_OUTLINE_XXX. + * + * @note: + * The B/W rasterizer only checks bit~2 in the `tags` array for the first + * point of each contour. The drop-out mode as given with + * @FT_OUTLINE_IGNORE_DROPOUTS, @FT_OUTLINE_SMART_DROPOUTS, and + * @FT_OUTLINE_INCLUDE_STUBS in `flags` is then overridden. 
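+   *
+   *   As a hypothetical illustration, a single triangular contour made of
+   *   three 'on' points (26.6 pixel coordinates) could be set up as
+   *   follows:
+   *
+   *   ```
+   *     static FT_Vector  points[3]   = { {   0,   0 },
+   *                                       { 640,   0 },
+   *                                       { 320, 640 } };
+   *     static char       tags[3]     = { FT_CURVE_TAG_ON,
+   *                                       FT_CURVE_TAG_ON,
+   *                                       FT_CURVE_TAG_ON };
+   *     static short      contours[1] = { 2 };   // index of last point
+   *
+   *     FT_Outline  outline;
+   *
+   *
+   *     outline.n_contours = 1;
+   *     outline.n_points   = 3;
+   *     outline.points     = points;
+   *     outline.tags       = tags;
+   *     outline.contours   = contours;
+   *     outline.flags      = FT_OUTLINE_NONE;
+   *   ```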
+ */ + typedef struct FT_Outline_ + { + short n_contours; /* number of contours in glyph */ + short n_points; /* number of points in the glyph */ + + FT_Vector* points; /* the outline's points */ + char* tags; /* the points flags */ + short* contours; /* the contour end points */ + + int flags; /* outline masks */ + + } FT_Outline; + + /* */ + + /* Following limits must be consistent with */ + /* FT_Outline.{n_contours,n_points} */ +#define FT_OUTLINE_CONTOURS_MAX SHRT_MAX +#define FT_OUTLINE_POINTS_MAX SHRT_MAX + + + /************************************************************************** + * + * @enum: + * FT_OUTLINE_XXX + * + * @description: + * A list of bit-field constants used for the flags in an outline's + * `flags` field. + * + * @values: + * FT_OUTLINE_NONE :: + * Value~0 is reserved. + * + * FT_OUTLINE_OWNER :: + * If set, this flag indicates that the outline's field arrays (i.e., + * `points`, `flags`, and `contours`) are 'owned' by the outline + * object, and should thus be freed when it is destroyed. + * + * FT_OUTLINE_EVEN_ODD_FILL :: + * By default, outlines are filled using the non-zero winding rule. If + * set to 1, the outline will be filled using the even-odd fill rule + * (only works with the smooth rasterizer). + * + * FT_OUTLINE_REVERSE_FILL :: + * By default, outside contours of an outline are oriented in + * clock-wise direction, as defined in the TrueType specification. + * This flag is set if the outline uses the opposite direction + * (typically for Type~1 fonts). This flag is ignored by the scan + * converter. + * + * FT_OUTLINE_IGNORE_DROPOUTS :: + * By default, the scan converter will try to detect drop-outs in an + * outline and correct the glyph bitmap to ensure consistent shape + * continuity. If set, this flag hints the scan-line converter to + * ignore such cases. See below for more information. + * + * FT_OUTLINE_SMART_DROPOUTS :: + * Select smart dropout control. If unset, use simple dropout control. + * Ignored if @FT_OUTLINE_IGNORE_DROPOUTS is set. See below for more + * information. + * + * FT_OUTLINE_INCLUDE_STUBS :: + * If set, turn pixels on for 'stubs', otherwise exclude them. Ignored + * if @FT_OUTLINE_IGNORE_DROPOUTS is set. See below for more + * information. + * + * FT_OUTLINE_OVERLAP :: + * [Since 2.10.3] This flag indicates that this outline contains + * overlapping contours and the anti-aliased renderer should perform + * oversampling to mitigate possible artifacts. This flag should _not_ + * be set for well designed glyphs without overlaps because it quadruples + * the rendering time. + * + * FT_OUTLINE_HIGH_PRECISION :: + * This flag indicates that the scan-line converter should try to + * convert this outline to bitmaps with the highest possible quality. + * It is typically set for small character sizes. Note that this is + * only a hint that might be completely ignored by a given + * scan-converter. + * + * FT_OUTLINE_SINGLE_PASS :: + * This flag is set to force a given scan-converter to only use a + * single pass over the outline to render a bitmap glyph image. + * Normally, it is set for very large character sizes. It is only a + * hint that might be completely ignored by a given scan-converter. + * + * @note: + * The flags @FT_OUTLINE_IGNORE_DROPOUTS, @FT_OUTLINE_SMART_DROPOUTS, and + * @FT_OUTLINE_INCLUDE_STUBS are ignored by the smooth rasterizer. + * + * There exists a second mechanism to pass the drop-out mode to the B/W + * rasterizer; see the `tags` field in @FT_Outline. 
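+   *
+   *   For instance (a sketch; `first` and `mode` are placeholders for the
+   *   contour's first point index and the desired SCANTYPE value stored in
+   *   bits 5-7), the per-contour drop-out mode can be requested with
+   *
+   *   ```
+   *     outline->tags[first] |= (char)( FT_CURVE_TAG_HAS_SCANMODE |
+   *                                     ( mode << 5 ) );
+   *   ```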
+ * + * Please refer to the description of the 'SCANTYPE' instruction in the + * OpenType specification (in file `ttinst1.doc`) how simple drop-outs, + * smart drop-outs, and stubs are defined. + */ +#define FT_OUTLINE_NONE 0x0 +#define FT_OUTLINE_OWNER 0x1 +#define FT_OUTLINE_EVEN_ODD_FILL 0x2 +#define FT_OUTLINE_REVERSE_FILL 0x4 +#define FT_OUTLINE_IGNORE_DROPOUTS 0x8 +#define FT_OUTLINE_SMART_DROPOUTS 0x10 +#define FT_OUTLINE_INCLUDE_STUBS 0x20 +#define FT_OUTLINE_OVERLAP 0x40 + +#define FT_OUTLINE_HIGH_PRECISION 0x100 +#define FT_OUTLINE_SINGLE_PASS 0x200 + + + /* these constants are deprecated; use the corresponding */ + /* `FT_OUTLINE_XXX` values instead */ +#define ft_outline_none FT_OUTLINE_NONE +#define ft_outline_owner FT_OUTLINE_OWNER +#define ft_outline_even_odd_fill FT_OUTLINE_EVEN_ODD_FILL +#define ft_outline_reverse_fill FT_OUTLINE_REVERSE_FILL +#define ft_outline_ignore_dropouts FT_OUTLINE_IGNORE_DROPOUTS +#define ft_outline_high_precision FT_OUTLINE_HIGH_PRECISION +#define ft_outline_single_pass FT_OUTLINE_SINGLE_PASS + + /* */ + +#define FT_CURVE_TAG( flag ) ( flag & 0x03 ) + + /* see the `tags` field in `FT_Outline` for a description of the values */ +#define FT_CURVE_TAG_ON 0x01 +#define FT_CURVE_TAG_CONIC 0x00 +#define FT_CURVE_TAG_CUBIC 0x02 + +#define FT_CURVE_TAG_HAS_SCANMODE 0x04 + +#define FT_CURVE_TAG_TOUCH_X 0x08 /* reserved for TrueType hinter */ +#define FT_CURVE_TAG_TOUCH_Y 0x10 /* reserved for TrueType hinter */ + +#define FT_CURVE_TAG_TOUCH_BOTH ( FT_CURVE_TAG_TOUCH_X | \ + FT_CURVE_TAG_TOUCH_Y ) + /* values 0x20, 0x40, and 0x80 are reserved */ + + + /* these constants are deprecated; use the corresponding */ + /* `FT_CURVE_TAG_XXX` values instead */ +#define FT_Curve_Tag_On FT_CURVE_TAG_ON +#define FT_Curve_Tag_Conic FT_CURVE_TAG_CONIC +#define FT_Curve_Tag_Cubic FT_CURVE_TAG_CUBIC +#define FT_Curve_Tag_Touch_X FT_CURVE_TAG_TOUCH_X +#define FT_Curve_Tag_Touch_Y FT_CURVE_TAG_TOUCH_Y + + + /************************************************************************** + * + * @functype: + * FT_Outline_MoveToFunc + * + * @description: + * A function pointer type used to describe the signature of a 'move to' + * function during outline walking/decomposition. + * + * A 'move to' is emitted to start a new contour in an outline. + * + * @input: + * to :: + * A pointer to the target point of the 'move to'. + * + * user :: + * A typeless pointer, which is passed from the caller of the + * decomposition function. + * + * @return: + * Error code. 0~means success. + */ + typedef int + (*FT_Outline_MoveToFunc)( const FT_Vector* to, + void* user ); + +#define FT_Outline_MoveTo_Func FT_Outline_MoveToFunc + + + /************************************************************************** + * + * @functype: + * FT_Outline_LineToFunc + * + * @description: + * A function pointer type used to describe the signature of a 'line to' + * function during outline walking/decomposition. + * + * A 'line to' is emitted to indicate a segment in the outline. + * + * @input: + * to :: + * A pointer to the target point of the 'line to'. + * + * user :: + * A typeless pointer, which is passed from the caller of the + * decomposition function. + * + * @return: + * Error code. 0~means success. 
+ */ + typedef int + (*FT_Outline_LineToFunc)( const FT_Vector* to, + void* user ); + +#define FT_Outline_LineTo_Func FT_Outline_LineToFunc + + + /************************************************************************** + * + * @functype: + * FT_Outline_ConicToFunc + * + * @description: + * A function pointer type used to describe the signature of a 'conic to' + * function during outline walking or decomposition. + * + * A 'conic to' is emitted to indicate a second-order Bezier arc in the + * outline. + * + * @input: + * control :: + * An intermediate control point between the last position and the new + * target in `to`. + * + * to :: + * A pointer to the target end point of the conic arc. + * + * user :: + * A typeless pointer, which is passed from the caller of the + * decomposition function. + * + * @return: + * Error code. 0~means success. + */ + typedef int + (*FT_Outline_ConicToFunc)( const FT_Vector* control, + const FT_Vector* to, + void* user ); + +#define FT_Outline_ConicTo_Func FT_Outline_ConicToFunc + + + /************************************************************************** + * + * @functype: + * FT_Outline_CubicToFunc + * + * @description: + * A function pointer type used to describe the signature of a 'cubic to' + * function during outline walking or decomposition. + * + * A 'cubic to' is emitted to indicate a third-order Bezier arc. + * + * @input: + * control1 :: + * A pointer to the first Bezier control point. + * + * control2 :: + * A pointer to the second Bezier control point. + * + * to :: + * A pointer to the target end point. + * + * user :: + * A typeless pointer, which is passed from the caller of the + * decomposition function. + * + * @return: + * Error code. 0~means success. + */ + typedef int + (*FT_Outline_CubicToFunc)( const FT_Vector* control1, + const FT_Vector* control2, + const FT_Vector* to, + void* user ); + +#define FT_Outline_CubicTo_Func FT_Outline_CubicToFunc + + + /************************************************************************** + * + * @struct: + * FT_Outline_Funcs + * + * @description: + * A structure to hold various function pointers used during outline + * decomposition in order to emit segments, conic, and cubic Beziers. + * + * @fields: + * move_to :: + * The 'move to' emitter. + * + * line_to :: + * The segment emitter. + * + * conic_to :: + * The second-order Bezier arc emitter. + * + * cubic_to :: + * The third-order Bezier arc emitter. + * + * shift :: + * The shift that is applied to coordinates before they are sent to the + * emitter. + * + * delta :: + * The delta that is applied to coordinates before they are sent to the + * emitter, but after the shift. + * + * @note: + * The point coordinates sent to the emitters are the transformed version + * of the original coordinates (this is important for high accuracy + * during scan-conversion). The transformation is simple: + * + * ``` + * x' = (x << shift) - delta + * y' = (y << shift) - delta + * ``` + * + * Set the values of `shift` and `delta` to~0 to get the original point + * coordinates. 
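+   *
+   *   A minimal decomposition sketch (assuming `outline` points to a valid
+   *   @FT_Outline and that the application provides the four `my_*`
+   *   callbacks; @FT_Outline_Decompose itself is declared in `ftoutln.h`):
+   *
+   *   ```
+   *     FT_Outline_Funcs  funcs;
+   *
+   *
+   *     funcs.move_to  = my_move_to;
+   *     funcs.line_to  = my_line_to;
+   *     funcs.conic_to = my_conic_to;
+   *     funcs.cubic_to = my_cubic_to;
+   *     funcs.shift    = 0;             // pass coordinates unchanged
+   *     funcs.delta    = 0;
+   *
+   *     error = FT_Outline_Decompose( outline, &funcs, my_user_data );
+   *   ```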
+ */ + typedef struct FT_Outline_Funcs_ + { + FT_Outline_MoveToFunc move_to; + FT_Outline_LineToFunc line_to; + FT_Outline_ConicToFunc conic_to; + FT_Outline_CubicToFunc cubic_to; + + int shift; + FT_Pos delta; + + } FT_Outline_Funcs; + + + /************************************************************************** + * + * @section: + * basic_types + * + */ + + + /************************************************************************** + * + * @macro: + * FT_IMAGE_TAG + * + * @description: + * This macro converts four-letter tags to an unsigned long type. + * + * @note: + * Since many 16-bit compilers don't like 32-bit enumerations, you should + * redefine this macro in case of problems to something like this: + * + * ``` + * #define FT_IMAGE_TAG( value, _x1, _x2, _x3, _x4 ) value + * ``` + * + * to get a simple enumeration without assigning special numbers. + */ +#ifndef FT_IMAGE_TAG + +#define FT_IMAGE_TAG( value, _x1, _x2, _x3, _x4 ) \ + value = ( ( FT_STATIC_BYTE_CAST( unsigned long, _x1 ) << 24 ) | \ + ( FT_STATIC_BYTE_CAST( unsigned long, _x2 ) << 16 ) | \ + ( FT_STATIC_BYTE_CAST( unsigned long, _x3 ) << 8 ) | \ + FT_STATIC_BYTE_CAST( unsigned long, _x4 ) ) + +#endif /* FT_IMAGE_TAG */ + + + /************************************************************************** + * + * @enum: + * FT_Glyph_Format + * + * @description: + * An enumeration type used to describe the format of a given glyph + * image. Note that this version of FreeType only supports two image + * formats, even though future font drivers will be able to register + * their own format. + * + * @values: + * FT_GLYPH_FORMAT_NONE :: + * The value~0 is reserved. + * + * FT_GLYPH_FORMAT_COMPOSITE :: + * The glyph image is a composite of several other images. This format + * is _only_ used with @FT_LOAD_NO_RECURSE, and is used to report + * compound glyphs (like accented characters). + * + * FT_GLYPH_FORMAT_BITMAP :: + * The glyph image is a bitmap, and can be described as an @FT_Bitmap. + * You generally need to access the `bitmap` field of the + * @FT_GlyphSlotRec structure to read it. + * + * FT_GLYPH_FORMAT_OUTLINE :: + * The glyph image is a vectorial outline made of line segments and + * Bezier arcs; it can be described as an @FT_Outline; you generally + * want to access the `outline` field of the @FT_GlyphSlotRec structure + * to read it. + * + * FT_GLYPH_FORMAT_PLOTTER :: + * The glyph image is a vectorial path with no inside and outside + * contours. Some Type~1 fonts, like those in the Hershey family, + * contain glyphs in this format. These are described as @FT_Outline, + * but FreeType isn't currently capable of rendering them correctly. + * + * FT_GLYPH_FORMAT_SVG :: + * [Since 2.12] The glyph is represented by an SVG document in the + * 'SVG~' table. + */ + typedef enum FT_Glyph_Format_ + { + FT_IMAGE_TAG( FT_GLYPH_FORMAT_NONE, 0, 0, 0, 0 ), + + FT_IMAGE_TAG( FT_GLYPH_FORMAT_COMPOSITE, 'c', 'o', 'm', 'p' ), + FT_IMAGE_TAG( FT_GLYPH_FORMAT_BITMAP, 'b', 'i', 't', 's' ), + FT_IMAGE_TAG( FT_GLYPH_FORMAT_OUTLINE, 'o', 'u', 't', 'l' ), + FT_IMAGE_TAG( FT_GLYPH_FORMAT_PLOTTER, 'p', 'l', 'o', 't' ), + FT_IMAGE_TAG( FT_GLYPH_FORMAT_SVG, 'S', 'V', 'G', ' ' ) + + } FT_Glyph_Format; + + + /* these constants are deprecated; use the corresponding */ + /* `FT_Glyph_Format` values instead. 
*/ +#define ft_glyph_format_none FT_GLYPH_FORMAT_NONE +#define ft_glyph_format_composite FT_GLYPH_FORMAT_COMPOSITE +#define ft_glyph_format_bitmap FT_GLYPH_FORMAT_BITMAP +#define ft_glyph_format_outline FT_GLYPH_FORMAT_OUTLINE +#define ft_glyph_format_plotter FT_GLYPH_FORMAT_PLOTTER + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** R A S T E R D E F I N I T I O N S *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + + + /************************************************************************** + * + * @section: + * raster + * + * @title: + * Scanline Converter + * + * @abstract: + * How vectorial outlines are converted into bitmaps and pixmaps. + * + * @description: + * A raster or a rasterizer is a scan converter in charge of producing a + * pixel coverage bitmap that can be used as an alpha channel when + * compositing a glyph with a background. FreeType comes with two + * rasterizers: bilevel `raster1` and anti-aliased `smooth` are two + * separate modules. They are usually called from the high-level + * @FT_Load_Glyph or @FT_Render_Glyph functions and produce the entire + * coverage bitmap at once, while staying largely invisible to users. + * + * Instead of working with complete coverage bitmaps, it is also possible + * to intercept consecutive pixel runs on the same scanline with the same + * coverage, called _spans_, and process them individually. Only the + * `smooth` rasterizer permits this when calling @FT_Outline_Render with + * @FT_Raster_Params as described below. + * + * Working with either complete bitmaps or spans it is important to think + * of them as colorless coverage objects suitable as alpha channels to + * blend arbitrary colors with a background. For best results, it is + * recommended to use gamma correction, too. + * + * This section also describes the public API needed to set up alternative + * @FT_Renderer modules. + * + * @order: + * FT_Span + * FT_SpanFunc + * FT_Raster_Params + * FT_RASTER_FLAG_XXX + * + * FT_Raster + * FT_Raster_NewFunc + * FT_Raster_DoneFunc + * FT_Raster_ResetFunc + * FT_Raster_SetModeFunc + * FT_Raster_RenderFunc + * FT_Raster_Funcs + * + */ + + + /************************************************************************** + * + * @struct: + * FT_Span + * + * @description: + * A structure to model a single span of consecutive pixels when + * rendering an anti-aliased bitmap. + * + * @fields: + * x :: + * The span's horizontal start position. + * + * len :: + * The span's length in pixels. + * + * coverage :: + * The span color/coverage, ranging from 0 (background) to 255 + * (foreground). + * + * @note: + * This structure is used by the span drawing callback type named + * @FT_SpanFunc that takes the y~coordinate of the span as a parameter. + * + * The anti-aliased rasterizer produces coverage values from 0 to 255, + * that is, from completely transparent to completely opaque. 
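+   *
+   *   A sketch of such a callback, writing coverage bytes into an 8-bit
+   *   application surface (`my_spans`, `SURFACE_HEIGHT`, `SURFACE_PITCH`,
+   *   and the top-down y~flip are illustrative assumptions):
+   *
+   *   ```
+   *     static void
+   *     my_spans( int             y,
+   *               int             count,
+   *               const FT_Span*  spans,
+   *               void*           user )
+   *     {
+   *       // the raster's y axis points upwards; flip it for a top-down
+   *       // surface of SURFACE_HEIGHT rows and SURFACE_PITCH bytes per row
+   *       unsigned char*  row = (unsigned char*)user +
+   *                             ( SURFACE_HEIGHT - 1 - y ) * SURFACE_PITCH;
+   *       int             i;
+   *
+   *
+   *       for ( i = 0; i < count; i++ )
+   *         memset( row + spans[i].x, spans[i].coverage, spans[i].len );
+   *     }
+   *   ```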
+ */ + typedef struct FT_Span_ + { + short x; + unsigned short len; + unsigned char coverage; + + } FT_Span; + + + /************************************************************************** + * + * @functype: + * FT_SpanFunc + * + * @description: + * A function used as a call-back by the anti-aliased renderer in order + * to let client applications draw themselves the pixel spans on each + * scan line. + * + * @input: + * y :: + * The scanline's upward y~coordinate. + * + * count :: + * The number of spans to draw on this scanline. + * + * spans :: + * A table of `count` spans to draw on the scanline. + * + * user :: + * User-supplied data that is passed to the callback. + * + * @note: + * This callback allows client applications to directly render the spans + * of the anti-aliased bitmap to any kind of surfaces. + * + * This can be used to write anti-aliased outlines directly to a given + * background bitmap using alpha compositing. It can also be used for + * oversampling and averaging. + */ + typedef void + (*FT_SpanFunc)( int y, + int count, + const FT_Span* spans, + void* user ); + +#define FT_Raster_Span_Func FT_SpanFunc + + + /************************************************************************** + * + * @functype: + * FT_Raster_BitTest_Func + * + * @description: + * Deprecated, unimplemented. + */ + typedef int + (*FT_Raster_BitTest_Func)( int y, + int x, + void* user ); + + + /************************************************************************** + * + * @functype: + * FT_Raster_BitSet_Func + * + * @description: + * Deprecated, unimplemented. + */ + typedef void + (*FT_Raster_BitSet_Func)( int y, + int x, + void* user ); + + + /************************************************************************** + * + * @enum: + * FT_RASTER_FLAG_XXX + * + * @description: + * A list of bit flag constants as used in the `flags` field of a + * @FT_Raster_Params structure. + * + * @values: + * FT_RASTER_FLAG_DEFAULT :: + * This value is 0. + * + * FT_RASTER_FLAG_AA :: + * This flag is set to indicate that an anti-aliased glyph image should + * be generated. Otherwise, it will be monochrome (1-bit). + * + * FT_RASTER_FLAG_DIRECT :: + * This flag is set to indicate direct rendering. In this mode, client + * applications must provide their own span callback. This lets them + * directly draw or compose over an existing bitmap. If this bit is + * _not_ set, the target pixmap's buffer _must_ be zeroed before + * rendering and the output will be clipped to its size. + * + * Direct rendering is only possible with anti-aliased glyphs. + * + * FT_RASTER_FLAG_CLIP :: + * This flag is only used in direct rendering mode. If set, the output + * will be clipped to a box specified in the `clip_box` field of the + * @FT_Raster_Params structure. Otherwise, the `clip_box` is + * effectively set to the bounding box and all spans are generated. + * + * FT_RASTER_FLAG_SDF :: + * This flag is set to indicate that a signed distance field glyph + * image should be generated. This is only used while rendering with + * the @FT_RENDER_MODE_SDF render mode. 
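+   *
+   *   A direct-rendering setup sketch (assuming `library`, `outline`, the
+   *   span callback `my_spans`, and `user_data` exist in the application;
+   *   @FT_Outline_Render is declared in `ftoutln.h`):
+   *
+   *   ```
+   *     FT_Raster_Params  params;
+   *
+   *
+   *     memset( &params, 0, sizeof ( params ) );
+   *
+   *     params.flags      = FT_RASTER_FLAG_AA | FT_RASTER_FLAG_DIRECT;
+   *     params.gray_spans = my_spans;
+   *     params.user       = user_data;
+   *
+   *     error = FT_Outline_Render( library, outline, &params );
+   *   ```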
+ */ +#define FT_RASTER_FLAG_DEFAULT 0x0 +#define FT_RASTER_FLAG_AA 0x1 +#define FT_RASTER_FLAG_DIRECT 0x2 +#define FT_RASTER_FLAG_CLIP 0x4 +#define FT_RASTER_FLAG_SDF 0x8 + + /* these constants are deprecated; use the corresponding */ + /* `FT_RASTER_FLAG_XXX` values instead */ +#define ft_raster_flag_default FT_RASTER_FLAG_DEFAULT +#define ft_raster_flag_aa FT_RASTER_FLAG_AA +#define ft_raster_flag_direct FT_RASTER_FLAG_DIRECT +#define ft_raster_flag_clip FT_RASTER_FLAG_CLIP + + + /************************************************************************** + * + * @struct: + * FT_Raster_Params + * + * @description: + * A structure to hold the parameters used by a raster's render function, + * passed as an argument to @FT_Outline_Render. + * + * @fields: + * target :: + * The target bitmap. + * + * source :: + * A pointer to the source glyph image (e.g., an @FT_Outline). + * + * flags :: + * The rendering flags. + * + * gray_spans :: + * The gray span drawing callback. + * + * black_spans :: + * Unused. + * + * bit_test :: + * Unused. + * + * bit_set :: + * Unused. + * + * user :: + * User-supplied data that is passed to each drawing callback. + * + * clip_box :: + * An optional span clipping box expressed in _integer_ pixels + * (not in 26.6 fixed-point units). + * + * @note: + * The @FT_RASTER_FLAG_AA bit flag must be set in the `flags` to + * generate an anti-aliased glyph bitmap, otherwise a monochrome bitmap + * is generated. The `target` should have appropriate pixel mode and its + * dimensions define the clipping region. + * + * If both @FT_RASTER_FLAG_AA and @FT_RASTER_FLAG_DIRECT bit flags + * are set in `flags`, the raster calls an @FT_SpanFunc callback + * `gray_spans` with `user` data as an argument ignoring `target`. This + * allows direct composition over a pre-existing user surface to perform + * the span drawing and composition. To optionally clip the spans, set + * the @FT_RASTER_FLAG_CLIP flag and `clip_box`. The monochrome raster + * does not support the direct mode. + * + * The gray-level rasterizer always uses 256 gray levels. If you want + * fewer gray levels, you have to use @FT_RASTER_FLAG_DIRECT and reduce + * the levels in the callback function. + */ + typedef struct FT_Raster_Params_ + { + const FT_Bitmap* target; + const void* source; + int flags; + FT_SpanFunc gray_spans; + FT_SpanFunc black_spans; /* unused */ + FT_Raster_BitTest_Func bit_test; /* unused */ + FT_Raster_BitSet_Func bit_set; /* unused */ + void* user; + FT_BBox clip_box; + + } FT_Raster_Params; + + + /************************************************************************** + * + * @type: + * FT_Raster + * + * @description: + * An opaque handle (pointer) to a raster object. Each object can be + * used independently to convert an outline into a bitmap or pixmap. + * + * @note: + * In FreeType 2, all rasters are now encapsulated within specific + * @FT_Renderer modules and only used in their context. + * + */ + typedef struct FT_RasterRec_* FT_Raster; + + + /************************************************************************** + * + * @functype: + * FT_Raster_NewFunc + * + * @description: + * A function used to create a new raster object. + * + * @input: + * memory :: + * A handle to the memory allocator. + * + * @output: + * raster :: + * A handle to the new raster object. + * + * @return: + * Error code. 0~means success. + * + * @note: + * The `memory` parameter is a typeless pointer in order to avoid + * un-wanted dependencies on the rest of the FreeType code. 
In practice, + * it is an @FT_Memory object, i.e., a handle to the standard FreeType + * memory allocator. However, this field can be completely ignored by a + * given raster implementation. + */ + typedef int + (*FT_Raster_NewFunc)( void* memory, + FT_Raster* raster ); + +#define FT_Raster_New_Func FT_Raster_NewFunc + + + /************************************************************************** + * + * @functype: + * FT_Raster_DoneFunc + * + * @description: + * A function used to destroy a given raster object. + * + * @input: + * raster :: + * A handle to the raster object. + */ + typedef void + (*FT_Raster_DoneFunc)( FT_Raster raster ); + +#define FT_Raster_Done_Func FT_Raster_DoneFunc + + + /************************************************************************** + * + * @functype: + * FT_Raster_ResetFunc + * + * @description: + * FreeType used to provide an area of memory called the 'render pool' + * available to all registered rasterizers. This was not thread safe, + * however, and now FreeType never allocates this pool. + * + * This function is called after a new raster object is created. + * + * @input: + * raster :: + * A handle to the new raster object. + * + * pool_base :: + * Previously, the address in memory of the render pool. Set this to + * `NULL`. + * + * pool_size :: + * Previously, the size in bytes of the render pool. Set this to 0. + * + * @note: + * Rasterizers should rely on dynamic or stack allocation if they want to + * (a handle to the memory allocator is passed to the rasterizer + * constructor). + */ + typedef void + (*FT_Raster_ResetFunc)( FT_Raster raster, + unsigned char* pool_base, + unsigned long pool_size ); + +#define FT_Raster_Reset_Func FT_Raster_ResetFunc + + + /************************************************************************** + * + * @functype: + * FT_Raster_SetModeFunc + * + * @description: + * This function is a generic facility to change modes or attributes in a + * given raster. This can be used for debugging purposes, or simply to + * allow implementation-specific 'features' in a given raster module. + * + * @input: + * raster :: + * A handle to the new raster object. + * + * mode :: + * A 4-byte tag used to name the mode or property. + * + * args :: + * A pointer to the new mode/property to use. + */ + typedef int + (*FT_Raster_SetModeFunc)( FT_Raster raster, + unsigned long mode, + void* args ); + +#define FT_Raster_Set_Mode_Func FT_Raster_SetModeFunc + + + /************************************************************************** + * + * @functype: + * FT_Raster_RenderFunc + * + * @description: + * Invoke a given raster to scan-convert a given glyph image into a + * target bitmap. + * + * @input: + * raster :: + * A handle to the raster object. + * + * params :: + * A pointer to an @FT_Raster_Params structure used to store the + * rendering parameters. + * + * @return: + * Error code. 0~means success. + * + * @note: + * The exact format of the source image depends on the raster's glyph + * format defined in its @FT_Raster_Funcs structure. It can be an + * @FT_Outline or anything else in order to support a large array of + * glyph formats. + * + * Note also that the render function can fail and return a + * `FT_Err_Unimplemented_Feature` error code if the raster used does not + * support direct composition. 
+ */ + typedef int + (*FT_Raster_RenderFunc)( FT_Raster raster, + const FT_Raster_Params* params ); + +#define FT_Raster_Render_Func FT_Raster_RenderFunc + + + /************************************************************************** + * + * @struct: + * FT_Raster_Funcs + * + * @description: + * A structure used to describe a given raster class to the library. + * + * @fields: + * glyph_format :: + * The supported glyph format for this raster. + * + * raster_new :: + * The raster constructor. + * + * raster_reset :: + * Used to reset the render pool within the raster. + * + * raster_render :: + * A function to render a glyph into a given bitmap. + * + * raster_done :: + * The raster destructor. + */ + typedef struct FT_Raster_Funcs_ + { + FT_Glyph_Format glyph_format; + + FT_Raster_NewFunc raster_new; + FT_Raster_ResetFunc raster_reset; + FT_Raster_SetModeFunc raster_set_mode; + FT_Raster_RenderFunc raster_render; + FT_Raster_DoneFunc raster_done; + + } FT_Raster_Funcs; + + /* */ + + +FT_END_HEADER + +#endif /* FTIMAGE_H_ */ + + +/* END */ + + +/* Local Variables: */ +/* coding: utf-8 */ +/* End: */ diff --git a/Example/include/freetype/ftincrem.h b/Example/include/freetype/ftincrem.h new file mode 100644 index 0000000..2d4f5de --- /dev/null +++ b/Example/include/freetype/ftincrem.h @@ -0,0 +1,348 @@ +/**************************************************************************** + * + * ftincrem.h + * + * FreeType incremental loading (specification). + * + * Copyright (C) 2002-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTINCREM_H_ +#define FTINCREM_H_ + +#include +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + /************************************************************************** + * + * @section: + * incremental + * + * @title: + * Incremental Loading + * + * @abstract: + * Custom Glyph Loading. + * + * @description: + * This section contains various functions used to perform so-called + * 'incremental' glyph loading. This is a mode where all glyphs loaded + * from a given @FT_Face are provided by the client application. + * + * Apart from that, all other tables are loaded normally from the font + * file. This mode is useful when FreeType is used within another + * engine, e.g., a PostScript Imaging Processor. + * + * To enable this mode, you must use @FT_Open_Face, passing an + * @FT_Parameter with the @FT_PARAM_TAG_INCREMENTAL tag and an + * @FT_Incremental_Interface value. See the comments for + * @FT_Incremental_InterfaceRec for an example. + * + */ + + + /************************************************************************** + * + * @type: + * FT_Incremental + * + * @description: + * An opaque type describing a user-provided object used to implement + * 'incremental' glyph loading within FreeType. This is used to support + * embedded fonts in certain environments (e.g., PostScript + * interpreters), where the glyph data isn't in the font file, or must be + * overridden by different values. 
+ * + * @note: + * It is up to client applications to create and implement + * @FT_Incremental objects, as long as they provide implementations for + * the methods @FT_Incremental_GetGlyphDataFunc, + * @FT_Incremental_FreeGlyphDataFunc and + * @FT_Incremental_GetGlyphMetricsFunc. + * + * See the description of @FT_Incremental_InterfaceRec to understand how + * to use incremental objects with FreeType. + * + */ + typedef struct FT_IncrementalRec_* FT_Incremental; + + + /************************************************************************** + * + * @struct: + * FT_Incremental_MetricsRec + * + * @description: + * A small structure used to contain the basic glyph metrics returned by + * the @FT_Incremental_GetGlyphMetricsFunc method. + * + * @fields: + * bearing_x :: + * Left bearing, in font units. + * + * bearing_y :: + * Top bearing, in font units. + * + * advance :: + * Horizontal component of glyph advance, in font units. + * + * advance_v :: + * Vertical component of glyph advance, in font units. + * + * @note: + * These correspond to horizontal or vertical metrics depending on the + * value of the `vertical` argument to the function + * @FT_Incremental_GetGlyphMetricsFunc. + * + */ + typedef struct FT_Incremental_MetricsRec_ + { + FT_Long bearing_x; + FT_Long bearing_y; + FT_Long advance; + FT_Long advance_v; /* since 2.3.12 */ + + } FT_Incremental_MetricsRec; + + + /************************************************************************** + * + * @struct: + * FT_Incremental_Metrics + * + * @description: + * A handle to an @FT_Incremental_MetricsRec structure. + * + */ + typedef struct FT_Incremental_MetricsRec_* FT_Incremental_Metrics; + + + /************************************************************************** + * + * @type: + * FT_Incremental_GetGlyphDataFunc + * + * @description: + * A function called by FreeType to access a given glyph's data bytes + * during @FT_Load_Glyph or @FT_Load_Char if incremental loading is + * enabled. + * + * Note that the format of the glyph's data bytes depends on the font + * file format. For TrueType, it must correspond to the raw bytes within + * the 'glyf' table. For PostScript formats, it must correspond to the + * **unencrypted** charstring bytes, without any `lenIV` header. It is + * undefined for any other format. + * + * @input: + * incremental :: + * Handle to an opaque @FT_Incremental handle provided by the client + * application. + * + * glyph_index :: + * Index of relevant glyph. + * + * @output: + * adata :: + * A structure describing the returned glyph data bytes (which will be + * accessed as a read-only byte block). + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * If this function returns successfully the method + * @FT_Incremental_FreeGlyphDataFunc will be called later to release the + * data bytes. + * + * Nested calls to @FT_Incremental_GetGlyphDataFunc can happen for + * compound glyphs. + * + */ + typedef FT_Error + (*FT_Incremental_GetGlyphDataFunc)( FT_Incremental incremental, + FT_UInt glyph_index, + FT_Data* adata ); + + + /************************************************************************** + * + * @type: + * FT_Incremental_FreeGlyphDataFunc + * + * @description: + * A function used to release the glyph data bytes returned by a + * successful call to @FT_Incremental_GetGlyphDataFunc. + * + * @input: + * incremental :: + * A handle to an opaque @FT_Incremental handle provided by the client + * application. 
+ * + * data :: + * A structure describing the glyph data bytes (which will be accessed + * as a read-only byte block). + * + */ + typedef void + (*FT_Incremental_FreeGlyphDataFunc)( FT_Incremental incremental, + FT_Data* data ); + + + /************************************************************************** + * + * @type: + * FT_Incremental_GetGlyphMetricsFunc + * + * @description: + * A function used to retrieve the basic metrics of a given glyph index + * before accessing its data. This allows for handling font types such + * as PCL~XL Format~1, Class~2 downloaded TrueType fonts, where the glyph + * metrics (`hmtx` and `vmtx` tables) are permitted to be omitted from + * the font, and the relevant metrics included in the header of the glyph + * outline data. Importantly, this is not intended to allow custom glyph + * metrics (for example, Postscript Metrics dictionaries), because that + * conflicts with the requirements of outline hinting. Such custom + * metrics must be handled separately, by the calling application. + * + * @input: + * incremental :: + * A handle to an opaque @FT_Incremental handle provided by the client + * application. + * + * glyph_index :: + * Index of relevant glyph. + * + * vertical :: + * If true, return vertical metrics. + * + * ametrics :: + * This parameter is used for both input and output. The original + * glyph metrics, if any, in font units. If metrics are not available + * all the values must be set to zero. + * + * @output: + * ametrics :: + * The glyph metrics in font units. + * + */ + typedef FT_Error + (*FT_Incremental_GetGlyphMetricsFunc) + ( FT_Incremental incremental, + FT_UInt glyph_index, + FT_Bool vertical, + FT_Incremental_MetricsRec *ametrics ); + + + /************************************************************************** + * + * @struct: + * FT_Incremental_FuncsRec + * + * @description: + * A table of functions for accessing fonts that load data incrementally. + * Used in @FT_Incremental_InterfaceRec. + * + * @fields: + * get_glyph_data :: + * The function to get glyph data. Must not be null. + * + * free_glyph_data :: + * The function to release glyph data. Must not be null. + * + * get_glyph_metrics :: + * The function to get glyph metrics. May be null if the font does not + * require it. + * + */ + typedef struct FT_Incremental_FuncsRec_ + { + FT_Incremental_GetGlyphDataFunc get_glyph_data; + FT_Incremental_FreeGlyphDataFunc free_glyph_data; + FT_Incremental_GetGlyphMetricsFunc get_glyph_metrics; + + } FT_Incremental_FuncsRec; + + + /************************************************************************** + * + * @struct: + * FT_Incremental_InterfaceRec + * + * @description: + * A structure to be used with @FT_Open_Face to indicate that the user + * wants to support incremental glyph loading. You should use it with + * @FT_PARAM_TAG_INCREMENTAL as in the following example: + * + * ``` + * FT_Incremental_InterfaceRec inc_int; + * FT_Parameter parameter; + * FT_Open_Args open_args; + * + * + * // set up incremental descriptor + * inc_int.funcs = my_funcs; + * inc_int.object = my_object; + * + * // set up optional parameter + * parameter.tag = FT_PARAM_TAG_INCREMENTAL; + * parameter.data = &inc_int; + * + * // set up FT_Open_Args structure + * open_args.flags = FT_OPEN_PATHNAME | FT_OPEN_PARAMS; + * open_args.pathname = my_font_pathname; + * open_args.num_params = 1; + * open_args.params = ¶meter; // we use one optional argument + * + * // open the font + * error = FT_Open_Face( library, &open_args, index, &face ); + * ... 
+ * ``` + * + */ + typedef struct FT_Incremental_InterfaceRec_ + { + const FT_Incremental_FuncsRec* funcs; + FT_Incremental object; + + } FT_Incremental_InterfaceRec; + + + /************************************************************************** + * + * @type: + * FT_Incremental_Interface + * + * @description: + * A pointer to an @FT_Incremental_InterfaceRec structure. + * + */ + typedef FT_Incremental_InterfaceRec* FT_Incremental_Interface; + + + /* */ + + +FT_END_HEADER + +#endif /* FTINCREM_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftlcdfil.h b/Example/include/freetype/ftlcdfil.h new file mode 100644 index 0000000..d3723e1 --- /dev/null +++ b/Example/include/freetype/ftlcdfil.h @@ -0,0 +1,323 @@ +/**************************************************************************** + * + * ftlcdfil.h + * + * FreeType API for color filtering of subpixel bitmap glyphs + * (specification). + * + * Copyright (C) 2006-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTLCDFIL_H_ +#define FTLCDFIL_H_ + +#include +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + /************************************************************************** + * + * @section: + * lcd_rendering + * + * @title: + * Subpixel Rendering + * + * @abstract: + * API to control subpixel rendering. + * + * @description: + * FreeType provides two alternative subpixel rendering technologies. + * Should you define `FT_CONFIG_OPTION_SUBPIXEL_RENDERING` in your + * `ftoption.h` file, this enables ClearType-style rendering. + * Otherwise, Harmony LCD rendering is enabled. These technologies are + * controlled differently and API described below, although always + * available, performs its function when appropriate method is enabled + * and does nothing otherwise. + * + * ClearType-style LCD rendering exploits the color-striped structure of + * LCD pixels, increasing the available resolution in the direction of + * the stripe (usually horizontal RGB) by a factor of~3. Using the + * subpixel coverages unfiltered can create severe color fringes + * especially when rendering thin features. Indeed, to produce + * black-on-white text, the nearby color subpixels must be dimmed + * evenly. Therefore, an equalizing 5-tap FIR filter should be applied + * to subpixel coverages regardless of pixel boundaries and should have + * these properties: + * + * 1. It should be symmetrical, like {~a, b, c, b, a~}, to avoid + * any shifts in appearance. + * + * 2. It should be color-balanced, meaning a~+ b~=~c, to reduce color + * fringes by distributing the computed coverage for one subpixel to + * all subpixels equally. + * + * 3. It should be normalized, meaning 2a~+ 2b~+ c~=~1.0 to maintain + * overall brightness. + * + * Boxy 3-tap filter {0, 1/3, 1/3, 1/3, 0} is sharper but is less + * forgiving of non-ideal gamma curves of a screen (and viewing angles), + * beveled filters are fuzzier but more tolerant. 
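To make the three filter properties above concrete, here is a tiny check for a candidate filter { a, b, c, b, a } expressed in the 1/256 fixed-point units used by @FT_Library_SetLcdFilterWeights further below. The helper is purely illustrative (not part of the FreeType API), and a slack of one unit is allowed in the color-balance test because the built-in weights satisfy that property only up to rounding.

```
#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_LCD_FILTER_H


/* Hypothetical helper: check symmetry, color balance, and normalization */
/* of a 5-tap filter { a, b, c, b, a } given in 1/256 units.             */
static int
my_lcd_filter_is_sane( const FT_Byte  w[5] )
{
  int  a = w[0], b = w[1], c = w[2];


  if ( w[4] != a || w[3] != b )         /* 1. symmetric                  */
    return 0;

  if ( c - ( a + b ) > 1 ||
       ( a + b ) - c > 1 )              /* 2. color-balanced (~rounding) */
    return 0;

  if ( 2 * a + 2 * b + c != 256 )       /* 3. normalized, i.e. sums to   */
    return 0;                           /*    1.0 in 1/256 units         */

  return 1;
}
```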
+ *
+ * Use the @FT_Library_SetLcdFilter or @FT_Library_SetLcdFilterWeights
+ * API to specify a low-pass filter, which is then applied to
+ * subpixel-rendered bitmaps generated through @FT_Render_Glyph.
+ *
+ * Harmony LCD rendering is suitable to panels with any regular subpixel
+ * structure, not just monitors with 3 color striped subpixels, as long
+ * as the color subpixels have fixed positions relative to the pixel
+ * center. In this case, each color channel can be rendered separately
+ * after shifting the outline opposite to the subpixel shift so that the
+ * coverage maps are aligned. This method is immune to color fringes
+ * because the shifts do not change integral coverage.
+ *
+ * The subpixel geometry must be specified by xy-coordinates for each
+ * subpixel. By convention they may come in the RGB order: {{-1/3, 0},
+ * {0, 0}, {1/3, 0}} for a standard RGB striped panel or {{-1/6, 1/4},
+ * {-1/6, -1/4}, {1/3, 0}} for a certain PenTile panel.
+ *
+ * Use the @FT_Library_SetLcdGeometry API to specify subpixel positions.
+ * If one follows the RGB order convention, the same order applies to the
+ * resulting @FT_PIXEL_MODE_LCD and @FT_PIXEL_MODE_LCD_V bitmaps. Note,
+ * however, that the coordinate frame for the latter must be rotated
+ * clockwise. Harmony with default LCD geometry is equivalent to
+ * ClearType with light filter.
+ *
+ * As a result of ClearType filtering or Harmony shifts, the resulting
+ * dimensions of LCD bitmaps can be slightly wider or taller than the
+ * dimensions of the original outline with regard to the pixel grid.
+ * For example, for @FT_RENDER_MODE_LCD, the filter adds 2~subpixels to
+ * the left, and 2~subpixels to the right. The bitmap offset values are
+ * adjusted accordingly, so clients shouldn't need to modify their layout
+ * and glyph positioning code when enabling the filter.
+ *
+ * The ClearType and Harmony rendering is applicable to glyph bitmaps
+ * rendered through @FT_Render_Glyph, @FT_Load_Glyph, @FT_Load_Char, and
+ * @FT_Glyph_To_Bitmap, when @FT_RENDER_MODE_LCD or @FT_RENDER_MODE_LCD_V
+ * is specified. This API does not control @FT_Outline_Render and
+ * @FT_Outline_Get_Bitmap.
+ *
+ * The described algorithms can completely remove color artefacts when
+ * combined with gamma-corrected alpha blending in linear space. Each of
+ * the 3~alpha values (subpixels) must be independently used to blend one
+ * color channel. That is, red alpha blends the red channel of the text
+ * color with the red channel of the background pixel.
+ */
+
+
+ /**************************************************************************
+ *
+ * @enum:
+ * FT_LcdFilter
+ *
+ * @description:
+ * A list of values to identify various types of LCD filters.
+ *
+ * @values:
+ * FT_LCD_FILTER_NONE ::
+ * Do not perform filtering. When used with subpixel rendering, this
+ * results in sometimes severe color fringes.
+ *
+ * FT_LCD_FILTER_DEFAULT ::
+ * This is a beveled, normalized, and color-balanced five-tap filter
+ * with weights of [0x08 0x4D 0x56 0x4D 0x08] in 1/256 units.
+ *
+ * FT_LCD_FILTER_LIGHT ::
+ * This is a boxy, normalized, and color-balanced three-tap filter with
+ * weights of [0x00 0x55 0x56 0x55 0x00] in 1/256 units.
+ *
+ * FT_LCD_FILTER_LEGACY ::
+ * FT_LCD_FILTER_LEGACY1 ::
+ * This filter corresponds to the original libXft color filter. It
+ * provides high contrast output but can exhibit really bad color
+ * fringes if glyphs are not extremely well hinted to the pixel grid.
+ * This filter is only provided for comparison purposes, and might be + * disabled or stay unsupported in the future. The second value is + * provided for compatibility with FontConfig, which historically used + * different enumeration, sometimes incorrectly forwarded to FreeType. + * + * @since: + * 2.3.0 (`FT_LCD_FILTER_LEGACY1` since 2.6.2) + */ + typedef enum FT_LcdFilter_ + { + FT_LCD_FILTER_NONE = 0, + FT_LCD_FILTER_DEFAULT = 1, + FT_LCD_FILTER_LIGHT = 2, + FT_LCD_FILTER_LEGACY1 = 3, + FT_LCD_FILTER_LEGACY = 16, + + FT_LCD_FILTER_MAX /* do not remove */ + + } FT_LcdFilter; + + + /************************************************************************** + * + * @function: + * FT_Library_SetLcdFilter + * + * @description: + * This function is used to change filter applied to LCD decimated + * bitmaps, like the ones used when calling @FT_Render_Glyph with + * @FT_RENDER_MODE_LCD or @FT_RENDER_MODE_LCD_V. + * + * @input: + * library :: + * A handle to the target library instance. + * + * filter :: + * The filter type. + * + * You can use @FT_LCD_FILTER_NONE here to disable this feature, or + * @FT_LCD_FILTER_DEFAULT to use a default filter that should work well + * on most LCD screens. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * Since 2.10.3 the LCD filtering is enabled with @FT_LCD_FILTER_DEFAULT. + * It is no longer necessary to call this function explicitly except + * to choose a different filter or disable filtering altogether with + * @FT_LCD_FILTER_NONE. + * + * This function does nothing but returns `FT_Err_Unimplemented_Feature` + * if the configuration macro `FT_CONFIG_OPTION_SUBPIXEL_RENDERING` is + * not defined in your build of the library. + * + * @since: + * 2.3.0 + */ + FT_EXPORT( FT_Error ) + FT_Library_SetLcdFilter( FT_Library library, + FT_LcdFilter filter ); + + + /************************************************************************** + * + * @function: + * FT_Library_SetLcdFilterWeights + * + * @description: + * This function can be used to enable LCD filter with custom weights, + * instead of using presets in @FT_Library_SetLcdFilter. + * + * @input: + * library :: + * A handle to the target library instance. + * + * weights :: + * A pointer to an array; the function copies the first five bytes and + * uses them to specify the filter weights in 1/256 units. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function does nothing but returns `FT_Err_Unimplemented_Feature` + * if the configuration macro `FT_CONFIG_OPTION_SUBPIXEL_RENDERING` is + * not defined in your build of the library. + * + * LCD filter weights can also be set per face using @FT_Face_Properties + * with @FT_PARAM_TAG_LCD_FILTER_WEIGHTS. + * + * @since: + * 2.4.0 + */ + FT_EXPORT( FT_Error ) + FT_Library_SetLcdFilterWeights( FT_Library library, + unsigned char *weights ); + + + /************************************************************************** + * + * @type: + * FT_LcdFiveTapFilter + * + * @description: + * A typedef for passing the five LCD filter weights to + * @FT_Face_Properties within an @FT_Parameter structure. + * + * @since: + * 2.8 + * + */ +#define FT_LCD_FILTER_FIVE_TAPS 5 + + typedef FT_Byte FT_LcdFiveTapFilter[FT_LCD_FILTER_FIVE_TAPS]; + + + /************************************************************************** + * + * @function: + * FT_Library_SetLcdGeometry + * + * @description: + * This function can be used to modify default positions of color + * subpixels, which controls Harmony LCD rendering. 
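A short usage sketch for the two functions declared above; error handling is abbreviated, the custom weights are arbitrary example values (not a recommendation), and the calls only have an effect in builds where `FT_CONFIG_OPTION_SUBPIXEL_RENDERING` is defined.

```
#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_LCD_FILTER_H


static FT_Error
my_setup_lcd_rendering( FT_Library  library,
                        FT_Face     face,
                        FT_UInt     glyph_index )
{
  /* example custom 5-tap weights in 1/256 units (arbitrary values) */
  unsigned char  weights[5] = { 0x10, 0x45, 0x56, 0x45, 0x10 };
  FT_Error       error;


  /* either select a preset filter ...                              */
  error = FT_Library_SetLcdFilter( library, FT_LCD_FILTER_DEFAULT );
  if ( error )
    return error;          /* e.g. FT_Err_Unimplemented_Feature     */

  /* ... or install custom weights (overrides the preset)           */
  error = FT_Library_SetLcdFilterWeights( library, weights );
  if ( error )
    return error;

  /* the filter is applied when rendering in an LCD mode            */
  error = FT_Load_Glyph( face, glyph_index, FT_LOAD_DEFAULT );
  if ( !error )
    error = FT_Render_Glyph( face->glyph, FT_RENDER_MODE_LCD );

  return error;
}
```

Loading with `FT_LOAD_TARGET_LCD | FT_LOAD_RENDER` would fold the last two calls into one.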
+ * + * @input: + * library :: + * A handle to the target library instance. + * + * sub :: + * A pointer to an array of 3 vectors in 26.6 fractional pixel format; + * the function modifies the default values, see the note below. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * Subpixel geometry examples: + * + * - {{-21, 0}, {0, 0}, {21, 0}} is the default, corresponding to 3 color + * stripes shifted by a third of a pixel. This could be an RGB panel. + * + * - {{21, 0}, {0, 0}, {-21, 0}} looks the same as the default but can + * specify a BGR panel instead, while keeping the bitmap in the same + * RGB888 format. + * + * - {{0, 21}, {0, 0}, {0, -21}} is the vertical RGB, but the bitmap + * stays RGB888 as a result. + * + * - {{-11, 16}, {-11, -16}, {22, 0}} is a certain PenTile arrangement. + * + * This function does nothing and returns `FT_Err_Unimplemented_Feature` + * in the context of ClearType-style subpixel rendering when + * `FT_CONFIG_OPTION_SUBPIXEL_RENDERING` is defined in your build of the + * library. + * + * @since: + * 2.10.0 + */ + FT_EXPORT( FT_Error ) + FT_Library_SetLcdGeometry( FT_Library library, + FT_Vector sub[3] ); + + /* */ + + +FT_END_HEADER + +#endif /* FTLCDFIL_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftlist.h b/Example/include/freetype/ftlist.h new file mode 100644 index 0000000..b553131 --- /dev/null +++ b/Example/include/freetype/ftlist.h @@ -0,0 +1,296 @@ +/**************************************************************************** + * + * ftlist.h + * + * Generic list support for FreeType (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + + /************************************************************************** + * + * This file implements functions relative to list processing. Its data + * structures are defined in `freetype.h`. + * + */ + + +#ifndef FTLIST_H_ +#define FTLIST_H_ + + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * list_processing + * + * @title: + * List Processing + * + * @abstract: + * Simple management of lists. + * + * @description: + * This section contains various definitions related to list processing + * using doubly-linked nodes. + * + * @order: + * FT_List + * FT_ListNode + * FT_ListRec + * FT_ListNodeRec + * + * FT_List_Add + * FT_List_Insert + * FT_List_Find + * FT_List_Remove + * FT_List_Up + * FT_List_Iterate + * FT_List_Iterator + * FT_List_Finalize + * FT_List_Destructor + * + */ + + + /************************************************************************** + * + * @function: + * FT_List_Find + * + * @description: + * Find the list node for a given listed object. + * + * @input: + * list :: + * A pointer to the parent list. + * data :: + * The address of the listed object. + * + * @return: + * List node. `NULL` if it wasn't found. 
+ */ + FT_EXPORT( FT_ListNode ) + FT_List_Find( FT_List list, + void* data ); + + + /************************************************************************** + * + * @function: + * FT_List_Add + * + * @description: + * Append an element to the end of a list. + * + * @inout: + * list :: + * A pointer to the parent list. + * node :: + * The node to append. + */ + FT_EXPORT( void ) + FT_List_Add( FT_List list, + FT_ListNode node ); + + + /************************************************************************** + * + * @function: + * FT_List_Insert + * + * @description: + * Insert an element at the head of a list. + * + * @inout: + * list :: + * A pointer to parent list. + * node :: + * The node to insert. + */ + FT_EXPORT( void ) + FT_List_Insert( FT_List list, + FT_ListNode node ); + + + /************************************************************************** + * + * @function: + * FT_List_Remove + * + * @description: + * Remove a node from a list. This function doesn't check whether the + * node is in the list! + * + * @input: + * node :: + * The node to remove. + * + * @inout: + * list :: + * A pointer to the parent list. + */ + FT_EXPORT( void ) + FT_List_Remove( FT_List list, + FT_ListNode node ); + + + /************************************************************************** + * + * @function: + * FT_List_Up + * + * @description: + * Move a node to the head/top of a list. Used to maintain LRU lists. + * + * @inout: + * list :: + * A pointer to the parent list. + * node :: + * The node to move. + */ + FT_EXPORT( void ) + FT_List_Up( FT_List list, + FT_ListNode node ); + + + /************************************************************************** + * + * @functype: + * FT_List_Iterator + * + * @description: + * An FT_List iterator function that is called during a list parse by + * @FT_List_Iterate. + * + * @input: + * node :: + * The current iteration list node. + * + * user :: + * A typeless pointer passed to @FT_List_Iterate. Can be used to point + * to the iteration's state. + */ + typedef FT_Error + (*FT_List_Iterator)( FT_ListNode node, + void* user ); + + + /************************************************************************** + * + * @function: + * FT_List_Iterate + * + * @description: + * Parse a list and calls a given iterator function on each element. + * Note that parsing is stopped as soon as one of the iterator calls + * returns a non-zero value. + * + * @input: + * list :: + * A handle to the list. + * iterator :: + * An iterator function, called on each node of the list. + * user :: + * A user-supplied field that is passed as the second argument to the + * iterator. + * + * @return: + * The result (a FreeType error code) of the last iterator call. + */ + FT_EXPORT( FT_Error ) + FT_List_Iterate( FT_List list, + FT_List_Iterator iterator, + void* user ); + + + /************************************************************************** + * + * @functype: + * FT_List_Destructor + * + * @description: + * An @FT_List iterator function that is called during a list + * finalization by @FT_List_Finalize to destroy all elements in a given + * list. + * + * @input: + * system :: + * The current system object. + * + * data :: + * The current object to destroy. + * + * user :: + * A typeless pointer passed to @FT_List_Iterate. It can be used to + * point to the iteration's state. 
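Since the list API above is compact, an end-to-end sketch may be useful: build a list from caller-allocated nodes, walk it with @FT_List_Iterate, and tear it down again. The `my_` names, the string payloads, and the manual teardown are assumptions of this example; @FT_List_Finalize, declared a little further below, is the alternative shown in a later sketch.

```
#include <stdio.h>
#include <stdlib.h>

#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_LIST_H


/* iterator: print every node's payload; returning 0 keeps the parse going */
static FT_Error
my_print_node( FT_ListNode  node,
               void*        user )
{
  int*  counter = (int*)user;


  printf( "node %d: %s\n", (*counter)++, (const char*)node->data );
  return FT_Err_Ok;
}


static void
my_list_demo( void )
{
  FT_ListRec   list = { NULL, NULL };   /* empty list: head == tail == NULL */
  const char*  words[] = { "alpha", "beta", "gamma" };
  int          i, counter = 0;


  /* nodes are supplied by the caller; here they come from the heap */
  for ( i = 0; i < 3; i++ )
  {
    FT_ListNode  node = (FT_ListNode)calloc( 1, sizeof ( *node ) );


    node->data = (void*)words[i];
    FT_List_Add( &list, node );         /* append at the tail */
  }

  FT_List_Iterate( &list, my_print_node, &counter );

  /* manual teardown; FT_List_Finalize could do this too, but it needs */
  /* an FT_Memory object that is able to free the nodes                */
  while ( list.head )
  {
    FT_ListNode  node = list.head;


    FT_List_Remove( &list, node );
    free( node );
  }
}
```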
+ */ + typedef void + (*FT_List_Destructor)( FT_Memory memory, + void* data, + void* user ); + + + /************************************************************************** + * + * @function: + * FT_List_Finalize + * + * @description: + * Destroy all elements in the list as well as the list itself. + * + * @input: + * list :: + * A handle to the list. + * + * destroy :: + * A list destructor that will be applied to each element of the list. + * Set this to `NULL` if not needed. + * + * memory :: + * The current memory object that handles deallocation. + * + * user :: + * A user-supplied field that is passed as the last argument to the + * destructor. + * + * @note: + * This function expects that all nodes added by @FT_List_Add or + * @FT_List_Insert have been dynamically allocated. + */ + FT_EXPORT( void ) + FT_List_Finalize( FT_List list, + FT_List_Destructor destroy, + FT_Memory memory, + void* user ); + + /* */ + + +FT_END_HEADER + +#endif /* FTLIST_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftlogging.h b/Example/include/freetype/ftlogging.h new file mode 100644 index 0000000..53b8b89 --- /dev/null +++ b/Example/include/freetype/ftlogging.h @@ -0,0 +1,184 @@ +/**************************************************************************** + * + * ftlogging.h + * + * Additional debugging APIs. + * + * Copyright (C) 2020-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTLOGGING_H_ +#define FTLOGGING_H_ + + +#include +#include FT_CONFIG_CONFIG_H + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * debugging_apis + * + * @title: + * External Debugging APIs + * + * @abstract: + * Public APIs to control the `FT_DEBUG_LOGGING` macro. + * + * @description: + * This section contains the declarations of public functions that + * enables fine control of what the `FT_DEBUG_LOGGING` macro outputs. + * + */ + + + /************************************************************************** + * + * @function: + * FT_Trace_Set_Level + * + * @description: + * Change the levels of tracing components of FreeType at run time. + * + * @input: + * tracing_level :: + * New tracing value. + * + * @example: + * The following call makes FreeType trace everything but the 'memory' + * component. + * + * ``` + * FT_Trace_Set_Level( "any:7 memory:0" ); + * ``` + * + * @note: + * This function does nothing if compilation option `FT_DEBUG_LOGGING` + * isn't set. + * + * @since: + * 2.11 + * + */ + FT_EXPORT( void ) + FT_Trace_Set_Level( const char* tracing_level ); + + + /************************************************************************** + * + * @function: + * FT_Trace_Set_Default_Level + * + * @description: + * Reset tracing value of FreeType's components to the default value + * (i.e., to the value of the `FT2_DEBUG` environment value or to NULL + * if `FT2_DEBUG` is not set). + * + * @note: + * This function does nothing if compilation option `FT_DEBUG_LOGGING` + * isn't set. 
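Picking up the note on @FT_List_Finalize above: the function releases the list nodes through its `memory` argument, so a caller outside FreeType has to supply an @FT_MemoryRec of its own that matches how the nodes were allocated. The sketch below wraps `malloc`/`free`; every `my_` name is hypothetical.

```
#include <stdlib.h>

#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_LIST_H
#include FT_SYSTEM_H


/* a minimal malloc/free-backed FT_MemoryRec */
static void*
my_alloc( FT_Memory  memory,
          long       size )
{
  (void)memory;
  return malloc( (size_t)size );
}

static void
my_free( FT_Memory  memory,
         void*      block )
{
  (void)memory;
  free( block );
}

static void*
my_realloc( FT_Memory  memory,
            long       cur_size,
            long       new_size,
            void*      block )
{
  (void)memory;
  (void)cur_size;
  return realloc( block, (size_t)new_size );
}


/* FT_List_Destructor: release the per-node payload */
static void
my_destroy_data( FT_Memory  memory,
                 void*      data,
                 void*      user )
{
  (void)memory;
  (void)user;
  free( data );
}


static void
my_destroy_list( FT_List  list )
{
  struct FT_MemoryRec_  memory = { NULL, my_alloc, my_free, my_realloc };


  /* frees each payload via the destructor and each node via `memory` */
  FT_List_Finalize( list, my_destroy_data, &memory, NULL );
}
```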
+ * + * @since: + * 2.11 + * + */ + FT_EXPORT( void ) + FT_Trace_Set_Default_Level( void ); + + + /************************************************************************** + * + * @functype: + * FT_Custom_Log_Handler + * + * @description: + * A function typedef that is used to handle the logging of tracing and + * debug messages on a file system. + * + * @input: + * ft_component :: + * The name of `FT_COMPONENT` from which the current debug or error + * message is produced. + * + * fmt :: + * Actual debug or tracing message. + * + * args:: + * Arguments of debug or tracing messages. + * + * @since: + * 2.11 + * + */ + typedef void + (*FT_Custom_Log_Handler)( const char* ft_component, + const char* fmt, + va_list args ); + + + /************************************************************************** + * + * @function: + * FT_Set_Log_Handler + * + * @description: + * A function to set a custom log handler. + * + * @input: + * handler :: + * New logging function. + * + * @note: + * This function does nothing if compilation option `FT_DEBUG_LOGGING` + * isn't set. + * + * @since: + * 2.11 + * + */ + FT_EXPORT( void ) + FT_Set_Log_Handler( FT_Custom_Log_Handler handler ); + + + /************************************************************************** + * + * @function: + * FT_Set_Default_Log_Handler + * + * @description: + * A function to undo the effect of @FT_Set_Log_Handler, resetting the + * log handler to FreeType's built-in version. + * + * @note: + * This function does nothing if compilation option `FT_DEBUG_LOGGING` + * isn't set. + * + * @since: + * 2.11 + * + */ + FT_EXPORT( void ) + FT_Set_Default_Log_Handler( void ); + + /* */ + + +FT_END_HEADER + +#endif /* FTLOGGING_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftlzw.h b/Example/include/freetype/ftlzw.h new file mode 100644 index 0000000..adfd172 --- /dev/null +++ b/Example/include/freetype/ftlzw.h @@ -0,0 +1,100 @@ +/**************************************************************************** + * + * ftlzw.h + * + * LZW-compressed stream support. + * + * Copyright (C) 2004-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTLZW_H_ +#define FTLZW_H_ + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + /************************************************************************** + * + * @section: + * lzw + * + * @title: + * LZW Streams + * + * @abstract: + * Using LZW-compressed font files. + * + * @description: + * In certain builds of the library, LZW compression recognition is + * automatically handled when calling @FT_New_Face or @FT_Open_Face. + * This means that if no font driver is capable of handling the raw + * compressed file, the library will try to open a LZW stream from it and + * re-open the face with it. + * + * The stream implementation is very basic and resets the decompression + * process each time seeking backwards is needed within the stream, + * which significantly undermines the performance. + * + * This section contains the declaration of LZW-specific functions. 
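Returning briefly to the logging API above, here is a minimal custom handler matching @FT_Custom_Log_Handler that forwards every message to `stderr`. The handler and function names, as well as the trace levels, are assumptions of the example, and both calls are no-ops unless the library was built with `FT_DEBUG_LOGGING`.

```
#include <stdarg.h>
#include <stdio.h>

#include <ft2build.h>
#include <freetype/ftlogging.h>


/* FT_Custom_Log_Handler: prefix each message with the FT_COMPONENT that */
/* produced it and forward the printf-style payload to stderr.           */
static void
my_log_handler( const char*  ft_component,
                const char*  fmt,
                va_list      args )
{
  fprintf( stderr, "[%s] ", ft_component );
  vfprintf( stderr, fmt, args );
}


static void
my_enable_tracing( void )
{
  FT_Set_Log_Handler( my_log_handler );    /* route messages to stderr */
  FT_Trace_Set_Level( "any:6 memory:0" );  /* arbitrary example levels */
}
```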
+ * + */ + + /************************************************************************** + * + * @function: + * FT_Stream_OpenLZW + * + * @description: + * Open a new stream to parse LZW-compressed font files. This is mainly + * used to support the compressed `*.pcf.Z` fonts that come with XFree86. + * + * @input: + * stream :: + * The target embedding stream. + * + * source :: + * The source stream. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The source stream must be opened _before_ calling this function. + * + * Calling the internal function `FT_Stream_Close` on the new stream will + * **not** call `FT_Stream_Close` on the source stream. None of the + * stream objects will be released to the heap. + * + * This function may return `FT_Err_Unimplemented_Feature` if your build + * of FreeType was not compiled with LZW support. + */ + FT_EXPORT( FT_Error ) + FT_Stream_OpenLZW( FT_Stream stream, + FT_Stream source ); + + /* */ + + +FT_END_HEADER + +#endif /* FTLZW_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftmac.h b/Example/include/freetype/ftmac.h new file mode 100644 index 0000000..a91e38f --- /dev/null +++ b/Example/include/freetype/ftmac.h @@ -0,0 +1,289 @@ +/**************************************************************************** + * + * ftmac.h + * + * Additional Mac-specific API. + * + * Copyright (C) 1996-2023 by + * Just van Rossum, David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +/**************************************************************************** + * + * NOTE: Include this file after `FT_FREETYPE_H` and after any + * Mac-specific headers (because this header uses Mac types such as + * 'Handle', 'FSSpec', 'FSRef', etc.) + * + */ + + +#ifndef FTMAC_H_ +#define FTMAC_H_ + + + + +FT_BEGIN_HEADER + + + /* gcc-3.1 and later can warn about functions tagged as deprecated */ +#ifndef FT_DEPRECATED_ATTRIBUTE +#if defined( __GNUC__ ) && \ + ( ( __GNUC__ >= 4 ) || \ + ( ( __GNUC__ == 3 ) && ( __GNUC_MINOR__ >= 1 ) ) ) +#define FT_DEPRECATED_ATTRIBUTE __attribute__(( deprecated )) +#else +#define FT_DEPRECATED_ATTRIBUTE +#endif +#endif + + + /************************************************************************** + * + * @section: + * mac_specific + * + * @title: + * Mac Specific Interface + * + * @abstract: + * Only available on the Macintosh. + * + * @description: + * The following definitions are only available if FreeType is compiled + * on a Macintosh. + * + */ + + + /************************************************************************** + * + * @function: + * FT_New_Face_From_FOND + * + * @description: + * Create a new face object from a FOND resource. + * + * @inout: + * library :: + * A handle to the library resource. + * + * @input: + * fond :: + * A FOND resource. + * + * face_index :: + * Only supported for the -1 'sanity check' special case. + * + * @output: + * aface :: + * A handle to a new face object. + * + * @return: + * FreeType error code. 0~means success. + * + * @example: + * This function can be used to create @FT_Face objects from fonts that + * are installed in the system as follows. 
+ * + * ``` + * fond = GetResource( 'FOND', fontName ); + * error = FT_New_Face_From_FOND( library, fond, 0, &face ); + * ``` + */ + FT_EXPORT( FT_Error ) + FT_New_Face_From_FOND( FT_Library library, + Handle fond, + FT_Long face_index, + FT_Face *aface ) + FT_DEPRECATED_ATTRIBUTE; + + + /************************************************************************** + * + * @function: + * FT_GetFile_From_Mac_Name + * + * @description: + * Return an FSSpec for the disk file containing the named font. + * + * @input: + * fontName :: + * Mac OS name of the font (e.g., Times New Roman Bold). + * + * @output: + * pathSpec :: + * FSSpec to the file. For passing to @FT_New_Face_From_FSSpec. + * + * face_index :: + * Index of the face. For passing to @FT_New_Face_From_FSSpec. + * + * @return: + * FreeType error code. 0~means success. + */ + FT_EXPORT( FT_Error ) + FT_GetFile_From_Mac_Name( const char* fontName, + FSSpec* pathSpec, + FT_Long* face_index ) + FT_DEPRECATED_ATTRIBUTE; + + + /************************************************************************** + * + * @function: + * FT_GetFile_From_Mac_ATS_Name + * + * @description: + * Return an FSSpec for the disk file containing the named font. + * + * @input: + * fontName :: + * Mac OS name of the font in ATS framework. + * + * @output: + * pathSpec :: + * FSSpec to the file. For passing to @FT_New_Face_From_FSSpec. + * + * face_index :: + * Index of the face. For passing to @FT_New_Face_From_FSSpec. + * + * @return: + * FreeType error code. 0~means success. + */ + FT_EXPORT( FT_Error ) + FT_GetFile_From_Mac_ATS_Name( const char* fontName, + FSSpec* pathSpec, + FT_Long* face_index ) + FT_DEPRECATED_ATTRIBUTE; + + + /************************************************************************** + * + * @function: + * FT_GetFilePath_From_Mac_ATS_Name + * + * @description: + * Return a pathname of the disk file and face index for given font name + * that is handled by ATS framework. + * + * @input: + * fontName :: + * Mac OS name of the font in ATS framework. + * + * @output: + * path :: + * Buffer to store pathname of the file. For passing to @FT_New_Face. + * The client must allocate this buffer before calling this function. + * + * maxPathSize :: + * Lengths of the buffer `path` that client allocated. + * + * face_index :: + * Index of the face. For passing to @FT_New_Face. + * + * @return: + * FreeType error code. 0~means success. + */ + FT_EXPORT( FT_Error ) + FT_GetFilePath_From_Mac_ATS_Name( const char* fontName, + UInt8* path, + UInt32 maxPathSize, + FT_Long* face_index ) + FT_DEPRECATED_ATTRIBUTE; + + + /************************************************************************** + * + * @function: + * FT_New_Face_From_FSSpec + * + * @description: + * Create a new face object from a given resource and typeface index + * using an FSSpec to the font file. + * + * @inout: + * library :: + * A handle to the library resource. + * + * @input: + * spec :: + * FSSpec to the font file. + * + * face_index :: + * The index of the face within the resource. The first face has + * index~0. + * @output: + * aface :: + * A handle to a new face object. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * @FT_New_Face_From_FSSpec is identical to @FT_New_Face except it + * accepts an FSSpec instead of a path. 
+ */ + FT_EXPORT( FT_Error ) + FT_New_Face_From_FSSpec( FT_Library library, + const FSSpec *spec, + FT_Long face_index, + FT_Face *aface ) + FT_DEPRECATED_ATTRIBUTE; + + + /************************************************************************** + * + * @function: + * FT_New_Face_From_FSRef + * + * @description: + * Create a new face object from a given resource and typeface index + * using an FSRef to the font file. + * + * @inout: + * library :: + * A handle to the library resource. + * + * @input: + * spec :: + * FSRef to the font file. + * + * face_index :: + * The index of the face within the resource. The first face has + * index~0. + * @output: + * aface :: + * A handle to a new face object. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * @FT_New_Face_From_FSRef is identical to @FT_New_Face except it accepts + * an FSRef instead of a path. + */ + FT_EXPORT( FT_Error ) + FT_New_Face_From_FSRef( FT_Library library, + const FSRef *ref, + FT_Long face_index, + FT_Face *aface ) + FT_DEPRECATED_ATTRIBUTE; + + /* */ + + +FT_END_HEADER + + +#endif /* FTMAC_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftmm.h b/Example/include/freetype/ftmm.h new file mode 100644 index 0000000..d145128 --- /dev/null +++ b/Example/include/freetype/ftmm.h @@ -0,0 +1,805 @@ +/**************************************************************************** + * + * ftmm.h + * + * FreeType Multiple Master font interface (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTMM_H_ +#define FTMM_H_ + + +#include + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * multiple_masters + * + * @title: + * Multiple Masters + * + * @abstract: + * How to manage Multiple Masters fonts. + * + * @description: + * The following types and functions are used to manage Multiple Master + * fonts, i.e., the selection of specific design instances by setting + * design axis coordinates. + * + * Besides Adobe MM fonts, the interface supports Apple's TrueType GX and + * OpenType variation fonts. Some of the routines only work with Adobe + * MM fonts, others will work with all three types. They are similar + * enough that a consistent interface makes sense. + * + * For Adobe MM fonts, macro @FT_IS_SFNT returns false. For GX and + * OpenType variation fonts, it returns true. + * + */ + + + /************************************************************************** + * + * @struct: + * FT_MM_Axis + * + * @description: + * A structure to model a given axis in design space for Multiple Masters + * fonts. + * + * This structure can't be used for TrueType GX or OpenType variation + * fonts. + * + * @fields: + * name :: + * The axis's name. + * + * minimum :: + * The axis's minimum design coordinate. + * + * maximum :: + * The axis's maximum design coordinate. 
+ */ + typedef struct FT_MM_Axis_ + { + FT_String* name; + FT_Long minimum; + FT_Long maximum; + + } FT_MM_Axis; + + + /************************************************************************** + * + * @struct: + * FT_Multi_Master + * + * @description: + * A structure to model the axes and space of a Multiple Masters font. + * + * This structure can't be used for TrueType GX or OpenType variation + * fonts. + * + * @fields: + * num_axis :: + * Number of axes. Cannot exceed~4. + * + * num_designs :: + * Number of designs; should be normally 2^num_axis even though the + * Type~1 specification strangely allows for intermediate designs to be + * present. This number cannot exceed~16. + * + * axis :: + * A table of axis descriptors. + */ + typedef struct FT_Multi_Master_ + { + FT_UInt num_axis; + FT_UInt num_designs; + FT_MM_Axis axis[T1_MAX_MM_AXIS]; + + } FT_Multi_Master; + + + /************************************************************************** + * + * @struct: + * FT_Var_Axis + * + * @description: + * A structure to model a given axis in design space for Multiple + * Masters, TrueType GX, and OpenType variation fonts. + * + * @fields: + * name :: + * The axis's name. Not always meaningful for TrueType GX or OpenType + * variation fonts. + * + * minimum :: + * The axis's minimum design coordinate. + * + * def :: + * The axis's default design coordinate. FreeType computes meaningful + * default values for Adobe MM fonts. + * + * maximum :: + * The axis's maximum design coordinate. + * + * tag :: + * The axis's tag (the equivalent to 'name' for TrueType GX and + * OpenType variation fonts). FreeType provides default values for + * Adobe MM fonts if possible. + * + * strid :: + * The axis name entry in the font's 'name' table. This is another + * (and often better) version of the 'name' field for TrueType GX or + * OpenType variation fonts. Not meaningful for Adobe MM fonts. + * + * @note: + * The fields `minimum`, `def`, and `maximum` are 16.16 fractional values + * for TrueType GX and OpenType variation fonts. For Adobe MM fonts, the + * values are whole numbers (i.e., the fractional part is zero). + */ + typedef struct FT_Var_Axis_ + { + FT_String* name; + + FT_Fixed minimum; + FT_Fixed def; + FT_Fixed maximum; + + FT_ULong tag; + FT_UInt strid; + + } FT_Var_Axis; + + + /************************************************************************** + * + * @struct: + * FT_Var_Named_Style + * + * @description: + * A structure to model a named instance in a TrueType GX or OpenType + * variation font. + * + * This structure can't be used for Adobe MM fonts. + * + * @fields: + * coords :: + * The design coordinates for this instance. This is an array with one + * entry for each axis. + * + * strid :: + * The entry in 'name' table identifying this instance. + * + * psid :: + * The entry in 'name' table identifying a PostScript name for this + * instance. Value 0xFFFF indicates a missing entry. + */ + typedef struct FT_Var_Named_Style_ + { + FT_Fixed* coords; + FT_UInt strid; + FT_UInt psid; /* since 2.7.1 */ + + } FT_Var_Named_Style; + + + /************************************************************************** + * + * @struct: + * FT_MM_Var + * + * @description: + * A structure to model the axes and space of an Adobe MM, TrueType GX, + * or OpenType variation font. + * + * Some fields are specific to one format and not to the others. + * + * @fields: + * num_axis :: + * The number of axes. The maximum value is~4 for Adobe MM fonts; no + * limit in TrueType GX or OpenType variation fonts. 
+ * + * num_designs :: + * The number of designs; should be normally 2^num_axis for Adobe MM + * fonts. Not meaningful for TrueType GX or OpenType variation fonts + * (where every glyph could have a different number of designs). + * + * num_namedstyles :: + * The number of named styles; a 'named style' is a tuple of design + * coordinates that has a string ID (in the 'name' table) associated + * with it. The font can tell the user that, for example, + * [Weight=1.5,Width=1.1] is 'Bold'. Another name for 'named style' is + * 'named instance'. + * + * For Adobe Multiple Masters fonts, this value is always zero because + * the format does not support named styles. + * + * axis :: + * An axis descriptor table. TrueType GX and OpenType variation fonts + * contain slightly more data than Adobe MM fonts. Memory management + * of this pointer is done internally by FreeType. + * + * namedstyle :: + * A named style (instance) table. Only meaningful for TrueType GX and + * OpenType variation fonts. Memory management of this pointer is done + * internally by FreeType. + */ + typedef struct FT_MM_Var_ + { + FT_UInt num_axis; + FT_UInt num_designs; + FT_UInt num_namedstyles; + FT_Var_Axis* axis; + FT_Var_Named_Style* namedstyle; + + } FT_MM_Var; + + + /************************************************************************** + * + * @function: + * FT_Get_Multi_Master + * + * @description: + * Retrieve a variation descriptor of a given Adobe MM font. + * + * This function can't be used with TrueType GX or OpenType variation + * fonts. + * + * @input: + * face :: + * A handle to the source face. + * + * @output: + * amaster :: + * The Multiple Masters descriptor. + * + * @return: + * FreeType error code. 0~means success. + */ + FT_EXPORT( FT_Error ) + FT_Get_Multi_Master( FT_Face face, + FT_Multi_Master *amaster ); + + + /************************************************************************** + * + * @function: + * FT_Get_MM_Var + * + * @description: + * Retrieve a variation descriptor for a given font. + * + * This function works with all supported variation formats. + * + * @input: + * face :: + * A handle to the source face. + * + * @output: + * amaster :: + * The variation descriptor. Allocates a data structure, which the + * user must deallocate with a call to @FT_Done_MM_Var after use. + * + * @return: + * FreeType error code. 0~means success. + */ + FT_EXPORT( FT_Error ) + FT_Get_MM_Var( FT_Face face, + FT_MM_Var* *amaster ); + + + /************************************************************************** + * + * @function: + * FT_Done_MM_Var + * + * @description: + * Free the memory allocated by @FT_Get_MM_Var. + * + * @input: + * library :: + * A handle of the face's parent library object that was used in the + * call to @FT_Get_MM_Var to create `amaster`. + * + * @return: + * FreeType error code. 0~means success. + */ + FT_EXPORT( FT_Error ) + FT_Done_MM_Var( FT_Library library, + FT_MM_Var *amaster ); + + + /************************************************************************** + * + * @function: + * FT_Set_MM_Design_Coordinates + * + * @description: + * For Adobe MM fonts, choose an interpolated font design through design + * coordinates. + * + * This function can't be used with TrueType GX or OpenType variation + * fonts. + * + * @inout: + * face :: + * A handle to the source face. + * + * @input: + * num_coords :: + * The number of available design coordinates. If it is larger than + * the number of axes, ignore the excess values. 
If it is smaller than + * the number of axes, use default values for the remaining axes. + * + * coords :: + * An array of design coordinates. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * [Since 2.8.1] To reset all axes to the default values, call the + * function with `num_coords` set to zero and `coords` set to `NULL`. + * + * [Since 2.9] If `num_coords` is larger than zero, this function sets + * the @FT_FACE_FLAG_VARIATION bit in @FT_Face's `face_flags` field + * (i.e., @FT_IS_VARIATION will return true). If `num_coords` is zero, + * this bit flag gets unset. + */ + FT_EXPORT( FT_Error ) + FT_Set_MM_Design_Coordinates( FT_Face face, + FT_UInt num_coords, + FT_Long* coords ); + + + /************************************************************************** + * + * @function: + * FT_Set_Var_Design_Coordinates + * + * @description: + * Choose an interpolated font design through design coordinates. + * + * This function works with all supported variation formats. + * + * @inout: + * face :: + * A handle to the source face. + * + * @input: + * num_coords :: + * The number of available design coordinates. If it is larger than + * the number of axes, ignore the excess values. If it is smaller than + * the number of axes, use default values for the remaining axes. + * + * coords :: + * An array of design coordinates. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The design coordinates are 16.16 fractional values for TrueType GX and + * OpenType variation fonts. For Adobe MM fonts, the values are supposed + * to be whole numbers (i.e., the fractional part is zero). + * + * [Since 2.8.1] To reset all axes to the default values, call the + * function with `num_coords` set to zero and `coords` set to `NULL`. + * [Since 2.9] 'Default values' means the currently selected named + * instance (or the base font if no named instance is selected). + * + * [Since 2.9] If `num_coords` is larger than zero, this function sets + * the @FT_FACE_FLAG_VARIATION bit in @FT_Face's `face_flags` field + * (i.e., @FT_IS_VARIATION will return true). If `num_coords` is zero, + * this bit flag gets unset. + */ + FT_EXPORT( FT_Error ) + FT_Set_Var_Design_Coordinates( FT_Face face, + FT_UInt num_coords, + FT_Fixed* coords ); + + + /************************************************************************** + * + * @function: + * FT_Get_Var_Design_Coordinates + * + * @description: + * Get the design coordinates of the currently selected interpolated + * font. + * + * This function works with all supported variation formats. + * + * @input: + * face :: + * A handle to the source face. + * + * num_coords :: + * The number of design coordinates to retrieve. If it is larger than + * the number of axes, set the excess values to~0. + * + * @output: + * coords :: + * The design coordinates array. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The design coordinates are 16.16 fractional values for TrueType GX and + * OpenType variation fonts. For Adobe MM fonts, the values are whole + * numbers (i.e., the fractional part is zero). + * + * @since: + * 2.7.1 + */ + FT_EXPORT( FT_Error ) + FT_Get_Var_Design_Coordinates( FT_Face face, + FT_UInt num_coords, + FT_Fixed* coords ); + + + /************************************************************************** + * + * @function: + * FT_Set_MM_Blend_Coordinates + * + * @description: + * Choose an interpolated font design through normalized blend + * coordinates. 
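Tying the variation calls above together, the sketch below enumerates the axes of a face and pushes the first axis to its maximum design coordinate. The `my_` name and the "first axis" choice are assumptions of the example; a real application would select the axis by its `tag`.

```
#include <stdlib.h>

#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_MULTIPLE_MASTERS_H


static FT_Error
my_maximize_first_axis( FT_Library  library,
                        FT_Face     face )
{
  FT_MM_Var*  mm     = NULL;
  FT_Fixed*   coords = NULL;
  FT_Error    error;
  FT_UInt     i;


  error = FT_Get_MM_Var( face, &mm );
  if ( error )
    return error;                 /* probably not a variation font */

  if ( mm->num_axis == 0 )
  {
    FT_Done_MM_Var( library, mm );
    return FT_Err_Ok;
  }

  coords = (FT_Fixed*)malloc( mm->num_axis * sizeof ( FT_Fixed ) );
  if ( !coords )
  {
    FT_Done_MM_Var( library, mm );
    return FT_Err_Out_Of_Memory;
  }

  /* start from the default design coordinates ...                     */
  for ( i = 0; i < mm->num_axis; i++ )
    coords[i] = mm->axis[i].def;

  /* ... and push the first axis to its maximum (16.16 design values   */
  /* for GX/OpenType variation fonts, integers for Adobe MM fonts)     */
  coords[0] = mm->axis[0].maximum;

  error = FT_Set_Var_Design_Coordinates( face, mm->num_axis, coords );

  free( coords );
  FT_Done_MM_Var( library, mm );  /* release what FT_Get_MM_Var returned */

  return error;
}
```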
+ * + * This function works with all supported variation formats. + * + * @inout: + * face :: + * A handle to the source face. + * + * @input: + * num_coords :: + * The number of available design coordinates. If it is larger than + * the number of axes, ignore the excess values. If it is smaller than + * the number of axes, use default values for the remaining axes. + * + * coords :: + * The design coordinates array. Each element is a 16.16 fractional + * value and must be between 0 and 1.0 for Adobe MM fonts, and between + * -1.0 and 1.0 for TrueType GX and OpenType variation fonts. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * [Since 2.8.1] To reset all axes to the default values, call the + * function with `num_coords` set to zero and `coords` set to `NULL`. + * [Since 2.9] 'Default values' means the currently selected named + * instance (or the base font if no named instance is selected). + * + * [Since 2.9] If `num_coords` is larger than zero, this function sets + * the @FT_FACE_FLAG_VARIATION bit in @FT_Face's `face_flags` field + * (i.e., @FT_IS_VARIATION will return true). If `num_coords` is zero, + * this bit flag gets unset. + */ + FT_EXPORT( FT_Error ) + FT_Set_MM_Blend_Coordinates( FT_Face face, + FT_UInt num_coords, + FT_Fixed* coords ); + + + /************************************************************************** + * + * @function: + * FT_Get_MM_Blend_Coordinates + * + * @description: + * Get the normalized blend coordinates of the currently selected + * interpolated font. + * + * This function works with all supported variation formats. + * + * @input: + * face :: + * A handle to the source face. + * + * num_coords :: + * The number of normalized blend coordinates to retrieve. If it is + * larger than the number of axes, set the excess values to~0.5 for + * Adobe MM fonts, and to~0 for TrueType GX and OpenType variation + * fonts. + * + * @output: + * coords :: + * The normalized blend coordinates array (as 16.16 fractional values). + * + * @return: + * FreeType error code. 0~means success. + * + * @since: + * 2.7.1 + */ + FT_EXPORT( FT_Error ) + FT_Get_MM_Blend_Coordinates( FT_Face face, + FT_UInt num_coords, + FT_Fixed* coords ); + + + /************************************************************************** + * + * @function: + * FT_Set_Var_Blend_Coordinates + * + * @description: + * This is another name of @FT_Set_MM_Blend_Coordinates. + */ + FT_EXPORT( FT_Error ) + FT_Set_Var_Blend_Coordinates( FT_Face face, + FT_UInt num_coords, + FT_Fixed* coords ); + + + /************************************************************************** + * + * @function: + * FT_Get_Var_Blend_Coordinates + * + * @description: + * This is another name of @FT_Get_MM_Blend_Coordinates. + * + * @since: + * 2.7.1 + */ + FT_EXPORT( FT_Error ) + FT_Get_Var_Blend_Coordinates( FT_Face face, + FT_UInt num_coords, + FT_Fixed* coords ); + + + /************************************************************************** + * + * @function: + * FT_Set_MM_WeightVector + * + * @description: + * For Adobe MM fonts, choose an interpolated font design by directly + * setting the weight vector. + * + * This function can't be used with TrueType GX or OpenType variation + * fonts. + * + * @inout: + * face :: + * A handle to the source face. + * + * @input: + * len :: + * The length of the weight vector array. If it is larger than the + * number of designs, the extra values are ignored. If it is less than + * the number of designs, the remaining values are set to zero. 
+ * + * weightvector :: + * An array representing the weight vector. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * Adobe Multiple Master fonts limit the number of designs, and thus the + * length of the weight vector to 16~elements. + * + * If `len` is larger than zero, this function sets the + * @FT_FACE_FLAG_VARIATION bit in @FT_Face's `face_flags` field (i.e., + * @FT_IS_VARIATION will return true). If `len` is zero, this bit flag + * is unset and the weight vector array is reset to the default values. + * + * The Adobe documentation also states that the values in the + * WeightVector array must total 1.0 +/-~0.001. In practice this does + * not seem to be enforced, so is not enforced here, either. + * + * @since: + * 2.10 + */ + FT_EXPORT( FT_Error ) + FT_Set_MM_WeightVector( FT_Face face, + FT_UInt len, + FT_Fixed* weightvector ); + + + /************************************************************************** + * + * @function: + * FT_Get_MM_WeightVector + * + * @description: + * For Adobe MM fonts, retrieve the current weight vector of the font. + * + * This function can't be used with TrueType GX or OpenType variation + * fonts. + * + * @inout: + * face :: + * A handle to the source face. + * + * len :: + * A pointer to the size of the array to be filled. If the size of the + * array is less than the number of designs, `FT_Err_Invalid_Argument` + * is returned, and `len` is set to the required size (the number of + * designs). If the size of the array is greater than the number of + * designs, the remaining entries are set to~0. On successful + * completion, `len` is set to the number of designs (i.e., the number + * of values written to the array). + * + * @output: + * weightvector :: + * An array to be filled. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * Adobe Multiple Master fonts limit the number of designs, and thus the + * length of the WeightVector to~16. + * + * @since: + * 2.10 + */ + FT_EXPORT( FT_Error ) + FT_Get_MM_WeightVector( FT_Face face, + FT_UInt* len, + FT_Fixed* weightvector ); + + + /************************************************************************** + * + * @enum: + * FT_VAR_AXIS_FLAG_XXX + * + * @description: + * A list of bit flags used in the return value of + * @FT_Get_Var_Axis_Flags. + * + * @values: + * FT_VAR_AXIS_FLAG_HIDDEN :: + * The variation axis should not be exposed to user interfaces. + * + * @since: + * 2.8.1 + */ +#define FT_VAR_AXIS_FLAG_HIDDEN 1 + + + /************************************************************************** + * + * @function: + * FT_Get_Var_Axis_Flags + * + * @description: + * Get the 'flags' field of an OpenType Variation Axis Record. + * + * Not meaningful for Adobe MM fonts (`*flags` is always zero). + * + * @input: + * master :: + * The variation descriptor. + * + * axis_index :: + * The index of the requested variation axis. + * + * @output: + * flags :: + * The 'flags' field. See @FT_VAR_AXIS_FLAG_XXX for possible values. + * + * @return: + * FreeType error code. 0~means success. + * + * @since: + * 2.8.1 + */ + FT_EXPORT( FT_Error ) + FT_Get_Var_Axis_Flags( FT_MM_Var* master, + FT_UInt axis_index, + FT_UInt* flags ); + + + /************************************************************************** + * + * @function: + * FT_Set_Named_Instance + * + * @description: + * Set or change the current named instance. + * + * @input: + * face :: + * A handle to the source face. 
+ * + * instance_index :: + * The index of the requested instance, starting with value 1. If set + * to value 0, FreeType switches to font access without a named + * instance. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The function uses the value of `instance_index` to set bits 16-30 of + * the face's `face_index` field. It also resets any variation applied + * to the font, and the @FT_FACE_FLAG_VARIATION bit of the face's + * `face_flags` field gets reset to zero (i.e., @FT_IS_VARIATION will + * return false). + * + * For Adobe MM fonts (which don't have named instances) this function + * simply resets the current face to the default instance. + * + * @since: + * 2.9 + */ + FT_EXPORT( FT_Error ) + FT_Set_Named_Instance( FT_Face face, + FT_UInt instance_index ); + + + /************************************************************************** + * + * @function: + * FT_Get_Default_Named_Instance + * + * @description: + * Retrieve the index of the default named instance, to be used with + * @FT_Set_Named_Instance. + * + * The default instance of a variation font is that instance for which + * the nth axis coordinate is equal to `axis[n].def` (as specified in the + * @FT_MM_Var structure), with~n covering all axes. + * + * FreeType synthesizes a named instance for the default instance if the + * font does not contain such an entry. + * + * @input: + * face :: + * A handle to the source face. + * + * @output: + * instance_index :: + * The index of the default named instance. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * For Adobe MM fonts (which don't have named instances) this function + * always returns zero for `instance_index`. + * + * @since: + * 2.13.1 + */ + FT_EXPORT( FT_Error ) + FT_Get_Default_Named_Instance( FT_Face face, + FT_UInt *instance_index ); + + /* */ + + +FT_END_HEADER + +#endif /* FTMM_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftmodapi.h b/Example/include/freetype/ftmodapi.h new file mode 100644 index 0000000..c8f0c2c --- /dev/null +++ b/Example/include/freetype/ftmodapi.h @@ -0,0 +1,807 @@ +/**************************************************************************** + * + * ftmodapi.h + * + * FreeType modules public interface (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTMODAPI_H_ +#define FTMODAPI_H_ + + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * module_management + * + * @title: + * Module Management + * + * @abstract: + * How to add, upgrade, remove, and control modules from FreeType. + * + * @description: + * The definitions below are used to manage modules within FreeType. + * Internal and external modules can be added, upgraded, and removed at + * runtime. For example, an alternative renderer or proprietary font + * driver can be registered and prioritized. 
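Returning to the named-instance functions above: the following sketch resets a face to its default named instance (for Adobe MM fonts the reported index is zero, which @FT_Set_Named_Instance treats as "no named instance", i.e. the default design). The function name is an assumption of the example.

```
#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_MULTIPLE_MASTERS_H


static FT_Error
my_reset_to_default_instance( FT_Face  face )
{
  FT_UInt   instance_index = 0;
  FT_Error  error;


  /* look up the default named instance, then make it current */
  error = FT_Get_Default_Named_Instance( face, &instance_index );
  if ( !error )
    error = FT_Set_Named_Instance( face, instance_index );

  return error;
}
```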
Additionally, some module + * properties can also be controlled. + * + * Here is a list of existing values of the `module_name` field in the + * @FT_Module_Class structure. + * + * ``` + * autofitter + * bdf + * cff + * gxvalid + * otvalid + * pcf + * pfr + * psaux + * pshinter + * psnames + * raster1 + * sfnt + * smooth + * truetype + * type1 + * type42 + * t1cid + * winfonts + * ``` + * + * Note that the FreeType Cache sub-system is not a FreeType module. + * + * @order: + * FT_Module + * FT_Module_Constructor + * FT_Module_Destructor + * FT_Module_Requester + * FT_Module_Class + * + * FT_Add_Module + * FT_Get_Module + * FT_Remove_Module + * FT_Add_Default_Modules + * + * FT_FACE_DRIVER_NAME + * FT_Property_Set + * FT_Property_Get + * FT_Set_Default_Properties + * + * FT_New_Library + * FT_Done_Library + * FT_Reference_Library + * + * FT_Renderer + * FT_Renderer_Class + * + * FT_Get_Renderer + * FT_Set_Renderer + * + * FT_Set_Debug_Hook + * + */ + + + /* module bit flags */ +#define FT_MODULE_FONT_DRIVER 1 /* this module is a font driver */ +#define FT_MODULE_RENDERER 2 /* this module is a renderer */ +#define FT_MODULE_HINTER 4 /* this module is a glyph hinter */ +#define FT_MODULE_STYLER 8 /* this module is a styler */ + +#define FT_MODULE_DRIVER_SCALABLE 0x100 /* the driver supports */ + /* scalable fonts */ +#define FT_MODULE_DRIVER_NO_OUTLINES 0x200 /* the driver does not */ + /* support vector outlines */ +#define FT_MODULE_DRIVER_HAS_HINTER 0x400 /* the driver provides its */ + /* own hinter */ +#define FT_MODULE_DRIVER_HINTS_LIGHTLY 0x800 /* the driver's hinter */ + /* produces LIGHT hints */ + + + /* deprecated values */ +#define ft_module_font_driver FT_MODULE_FONT_DRIVER +#define ft_module_renderer FT_MODULE_RENDERER +#define ft_module_hinter FT_MODULE_HINTER +#define ft_module_styler FT_MODULE_STYLER + +#define ft_module_driver_scalable FT_MODULE_DRIVER_SCALABLE +#define ft_module_driver_no_outlines FT_MODULE_DRIVER_NO_OUTLINES +#define ft_module_driver_has_hinter FT_MODULE_DRIVER_HAS_HINTER +#define ft_module_driver_hints_lightly FT_MODULE_DRIVER_HINTS_LIGHTLY + + + typedef FT_Pointer FT_Module_Interface; + + + /************************************************************************** + * + * @functype: + * FT_Module_Constructor + * + * @description: + * A function used to initialize (not create) a new module object. + * + * @input: + * module :: + * The module to initialize. + */ + typedef FT_Error + (*FT_Module_Constructor)( FT_Module module ); + + + /************************************************************************** + * + * @functype: + * FT_Module_Destructor + * + * @description: + * A function used to finalize (not destroy) a given module object. + * + * @input: + * module :: + * The module to finalize. + */ + typedef void + (*FT_Module_Destructor)( FT_Module module ); + + + /************************************************************************** + * + * @functype: + * FT_Module_Requester + * + * @description: + * A function used to query a given module for a specific interface. + * + * @input: + * module :: + * The module to be searched. + * + * name :: + * The name of the interface in the module. + */ + typedef FT_Module_Interface + (*FT_Module_Requester)( FT_Module module, + const char* name ); + + + /************************************************************************** + * + * @struct: + * FT_Module_Class + * + * @description: + * The module class descriptor. 
While being a public structure necessary + * for FreeType's module bookkeeping, most of the fields are essentially + * internal, not to be used directly by an application. + * + * @fields: + * module_flags :: + * Bit flags describing the module. + * + * module_size :: + * The size of one module object/instance in bytes. + * + * module_name :: + * The name of the module. + * + * module_version :: + * The version, as a 16.16 fixed number (major.minor). + * + * module_requires :: + * The version of FreeType this module requires, as a 16.16 fixed + * number (major.minor). Starts at version 2.0, i.e., 0x20000. + * + * module_interface :: + * A typeless pointer to a structure (which varies between different + * modules) that holds the module's interface functions. This is + * essentially what `get_interface` returns. + * + * module_init :: + * The initializing function. + * + * module_done :: + * The finalizing function. + * + * get_interface :: + * The interface requesting function. + */ + typedef struct FT_Module_Class_ + { + FT_ULong module_flags; + FT_Long module_size; + const FT_String* module_name; + FT_Fixed module_version; + FT_Fixed module_requires; + + const void* module_interface; + + FT_Module_Constructor module_init; + FT_Module_Destructor module_done; + FT_Module_Requester get_interface; + + } FT_Module_Class; + + + /************************************************************************** + * + * @function: + * FT_Add_Module + * + * @description: + * Add a new module to a given library instance. + * + * @inout: + * library :: + * A handle to the library object. + * + * @input: + * clazz :: + * A pointer to class descriptor for the module. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * An error will be returned if a module already exists by that name, or + * if the module requires a version of FreeType that is too great. + */ + FT_EXPORT( FT_Error ) + FT_Add_Module( FT_Library library, + const FT_Module_Class* clazz ); + + + /************************************************************************** + * + * @function: + * FT_Get_Module + * + * @description: + * Find a module by its name. + * + * @input: + * library :: + * A handle to the library object. + * + * module_name :: + * The module's name (as an ASCII string). + * + * @return: + * A module handle. 0~if none was found. + * + * @note: + * FreeType's internal modules aren't documented very well, and you + * should look up the source code for details. + */ + FT_EXPORT( FT_Module ) + FT_Get_Module( FT_Library library, + const char* module_name ); + + + /************************************************************************** + * + * @function: + * FT_Remove_Module + * + * @description: + * Remove a given module from a library instance. + * + * @inout: + * library :: + * A handle to a library object. + * + * @input: + * module :: + * A handle to a module object. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The module object is destroyed by the function in case of success. + */ + FT_EXPORT( FT_Error ) + FT_Remove_Module( FT_Library library, + FT_Module module ); + + + /************************************************************************** + * + * @macro: + * FT_FACE_DRIVER_NAME + * + * @description: + * A macro that retrieves the name of a font driver from a face object. + * + * @note: + * The font driver name is a valid `module_name` for @FT_Property_Set + * and @FT_Property_Get. This is not the same as @FT_Get_Font_Format. 
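+   *
+   *   A minimal sketch of combining this macro with @FT_Property_Set
+   *   (`library` and `face` are assumed to be valid handles; whether the
+   *   face's driver actually implements the `no-stem-darkening` property
+   *   used here depends on the driver):
+   *
+   *   ```
+   *     FT_Bool  darken = 0;
+   *
+   *
+   *     FT_Property_Set( library,
+   *                      FT_FACE_DRIVER_NAME( face ),
+   *                      "no-stem-darkening",
+   *                      &darken );
+   *   ```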
+ * + * @since: + * 2.11 + * + */ +#define FT_FACE_DRIVER_NAME( face ) \ + ( ( *FT_REINTERPRET_CAST( FT_Module_Class**, \ + ( face )->driver ) )->module_name ) + + + /************************************************************************** + * + * @function: + * FT_Property_Set + * + * @description: + * Set a property for a given module. + * + * @input: + * library :: + * A handle to the library the module is part of. + * + * module_name :: + * The module name. + * + * property_name :: + * The property name. Properties are described in section + * @properties. + * + * Note that only a few modules have properties. + * + * value :: + * A generic pointer to a variable or structure that gives the new + * value of the property. The exact definition of `value` is + * dependent on the property; see section @properties. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * If `module_name` isn't a valid module name, or `property_name` + * doesn't specify a valid property, or if `value` doesn't represent a + * valid value for the given property, an error is returned. + * + * The following example sets property 'bar' (a simple integer) in + * module 'foo' to value~1. + * + * ``` + * FT_UInt bar; + * + * + * bar = 1; + * FT_Property_Set( library, "foo", "bar", &bar ); + * ``` + * + * Note that the FreeType Cache sub-system doesn't recognize module + * property changes. To avoid glyph lookup confusion within the cache + * you should call @FTC_Manager_Reset to completely flush the cache if a + * module property gets changed after @FTC_Manager_New has been called. + * + * It is not possible to set properties of the FreeType Cache sub-system + * itself with FT_Property_Set; use @FTC_Property_Set instead. + * + * @since: + * 2.4.11 + * + */ + FT_EXPORT( FT_Error ) + FT_Property_Set( FT_Library library, + const FT_String* module_name, + const FT_String* property_name, + const void* value ); + + + /************************************************************************** + * + * @function: + * FT_Property_Get + * + * @description: + * Get a module's property value. + * + * @input: + * library :: + * A handle to the library the module is part of. + * + * module_name :: + * The module name. + * + * property_name :: + * The property name. Properties are described in section + * @properties. + * + * @inout: + * value :: + * A generic pointer to a variable or structure that gives the value + * of the property. The exact definition of `value` is dependent on + * the property; see section @properties. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * If `module_name` isn't a valid module name, or `property_name` + * doesn't specify a valid property, or if `value` doesn't represent a + * valid value for the given property, an error is returned. + * + * The following example gets property 'baz' (a range) in module 'foo'. + * + * ``` + * typedef range_ + * { + * FT_Int32 min; + * FT_Int32 max; + * + * } range; + * + * range baz; + * + * + * FT_Property_Get( library, "foo", "baz", &baz ); + * ``` + * + * It is not possible to retrieve properties of the FreeType Cache + * sub-system with FT_Property_Get; use @FTC_Property_Get instead. 
+ * + * @since: + * 2.4.11 + * + */ + FT_EXPORT( FT_Error ) + FT_Property_Get( FT_Library library, + const FT_String* module_name, + const FT_String* property_name, + void* value ); + + + /************************************************************************** + * + * @function: + * FT_Set_Default_Properties + * + * @description: + * If compilation option `FT_CONFIG_OPTION_ENVIRONMENT_PROPERTIES` is + * set, this function reads the `FREETYPE_PROPERTIES` environment + * variable to control driver properties. See section @properties for + * more. + * + * If the compilation option is not set, this function does nothing. + * + * `FREETYPE_PROPERTIES` has the following syntax form (broken here into + * multiple lines for better readability). + * + * ``` + * + * ':' + * '=' + * + * ':' + * '=' + * ... + * ``` + * + * Example: + * + * ``` + * FREETYPE_PROPERTIES=truetype:interpreter-version=35 \ + * cff:no-stem-darkening=0 + * ``` + * + * @inout: + * library :: + * A handle to a new library object. + * + * @since: + * 2.8 + */ + FT_EXPORT( void ) + FT_Set_Default_Properties( FT_Library library ); + + + /************************************************************************** + * + * @function: + * FT_Reference_Library + * + * @description: + * A counter gets initialized to~1 at the time an @FT_Library structure + * is created. This function increments the counter. @FT_Done_Library + * then only destroys a library if the counter is~1, otherwise it simply + * decrements the counter. + * + * This function helps in managing life-cycles of structures that + * reference @FT_Library objects. + * + * @input: + * library :: + * A handle to a target library object. + * + * @return: + * FreeType error code. 0~means success. + * + * @since: + * 2.4.2 + */ + FT_EXPORT( FT_Error ) + FT_Reference_Library( FT_Library library ); + + + /************************************************************************** + * + * @function: + * FT_New_Library + * + * @description: + * This function is used to create a new FreeType library instance from a + * given memory object. It is thus possible to use libraries with + * distinct memory allocators within the same program. Note, however, + * that the used @FT_Memory structure is expected to remain valid for the + * life of the @FT_Library object. + * + * Normally, you would call this function (followed by a call to + * @FT_Add_Default_Modules or a series of calls to @FT_Add_Module, and a + * call to @FT_Set_Default_Properties) instead of @FT_Init_FreeType to + * initialize the FreeType library. + * + * Don't use @FT_Done_FreeType but @FT_Done_Library to destroy a library + * instance. + * + * @input: + * memory :: + * A handle to the original memory object. + * + * @output: + * alibrary :: + * A pointer to handle of a new library object. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * See the discussion of reference counters in the description of + * @FT_Reference_Library. + */ + FT_EXPORT( FT_Error ) + FT_New_Library( FT_Memory memory, + FT_Library *alibrary ); + + + /************************************************************************** + * + * @function: + * FT_Done_Library + * + * @description: + * Discard a given library object. This closes all drivers and discards + * all resource objects. + * + * @input: + * library :: + * A handle to the target library. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * See the discussion of reference counters in the description of + * @FT_Reference_Library. 
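+   *
+   *   A rough sketch of the life-cycle described under @FT_New_Library,
+   *   assuming `my_memory` is an @FT_Memory object set up by the
+   *   application and `error` is an `FT_Error` variable:
+   *
+   *   ```
+   *     FT_Library  library;
+   *
+   *
+   *     error = FT_New_Library( my_memory, &library );
+   *     if ( !error )
+   *     {
+   *       FT_Add_Default_Modules( library );
+   *       FT_Set_Default_Properties( library );
+   *
+   *       ... use the library ...
+   *
+   *       FT_Done_Library( library );
+   *     }
+   *   ```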
+ */ + FT_EXPORT( FT_Error ) + FT_Done_Library( FT_Library library ); + + + /************************************************************************** + * + * @functype: + * FT_DebugHook_Func + * + * @description: + * A drop-in replacement (or rather a wrapper) for the bytecode or + * charstring interpreter's main loop function. + * + * Its job is essentially + * + * - to activate debug mode to enforce single-stepping, + * + * - to call the main loop function to interpret the next opcode, and + * + * - to show the changed context to the user. + * + * An example for such a main loop function is `TT_RunIns` (declared in + * FreeType's internal header file `src/truetype/ttinterp.h`). + * + * Have a look at the source code of the `ttdebug` FreeType demo program + * for an example of a drop-in replacement. + * + * @inout: + * arg :: + * A typeless pointer, to be cast to the main loop function's data + * structure (which depends on the font module). For TrueType fonts + * it is bytecode interpreter's execution context, `TT_ExecContext`, + * which is declared in FreeType's internal header file `tttypes.h`. + */ + typedef FT_Error + (*FT_DebugHook_Func)( void* arg ); + + + /************************************************************************** + * + * @enum: + * FT_DEBUG_HOOK_XXX + * + * @description: + * A list of named debug hook indices. + * + * @values: + * FT_DEBUG_HOOK_TRUETYPE:: + * This hook index identifies the TrueType bytecode debugger. + */ +#define FT_DEBUG_HOOK_TRUETYPE 0 + + + /************************************************************************** + * + * @function: + * FT_Set_Debug_Hook + * + * @description: + * Set a debug hook function for debugging the interpreter of a font + * format. + * + * While this is a public API function, an application needs access to + * FreeType's internal header files to do something useful. + * + * Have a look at the source code of the `ttdebug` FreeType demo program + * for an example of its usage. + * + * @inout: + * library :: + * A handle to the library object. + * + * @input: + * hook_index :: + * The index of the debug hook. You should use defined enumeration + * macros like @FT_DEBUG_HOOK_TRUETYPE. + * + * debug_hook :: + * The function used to debug the interpreter. + * + * @note: + * Currently, four debug hook slots are available, but only one (for the + * TrueType interpreter) is defined. + */ + FT_EXPORT( void ) + FT_Set_Debug_Hook( FT_Library library, + FT_UInt hook_index, + FT_DebugHook_Func debug_hook ); + + + /************************************************************************** + * + * @function: + * FT_Add_Default_Modules + * + * @description: + * Add the set of default drivers to a given library object. This is + * only useful when you create a library object with @FT_New_Library + * (usually to plug a custom memory manager). + * + * @inout: + * library :: + * A handle to a new library object. + */ + FT_EXPORT( void ) + FT_Add_Default_Modules( FT_Library library ); + + + + /************************************************************************** + * + * @section: + * truetype_engine + * + * @title: + * The TrueType Engine + * + * @abstract: + * TrueType bytecode support. + * + * @description: + * This section contains a function used to query the level of TrueType + * bytecode support compiled in this version of the library. 
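+   *
+   *   For example (a brief sketch only; `library` is assumed to be a
+   *   valid handle):
+   *
+   *   ```
+   *     FT_TrueTypeEngineType  engine;
+   *
+   *
+   *     engine = FT_Get_TrueType_Engine_Type( library );
+   *     if ( engine == FT_TRUETYPE_ENGINE_TYPE_PATENTED )
+   *       ... the full bytecode interpreter is available ...
+   *   ```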
+ * + */ + + + /************************************************************************** + * + * @enum: + * FT_TrueTypeEngineType + * + * @description: + * A list of values describing which kind of TrueType bytecode engine is + * implemented in a given FT_Library instance. It is used by the + * @FT_Get_TrueType_Engine_Type function. + * + * @values: + * FT_TRUETYPE_ENGINE_TYPE_NONE :: + * The library doesn't implement any kind of bytecode interpreter. + * + * FT_TRUETYPE_ENGINE_TYPE_UNPATENTED :: + * Deprecated and removed. + * + * FT_TRUETYPE_ENGINE_TYPE_PATENTED :: + * The library implements a bytecode interpreter that covers the full + * instruction set of the TrueType virtual machine (this was governed + * by patents until May 2010, hence the name). + * + * @since: + * 2.2 + * + */ + typedef enum FT_TrueTypeEngineType_ + { + FT_TRUETYPE_ENGINE_TYPE_NONE = 0, + FT_TRUETYPE_ENGINE_TYPE_UNPATENTED, + FT_TRUETYPE_ENGINE_TYPE_PATENTED + + } FT_TrueTypeEngineType; + + + /************************************************************************** + * + * @function: + * FT_Get_TrueType_Engine_Type + * + * @description: + * Return an @FT_TrueTypeEngineType value to indicate which level of the + * TrueType virtual machine a given library instance supports. + * + * @input: + * library :: + * A library instance. + * + * @return: + * A value indicating which level is supported. + * + * @since: + * 2.2 + * + */ + FT_EXPORT( FT_TrueTypeEngineType ) + FT_Get_TrueType_Engine_Type( FT_Library library ); + + /* */ + + +FT_END_HEADER + +#endif /* FTMODAPI_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftmoderr.h b/Example/include/freetype/ftmoderr.h new file mode 100644 index 0000000..c8c892d --- /dev/null +++ b/Example/include/freetype/ftmoderr.h @@ -0,0 +1,204 @@ +/**************************************************************************** + * + * ftmoderr.h + * + * FreeType module error offsets (specification). + * + * Copyright (C) 2001-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + + /************************************************************************** + * + * This file is used to define the FreeType module error codes. + * + * If the macro `FT_CONFIG_OPTION_USE_MODULE_ERRORS` in `ftoption.h` is + * set, the lower byte of an error value identifies the error code as + * usual. In addition, the higher byte identifies the module. For + * example, the error `FT_Err_Invalid_File_Format` has value 0x0003, the + * error `TT_Err_Invalid_File_Format` has value 0x1303, the error + * `T1_Err_Invalid_File_Format` has value 0x1403, etc. + * + * Note that `FT_Err_Ok`, `TT_Err_Ok`, etc. are always equal to zero, + * including the high byte. + * + * If `FT_CONFIG_OPTION_USE_MODULE_ERRORS` isn't set, the higher byte of an + * error value is set to zero. + * + * To hide the various `XXX_Err_` prefixes in the source code, FreeType + * provides some macros in `fttypes.h`. + * + * FT_ERR( err ) + * + * Add current error module prefix (as defined with the `FT_ERR_PREFIX` + * macro) to `err`. 
For example, in the BDF module the line + * + * ``` + * error = FT_ERR( Invalid_Outline ); + * ``` + * + * expands to + * + * ``` + * error = BDF_Err_Invalid_Outline; + * ``` + * + * For simplicity, you can always use `FT_Err_Ok` directly instead of + * `FT_ERR( Ok )`. + * + * FT_ERR_EQ( errcode, err ) + * FT_ERR_NEQ( errcode, err ) + * + * Compare error code `errcode` with the error `err` for equality and + * inequality, respectively. Example: + * + * ``` + * if ( FT_ERR_EQ( error, Invalid_Outline ) ) + * ... + * ``` + * + * Using this macro you don't have to think about error prefixes. Of + * course, if module errors are not active, the above example is the + * same as + * + * ``` + * if ( error == FT_Err_Invalid_Outline ) + * ... + * ``` + * + * FT_ERROR_BASE( errcode ) + * FT_ERROR_MODULE( errcode ) + * + * Get base error and module error code, respectively. + * + * It can also be used to create a module error message table easily with + * something like + * + * ``` + * #undef FTMODERR_H_ + * #define FT_MODERRDEF( e, v, s ) { FT_Mod_Err_ ## e, s }, + * #define FT_MODERR_START_LIST { + * #define FT_MODERR_END_LIST { 0, 0 } }; + * + * const struct + * { + * int mod_err_offset; + * const char* mod_err_msg + * } ft_mod_errors[] = + * + * #include + * ``` + * + */ + + +#ifndef FTMODERR_H_ +#define FTMODERR_H_ + + + /*******************************************************************/ + /*******************************************************************/ + /***** *****/ + /***** SETUP MACROS *****/ + /***** *****/ + /*******************************************************************/ + /*******************************************************************/ + + +#undef FT_NEED_EXTERN_C + +#ifndef FT_MODERRDEF + +#ifdef FT_CONFIG_OPTION_USE_MODULE_ERRORS +#define FT_MODERRDEF( e, v, s ) FT_Mod_Err_ ## e = v, +#else +#define FT_MODERRDEF( e, v, s ) FT_Mod_Err_ ## e = 0, +#endif + +#define FT_MODERR_START_LIST enum { +#define FT_MODERR_END_LIST FT_Mod_Err_Max }; + +#ifdef __cplusplus +#define FT_NEED_EXTERN_C + extern "C" { +#endif + +#endif /* !FT_MODERRDEF */ + + + /*******************************************************************/ + /*******************************************************************/ + /***** *****/ + /***** LIST MODULE ERROR BASES *****/ + /***** *****/ + /*******************************************************************/ + /*******************************************************************/ + + +#ifdef FT_MODERR_START_LIST + FT_MODERR_START_LIST +#endif + + + FT_MODERRDEF( Base, 0x000, "base module" ) + FT_MODERRDEF( Autofit, 0x100, "autofitter module" ) + FT_MODERRDEF( BDF, 0x200, "BDF module" ) + FT_MODERRDEF( Bzip2, 0x300, "Bzip2 module" ) + FT_MODERRDEF( Cache, 0x400, "cache module" ) + FT_MODERRDEF( CFF, 0x500, "CFF module" ) + FT_MODERRDEF( CID, 0x600, "CID module" ) + FT_MODERRDEF( Gzip, 0x700, "Gzip module" ) + FT_MODERRDEF( LZW, 0x800, "LZW module" ) + FT_MODERRDEF( OTvalid, 0x900, "OpenType validation module" ) + FT_MODERRDEF( PCF, 0xA00, "PCF module" ) + FT_MODERRDEF( PFR, 0xB00, "PFR module" ) + FT_MODERRDEF( PSaux, 0xC00, "PS auxiliary module" ) + FT_MODERRDEF( PShinter, 0xD00, "PS hinter module" ) + FT_MODERRDEF( PSnames, 0xE00, "PS names module" ) + FT_MODERRDEF( Raster, 0xF00, "raster module" ) + FT_MODERRDEF( SFNT, 0x1000, "SFNT module" ) + FT_MODERRDEF( Smooth, 0x1100, "smooth raster module" ) + FT_MODERRDEF( TrueType, 0x1200, "TrueType module" ) + FT_MODERRDEF( Type1, 0x1300, "Type 1 module" ) + FT_MODERRDEF( Type42, 0x1400, "Type 42 module" ) + 
FT_MODERRDEF( Winfonts, 0x1500, "Windows FON/FNT module" ) + FT_MODERRDEF( GXvalid, 0x1600, "GX validation module" ) + FT_MODERRDEF( Sdf, 0x1700, "Signed distance field raster module" ) + + +#ifdef FT_MODERR_END_LIST + FT_MODERR_END_LIST +#endif + + + /*******************************************************************/ + /*******************************************************************/ + /***** *****/ + /***** CLEANUP *****/ + /***** *****/ + /*******************************************************************/ + /*******************************************************************/ + + +#ifdef FT_NEED_EXTERN_C + } +#endif + +#undef FT_MODERR_START_LIST +#undef FT_MODERR_END_LIST +#undef FT_MODERRDEF +#undef FT_NEED_EXTERN_C + + +#endif /* FTMODERR_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftotval.h b/Example/include/freetype/ftotval.h new file mode 100644 index 0000000..011bdfc --- /dev/null +++ b/Example/include/freetype/ftotval.h @@ -0,0 +1,206 @@ +/**************************************************************************** + * + * ftotval.h + * + * FreeType API for validating OpenType tables (specification). + * + * Copyright (C) 2004-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +/**************************************************************************** + * + * + * Warning: This module might be moved to a different library in the + * future to avoid a tight dependency between FreeType and the + * OpenType specification. + * + * + */ + + +#ifndef FTOTVAL_H_ +#define FTOTVAL_H_ + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * ot_validation + * + * @title: + * OpenType Validation + * + * @abstract: + * An API to validate OpenType tables. + * + * @description: + * This section contains the declaration of functions to validate some + * OpenType tables (BASE, GDEF, GPOS, GSUB, JSTF, MATH). + * + * @order: + * FT_OpenType_Validate + * FT_OpenType_Free + * + * FT_VALIDATE_OTXXX + * + */ + + + /************************************************************************** + * + * @enum: + * FT_VALIDATE_OTXXX + * + * @description: + * A list of bit-field constants used with @FT_OpenType_Validate to + * indicate which OpenType tables should be validated. + * + * @values: + * FT_VALIDATE_BASE :: + * Validate BASE table. + * + * FT_VALIDATE_GDEF :: + * Validate GDEF table. + * + * FT_VALIDATE_GPOS :: + * Validate GPOS table. + * + * FT_VALIDATE_GSUB :: + * Validate GSUB table. + * + * FT_VALIDATE_JSTF :: + * Validate JSTF table. + * + * FT_VALIDATE_MATH :: + * Validate MATH table. + * + * FT_VALIDATE_OT :: + * Validate all OpenType tables (BASE, GDEF, GPOS, GSUB, JSTF, MATH). 
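+   *
+   *   For example, to validate just the GPOS and GSUB tables (a sketch
+   *   only; `face` is assumed to be a valid handle and error handling is
+   *   omitted):
+   *
+   *   ```
+   *     FT_Bytes  base = NULL, gdef = NULL, gpos = NULL;
+   *     FT_Bytes  gsub = NULL, jstf = NULL;
+   *
+   *
+   *     error = FT_OpenType_Validate( face,
+   *                                   FT_VALIDATE_GPOS | FT_VALIDATE_GSUB,
+   *                                   &base, &gdef, &gpos,
+   *                                   &gsub, &jstf );
+   *
+   *     ... use the validated tables ...
+   *
+   *     FT_OpenType_Free( face, base );
+   *     FT_OpenType_Free( face, gdef );
+   *     FT_OpenType_Free( face, gpos );
+   *     FT_OpenType_Free( face, gsub );
+   *     FT_OpenType_Free( face, jstf );
+   *   ```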
+ * + */ +#define FT_VALIDATE_BASE 0x0100 +#define FT_VALIDATE_GDEF 0x0200 +#define FT_VALIDATE_GPOS 0x0400 +#define FT_VALIDATE_GSUB 0x0800 +#define FT_VALIDATE_JSTF 0x1000 +#define FT_VALIDATE_MATH 0x2000 + +#define FT_VALIDATE_OT ( FT_VALIDATE_BASE | \ + FT_VALIDATE_GDEF | \ + FT_VALIDATE_GPOS | \ + FT_VALIDATE_GSUB | \ + FT_VALIDATE_JSTF | \ + FT_VALIDATE_MATH ) + + + /************************************************************************** + * + * @function: + * FT_OpenType_Validate + * + * @description: + * Validate various OpenType tables to assure that all offsets and + * indices are valid. The idea is that a higher-level library that + * actually does the text layout can access those tables without error + * checking (which can be quite time consuming). + * + * @input: + * face :: + * A handle to the input face. + * + * validation_flags :: + * A bit field that specifies the tables to be validated. See + * @FT_VALIDATE_OTXXX for possible values. + * + * @output: + * BASE_table :: + * A pointer to the BASE table. + * + * GDEF_table :: + * A pointer to the GDEF table. + * + * GPOS_table :: + * A pointer to the GPOS table. + * + * GSUB_table :: + * A pointer to the GSUB table. + * + * JSTF_table :: + * A pointer to the JSTF table. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function only works with OpenType fonts, returning an error + * otherwise. + * + * After use, the application should deallocate the five tables with + * @FT_OpenType_Free. A `NULL` value indicates that the table either + * doesn't exist in the font, or the application hasn't asked for + * validation. + */ + FT_EXPORT( FT_Error ) + FT_OpenType_Validate( FT_Face face, + FT_UInt validation_flags, + FT_Bytes *BASE_table, + FT_Bytes *GDEF_table, + FT_Bytes *GPOS_table, + FT_Bytes *GSUB_table, + FT_Bytes *JSTF_table ); + + + /************************************************************************** + * + * @function: + * FT_OpenType_Free + * + * @description: + * Free the buffer allocated by OpenType validator. + * + * @input: + * face :: + * A handle to the input face. + * + * table :: + * The pointer to the buffer that is allocated by + * @FT_OpenType_Validate. + * + * @note: + * This function must be used to free the buffer allocated by + * @FT_OpenType_Validate only. + */ + FT_EXPORT( void ) + FT_OpenType_Free( FT_Face face, + FT_Bytes table ); + + + /* */ + + +FT_END_HEADER + +#endif /* FTOTVAL_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftoutln.h b/Example/include/freetype/ftoutln.h new file mode 100644 index 0000000..f9329ca --- /dev/null +++ b/Example/include/freetype/ftoutln.h @@ -0,0 +1,588 @@ +/**************************************************************************** + * + * ftoutln.h + * + * Support for the FT_Outline type used to store glyph shapes of + * most scalable font formats (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTOUTLN_H_ +#define FTOUTLN_H_ + + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." 
+#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * outline_processing + * + * @title: + * Outline Processing + * + * @abstract: + * Functions to create, transform, and render vectorial glyph images. + * + * @description: + * This section contains routines used to create and destroy scalable + * glyph images known as 'outlines'. These can also be measured, + * transformed, and converted into bitmaps and pixmaps. + * + * @order: + * FT_Outline + * FT_Outline_New + * FT_Outline_Done + * FT_Outline_Copy + * FT_Outline_Translate + * FT_Outline_Transform + * FT_Outline_Embolden + * FT_Outline_EmboldenXY + * FT_Outline_Reverse + * FT_Outline_Check + * + * FT_Outline_Get_CBox + * FT_Outline_Get_BBox + * + * FT_Outline_Get_Bitmap + * FT_Outline_Render + * FT_Outline_Decompose + * FT_Outline_Funcs + * FT_Outline_MoveToFunc + * FT_Outline_LineToFunc + * FT_Outline_ConicToFunc + * FT_Outline_CubicToFunc + * + * FT_Orientation + * FT_Outline_Get_Orientation + * + * FT_OUTLINE_XXX + * + */ + + + /************************************************************************** + * + * @function: + * FT_Outline_Decompose + * + * @description: + * Walk over an outline's structure to decompose it into individual + * segments and Bezier arcs. This function also emits 'move to' + * operations to indicate the start of new contours in the outline. + * + * @input: + * outline :: + * A pointer to the source target. + * + * func_interface :: + * A table of 'emitters', i.e., function pointers called during + * decomposition to indicate path operations. + * + * @inout: + * user :: + * A typeless pointer that is passed to each emitter during the + * decomposition. It can be used to store the state during the + * decomposition. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * Degenerate contours, segments, and Bezier arcs may be reported. In + * most cases, it is best to filter these out before using the outline + * for stroking or other path modification purposes (which may cause + * degenerate segments to become non-degenrate and visible, like when + * stroke caps are used or the path is otherwise outset). Some glyph + * outlines may contain deliberate degenerate single points for mark + * attachement. + * + * Similarly, the function returns success for an empty outline also + * (doing nothing, that is, not calling any emitter); if necessary, you + * should filter this out, too. + */ + FT_EXPORT( FT_Error ) + FT_Outline_Decompose( FT_Outline* outline, + const FT_Outline_Funcs* func_interface, + void* user ); + + + /************************************************************************** + * + * @function: + * FT_Outline_New + * + * @description: + * Create a new outline of a given size. + * + * @input: + * library :: + * A handle to the library object from where the outline is allocated. + * Note however that the new outline will **not** necessarily be + * **freed**, when destroying the library, by @FT_Done_FreeType. + * + * numPoints :: + * The maximum number of points within the outline. Must be smaller + * than or equal to 0xFFFF (65535). + * + * numContours :: + * The maximum number of contours within the outline. This value must + * be in the range 0 to `numPoints`. + * + * @output: + * anoutline :: + * A handle to the new outline. + * + * @return: + * FreeType error code. 0~means success. 
+ * + * @note: + * The reason why this function takes a `library` parameter is simply to + * use the library's memory allocator. + */ + FT_EXPORT( FT_Error ) + FT_Outline_New( FT_Library library, + FT_UInt numPoints, + FT_Int numContours, + FT_Outline *anoutline ); + + + /************************************************************************** + * + * @function: + * FT_Outline_Done + * + * @description: + * Destroy an outline created with @FT_Outline_New. + * + * @input: + * library :: + * A handle of the library object used to allocate the outline. + * + * outline :: + * A pointer to the outline object to be discarded. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * If the outline's 'owner' field is not set, only the outline descriptor + * will be released. + */ + FT_EXPORT( FT_Error ) + FT_Outline_Done( FT_Library library, + FT_Outline* outline ); + + + /************************************************************************** + * + * @function: + * FT_Outline_Check + * + * @description: + * Check the contents of an outline descriptor. + * + * @input: + * outline :: + * A handle to a source outline. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * An empty outline, or an outline with a single point only is also + * valid. + */ + FT_EXPORT( FT_Error ) + FT_Outline_Check( FT_Outline* outline ); + + + /************************************************************************** + * + * @function: + * FT_Outline_Get_CBox + * + * @description: + * Return an outline's 'control box'. The control box encloses all the + * outline's points, including Bezier control points. Though it + * coincides with the exact bounding box for most glyphs, it can be + * slightly larger in some situations (like when rotating an outline that + * contains Bezier outside arcs). + * + * Computing the control box is very fast, while getting the bounding box + * can take much more time as it needs to walk over all segments and arcs + * in the outline. To get the latter, you can use the 'ftbbox' + * component, which is dedicated to this single task. + * + * @input: + * outline :: + * A pointer to the source outline descriptor. + * + * @output: + * acbox :: + * The outline's control box. + * + * @note: + * See @FT_Glyph_Get_CBox for a discussion of tricky fonts. + */ + FT_EXPORT( void ) + FT_Outline_Get_CBox( const FT_Outline* outline, + FT_BBox *acbox ); + + + /************************************************************************** + * + * @function: + * FT_Outline_Translate + * + * @description: + * Apply a simple translation to the points of an outline. + * + * @inout: + * outline :: + * A pointer to the target outline descriptor. + * + * @input: + * xOffset :: + * The horizontal offset. + * + * yOffset :: + * The vertical offset. + */ + FT_EXPORT( void ) + FT_Outline_Translate( const FT_Outline* outline, + FT_Pos xOffset, + FT_Pos yOffset ); + + + /************************************************************************** + * + * @function: + * FT_Outline_Copy + * + * @description: + * Copy an outline into another one. Both objects must have the same + * sizes (number of points & number of contours) when this function is + * called. + * + * @input: + * source :: + * A handle to the source outline. + * + * @output: + * target :: + * A handle to the target outline. + * + * @return: + * FreeType error code. 0~means success. 
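+   *
+   * @note:
+   *   A sketch of duplicating an outline (`library` and the source
+   *   outline pointer `source` are assumed to be valid; error handling
+   *   omitted):
+   *
+   *   ```
+   *     FT_Outline  copy;
+   *
+   *
+   *     FT_Outline_New( library,
+   *                     source->n_points,
+   *                     source->n_contours,
+   *                     &copy );
+   *     FT_Outline_Copy( source, &copy );
+   *
+   *     ... use `copy' ...
+   *
+   *     FT_Outline_Done( library, &copy );
+   *   ```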
+ */ + FT_EXPORT( FT_Error ) + FT_Outline_Copy( const FT_Outline* source, + FT_Outline *target ); + + + /************************************************************************** + * + * @function: + * FT_Outline_Transform + * + * @description: + * Apply a simple 2x2 matrix to all of an outline's points. Useful for + * applying rotations, slanting, flipping, etc. + * + * @inout: + * outline :: + * A pointer to the target outline descriptor. + * + * @input: + * matrix :: + * A pointer to the transformation matrix. + * + * @note: + * You can use @FT_Outline_Translate if you need to translate the + * outline's points. + */ + FT_EXPORT( void ) + FT_Outline_Transform( const FT_Outline* outline, + const FT_Matrix* matrix ); + + + /************************************************************************** + * + * @function: + * FT_Outline_Embolden + * + * @description: + * Embolden an outline. The new outline will be at most 4~times + * `strength` pixels wider and higher. You may think of the left and + * bottom borders as unchanged. + * + * Negative `strength` values to reduce the outline thickness are + * possible also. + * + * @inout: + * outline :: + * A handle to the target outline. + * + * @input: + * strength :: + * How strong the glyph is emboldened. Expressed in 26.6 pixel format. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The used algorithm to increase or decrease the thickness of the glyph + * doesn't change the number of points; this means that certain + * situations like acute angles or intersections are sometimes handled + * incorrectly. + * + * If you need 'better' metrics values you should call + * @FT_Outline_Get_CBox or @FT_Outline_Get_BBox. + * + * To get meaningful results, font scaling values must be set with + * functions like @FT_Set_Char_Size before calling FT_Render_Glyph. + * + * @example: + * ``` + * FT_Load_Glyph( face, index, FT_LOAD_DEFAULT ); + * + * if ( face->glyph->format == FT_GLYPH_FORMAT_OUTLINE ) + * FT_Outline_Embolden( &face->glyph->outline, strength ); + * ``` + * + */ + FT_EXPORT( FT_Error ) + FT_Outline_Embolden( FT_Outline* outline, + FT_Pos strength ); + + + /************************************************************************** + * + * @function: + * FT_Outline_EmboldenXY + * + * @description: + * Embolden an outline. The new outline will be `xstrength` pixels wider + * and `ystrength` pixels higher. Otherwise, it is similar to + * @FT_Outline_Embolden, which uses the same strength in both directions. + * + * @since: + * 2.4.10 + */ + FT_EXPORT( FT_Error ) + FT_Outline_EmboldenXY( FT_Outline* outline, + FT_Pos xstrength, + FT_Pos ystrength ); + + + /************************************************************************** + * + * @function: + * FT_Outline_Reverse + * + * @description: + * Reverse the drawing direction of an outline. This is used to ensure + * consistent fill conventions for mirrored glyphs. + * + * @inout: + * outline :: + * A pointer to the target outline descriptor. + * + * @note: + * This function toggles the bit flag @FT_OUTLINE_REVERSE_FILL in the + * outline's `flags` field. + * + * It shouldn't be used by a normal client application, unless it knows + * what it is doing. + */ + FT_EXPORT( void ) + FT_Outline_Reverse( FT_Outline* outline ); + + + /************************************************************************** + * + * @function: + * FT_Outline_Get_Bitmap + * + * @description: + * Render an outline within a bitmap. The outline's image is simply + * OR-ed to the target bitmap. 
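+   *
+   *   For example, rendering into a caller-provided 8-bit gray bitmap
+   *   might look like the following sketch (`outline`, `width`, `height`,
+   *   and a zero-initialized `buffer` of `width * height` bytes are
+   *   assumed to be set up by the application; @FT_Bitmap_Init is
+   *   declared in `ftbitmap.h`):
+   *
+   *   ```
+   *     FT_Bitmap  bitmap;
+   *
+   *
+   *     FT_Bitmap_Init( &bitmap );
+   *
+   *     bitmap.rows       = height;
+   *     bitmap.width      = width;
+   *     bitmap.pitch      = width;
+   *     bitmap.pixel_mode = FT_PIXEL_MODE_GRAY;
+   *     bitmap.buffer     = buffer;
+   *
+   *     error = FT_Outline_Get_Bitmap( library, &outline, &bitmap );
+   *   ```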
+ * + * @input: + * library :: + * A handle to a FreeType library object. + * + * outline :: + * A pointer to the source outline descriptor. + * + * @inout: + * abitmap :: + * A pointer to the target bitmap descriptor. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function does **not create** the bitmap, it only renders an + * outline image within the one you pass to it! Consequently, the + * various fields in `abitmap` should be set accordingly. + * + * It will use the raster corresponding to the default glyph format. + * + * The value of the `num_grays` field in `abitmap` is ignored. If you + * select the gray-level rasterizer, and you want less than 256 gray + * levels, you have to use @FT_Outline_Render directly. + */ + FT_EXPORT( FT_Error ) + FT_Outline_Get_Bitmap( FT_Library library, + FT_Outline* outline, + const FT_Bitmap *abitmap ); + + + /************************************************************************** + * + * @function: + * FT_Outline_Render + * + * @description: + * Render an outline within a bitmap using the current scan-convert. + * + * @input: + * library :: + * A handle to a FreeType library object. + * + * outline :: + * A pointer to the source outline descriptor. + * + * @inout: + * params :: + * A pointer to an @FT_Raster_Params structure used to describe the + * rendering operation. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This advanced function uses @FT_Raster_Params as an argument. + * The field `params.source` will be set to `outline` before the scan + * converter is called, which means that the value you give to it is + * actually ignored. Either `params.target` must point to preallocated + * bitmap, or @FT_RASTER_FLAG_DIRECT must be set in `params.flags` + * allowing FreeType rasterizer to be used for direct composition, + * translucency, etc. See @FT_Raster_Params for more details. + */ + FT_EXPORT( FT_Error ) + FT_Outline_Render( FT_Library library, + FT_Outline* outline, + FT_Raster_Params* params ); + + + /************************************************************************** + * + * @enum: + * FT_Orientation + * + * @description: + * A list of values used to describe an outline's contour orientation. + * + * The TrueType and PostScript specifications use different conventions + * to determine whether outline contours should be filled or unfilled. + * + * @values: + * FT_ORIENTATION_TRUETYPE :: + * According to the TrueType specification, clockwise contours must be + * filled, and counter-clockwise ones must be unfilled. + * + * FT_ORIENTATION_POSTSCRIPT :: + * According to the PostScript specification, counter-clockwise + * contours must be filled, and clockwise ones must be unfilled. + * + * FT_ORIENTATION_FILL_RIGHT :: + * This is identical to @FT_ORIENTATION_TRUETYPE, but is used to + * remember that in TrueType, everything that is to the right of the + * drawing direction of a contour must be filled. + * + * FT_ORIENTATION_FILL_LEFT :: + * This is identical to @FT_ORIENTATION_POSTSCRIPT, but is used to + * remember that in PostScript, everything that is to the left of the + * drawing direction of a contour must be filled. + * + * FT_ORIENTATION_NONE :: + * The orientation cannot be determined. That is, different parts of + * the glyph have different orientation. 
+ * + */ + typedef enum FT_Orientation_ + { + FT_ORIENTATION_TRUETYPE = 0, + FT_ORIENTATION_POSTSCRIPT = 1, + FT_ORIENTATION_FILL_RIGHT = FT_ORIENTATION_TRUETYPE, + FT_ORIENTATION_FILL_LEFT = FT_ORIENTATION_POSTSCRIPT, + FT_ORIENTATION_NONE + + } FT_Orientation; + + + /************************************************************************** + * + * @function: + * FT_Outline_Get_Orientation + * + * @description: + * This function analyzes a glyph outline and tries to compute its fill + * orientation (see @FT_Orientation). This is done by integrating the + * total area covered by the outline. The positive integral corresponds + * to the clockwise orientation and @FT_ORIENTATION_POSTSCRIPT is + * returned. The negative integral corresponds to the counter-clockwise + * orientation and @FT_ORIENTATION_TRUETYPE is returned. + * + * Note that this will return @FT_ORIENTATION_TRUETYPE for empty + * outlines. + * + * @input: + * outline :: + * A handle to the source outline. + * + * @return: + * The orientation. + * + */ + FT_EXPORT( FT_Orientation ) + FT_Outline_Get_Orientation( FT_Outline* outline ); + + + /* */ + + +FT_END_HEADER + +#endif /* FTOUTLN_H_ */ + + +/* END */ + + +/* Local Variables: */ +/* coding: utf-8 */ +/* End: */ diff --git a/Example/include/freetype/ftparams.h b/Example/include/freetype/ftparams.h new file mode 100644 index 0000000..6a9f243 --- /dev/null +++ b/Example/include/freetype/ftparams.h @@ -0,0 +1,218 @@ +/**************************************************************************** + * + * ftparams.h + * + * FreeType API for possible FT_Parameter tags (specification only). + * + * Copyright (C) 2017-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTPARAMS_H_ +#define FTPARAMS_H_ + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * parameter_tags + * + * @title: + * Parameter Tags + * + * @abstract: + * Macros for driver property and font loading parameter tags. + * + * @description: + * This section contains macros for the @FT_Parameter structure that are + * used with various functions to activate some special functionality or + * different behaviour of various components of FreeType. + * + */ + + + /************************************************************************** + * + * @enum: + * FT_PARAM_TAG_IGNORE_TYPOGRAPHIC_FAMILY + * + * @description: + * A tag for @FT_Parameter to make @FT_Open_Face ignore typographic + * family names in the 'name' table (introduced in OpenType version 1.4). + * Use this for backward compatibility with legacy systems that have a + * four-faces-per-family restriction. 
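+   *
+   *   A sketch of passing this tag to @FT_Open_Face (the path is purely
+   *   illustrative; `library` is assumed to be a valid handle):
+   *
+   *   ```
+   *     FT_Face       face;
+   *     FT_Parameter  param;
+   *     FT_Open_Args  args;
+   *
+   *
+   *     param.tag  = FT_PARAM_TAG_IGNORE_TYPOGRAPHIC_FAMILY;
+   *     param.data = NULL;
+   *
+   *     args.flags      = FT_OPEN_PATHNAME | FT_OPEN_PARAMS;
+   *     args.pathname   = (FT_String*)"font.otf";
+   *     args.num_params = 1;
+   *     args.params     = &param;
+   *
+   *     error = FT_Open_Face( library, &args, 0, &face );
+   *   ```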
+ * + * @since: + * 2.8 + * + */ +#define FT_PARAM_TAG_IGNORE_TYPOGRAPHIC_FAMILY \ + FT_MAKE_TAG( 'i', 'g', 'p', 'f' ) + + + /* this constant is deprecated */ +#define FT_PARAM_TAG_IGNORE_PREFERRED_FAMILY \ + FT_PARAM_TAG_IGNORE_TYPOGRAPHIC_FAMILY + + + /************************************************************************** + * + * @enum: + * FT_PARAM_TAG_IGNORE_TYPOGRAPHIC_SUBFAMILY + * + * @description: + * A tag for @FT_Parameter to make @FT_Open_Face ignore typographic + * subfamily names in the 'name' table (introduced in OpenType version + * 1.4). Use this for backward compatibility with legacy systems that + * have a four-faces-per-family restriction. + * + * @since: + * 2.8 + * + */ +#define FT_PARAM_TAG_IGNORE_TYPOGRAPHIC_SUBFAMILY \ + FT_MAKE_TAG( 'i', 'g', 'p', 's' ) + + + /* this constant is deprecated */ +#define FT_PARAM_TAG_IGNORE_PREFERRED_SUBFAMILY \ + FT_PARAM_TAG_IGNORE_TYPOGRAPHIC_SUBFAMILY + + + /************************************************************************** + * + * @enum: + * FT_PARAM_TAG_INCREMENTAL + * + * @description: + * An @FT_Parameter tag to be used with @FT_Open_Face to indicate + * incremental glyph loading. + * + */ +#define FT_PARAM_TAG_INCREMENTAL \ + FT_MAKE_TAG( 'i', 'n', 'c', 'r' ) + + + /************************************************************************** + * + * @enum: + * FT_PARAM_TAG_IGNORE_SBIX + * + * @description: + * A tag for @FT_Parameter to make @FT_Open_Face ignore an 'sbix' table + * while loading a font. Use this if @FT_FACE_FLAG_SBIX is set and you + * want to access the outline glyphs in the font. + * + */ +#define FT_PARAM_TAG_IGNORE_SBIX \ + FT_MAKE_TAG( 'i', 's', 'b', 'x' ) + + + /************************************************************************** + * + * @enum: + * FT_PARAM_TAG_LCD_FILTER_WEIGHTS + * + * @description: + * An @FT_Parameter tag to be used with @FT_Face_Properties. The + * corresponding argument specifies the five LCD filter weights for a + * given face (if using @FT_LOAD_TARGET_LCD, for example), overriding the + * global default values or the values set up with + * @FT_Library_SetLcdFilterWeights. + * + * @since: + * 2.8 + * + */ +#define FT_PARAM_TAG_LCD_FILTER_WEIGHTS \ + FT_MAKE_TAG( 'l', 'c', 'd', 'f' ) + + + /************************************************************************** + * + * @enum: + * FT_PARAM_TAG_RANDOM_SEED + * + * @description: + * An @FT_Parameter tag to be used with @FT_Face_Properties. The + * corresponding 32bit signed integer argument overrides the font + * driver's random seed value with a face-specific one; see @random-seed. + * + * @since: + * 2.8 + * + */ +#define FT_PARAM_TAG_RANDOM_SEED \ + FT_MAKE_TAG( 's', 'e', 'e', 'd' ) + + + /************************************************************************** + * + * @enum: + * FT_PARAM_TAG_STEM_DARKENING + * + * @description: + * An @FT_Parameter tag to be used with @FT_Face_Properties. The + * corresponding Boolean argument specifies whether to apply stem + * darkening, overriding the global default values or the values set up + * with @FT_Property_Set (see @no-stem-darkening). + * + * This is a passive setting that only takes effect if the font driver or + * autohinter honors it, which the CFF, Type~1, and CID drivers always + * do, but the autohinter only in 'light' hinting mode (as of version + * 2.9). 
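+   *
+   *   For example (a sketch only; `face` is assumed to be a valid
+   *   handle):
+   *
+   *   ```
+   *     FT_Parameter  param;
+   *     FT_Bool       darken = 1;
+   *
+   *
+   *     param.tag  = FT_PARAM_TAG_STEM_DARKENING;
+   *     param.data = &darken;
+   *
+   *     error = FT_Face_Properties( face, 1, &param );
+   *   ```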
+ * + * @since: + * 2.8 + * + */ +#define FT_PARAM_TAG_STEM_DARKENING \ + FT_MAKE_TAG( 'd', 'a', 'r', 'k' ) + + + /************************************************************************** + * + * @enum: + * FT_PARAM_TAG_UNPATENTED_HINTING + * + * @description: + * Deprecated, no effect. + * + * Previously: A constant used as the tag of an @FT_Parameter structure + * to indicate that unpatented methods only should be used by the + * TrueType bytecode interpreter for a typeface opened by @FT_Open_Face. + * + */ +#define FT_PARAM_TAG_UNPATENTED_HINTING \ + FT_MAKE_TAG( 'u', 'n', 'p', 'a' ) + + + /* */ + + +FT_END_HEADER + + +#endif /* FTPARAMS_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftpfr.h b/Example/include/freetype/ftpfr.h new file mode 100644 index 0000000..7111d40 --- /dev/null +++ b/Example/include/freetype/ftpfr.h @@ -0,0 +1,179 @@ +/**************************************************************************** + * + * ftpfr.h + * + * FreeType API for accessing PFR-specific data (specification only). + * + * Copyright (C) 2002-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTPFR_H_ +#define FTPFR_H_ + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * pfr_fonts + * + * @title: + * PFR Fonts + * + * @abstract: + * PFR/TrueDoc-specific API. + * + * @description: + * This section contains the declaration of PFR-specific functions. + * + */ + + + /************************************************************************** + * + * @function: + * FT_Get_PFR_Metrics + * + * @description: + * Return the outline and metrics resolutions of a given PFR face. + * + * @input: + * face :: + * Handle to the input face. It can be a non-PFR face. + * + * @output: + * aoutline_resolution :: + * Outline resolution. This is equivalent to `face->units_per_EM` for + * non-PFR fonts. Optional (parameter can be `NULL`). + * + * ametrics_resolution :: + * Metrics resolution. This is equivalent to `outline_resolution` for + * non-PFR fonts. Optional (parameter can be `NULL`). + * + * ametrics_x_scale :: + * A 16.16 fixed-point number used to scale distance expressed in + * metrics units to device subpixels. This is equivalent to + * `face->size->x_scale`, but for metrics only. Optional (parameter + * can be `NULL`). + * + * ametrics_y_scale :: + * Same as `ametrics_x_scale` but for the vertical direction. + * optional (parameter can be `NULL`). + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * If the input face is not a PFR, this function will return an error. + * However, in all cases, it will return valid values. 
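+   *
+   *   A sketch of fetching the scales and converting a kerning distance
+   *   returned by @FT_Get_PFR_Kerning to device sub-pixels (`left` and
+   *   `right` are assumed glyph indices; error handling omitted):
+   *
+   *   ```
+   *     FT_Fixed   x_scale, y_scale;
+   *     FT_Vector  kern;
+   *
+   *
+   *     FT_Get_PFR_Metrics( face, NULL, NULL, &x_scale, &y_scale );
+   *     FT_Get_PFR_Kerning( face, left, right, &kern );
+   *
+   *     kern.x = FT_MulFix( kern.x, x_scale );
+   *     kern.y = FT_MulFix( kern.y, y_scale );
+   *   ```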
+ */ + FT_EXPORT( FT_Error ) + FT_Get_PFR_Metrics( FT_Face face, + FT_UInt *aoutline_resolution, + FT_UInt *ametrics_resolution, + FT_Fixed *ametrics_x_scale, + FT_Fixed *ametrics_y_scale ); + + + /************************************************************************** + * + * @function: + * FT_Get_PFR_Kerning + * + * @description: + * Return the kerning pair corresponding to two glyphs in a PFR face. + * The distance is expressed in metrics units, unlike the result of + * @FT_Get_Kerning. + * + * @input: + * face :: + * A handle to the input face. + * + * left :: + * Index of the left glyph. + * + * right :: + * Index of the right glyph. + * + * @output: + * avector :: + * A kerning vector. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function always return distances in original PFR metrics units. + * This is unlike @FT_Get_Kerning with the @FT_KERNING_UNSCALED mode, + * which always returns distances converted to outline units. + * + * You can use the value of the `x_scale` and `y_scale` parameters + * returned by @FT_Get_PFR_Metrics to scale these to device subpixels. + */ + FT_EXPORT( FT_Error ) + FT_Get_PFR_Kerning( FT_Face face, + FT_UInt left, + FT_UInt right, + FT_Vector *avector ); + + + /************************************************************************** + * + * @function: + * FT_Get_PFR_Advance + * + * @description: + * Return a given glyph advance, expressed in original metrics units, + * from a PFR font. + * + * @input: + * face :: + * A handle to the input face. + * + * gindex :: + * The glyph index. + * + * @output: + * aadvance :: + * The glyph advance in metrics units. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * You can use the `x_scale` or `y_scale` results of @FT_Get_PFR_Metrics + * to convert the advance to device subpixels (i.e., 1/64 of pixels). + */ + FT_EXPORT( FT_Error ) + FT_Get_PFR_Advance( FT_Face face, + FT_UInt gindex, + FT_Pos *aadvance ); + + /* */ + + +FT_END_HEADER + +#endif /* FTPFR_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftrender.h b/Example/include/freetype/ftrender.h new file mode 100644 index 0000000..0b6fad3 --- /dev/null +++ b/Example/include/freetype/ftrender.h @@ -0,0 +1,244 @@ +/**************************************************************************** + * + * ftrender.h + * + * FreeType renderer modules public interface (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. 
+ * + */ + + +#ifndef FTRENDER_H_ +#define FTRENDER_H_ + + +#include +#include + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * module_management + * + */ + + + /* create a new glyph object */ + typedef FT_Error + (*FT_Glyph_InitFunc)( FT_Glyph glyph, + FT_GlyphSlot slot ); + + /* destroys a given glyph object */ + typedef void + (*FT_Glyph_DoneFunc)( FT_Glyph glyph ); + + typedef void + (*FT_Glyph_TransformFunc)( FT_Glyph glyph, + const FT_Matrix* matrix, + const FT_Vector* delta ); + + typedef void + (*FT_Glyph_GetBBoxFunc)( FT_Glyph glyph, + FT_BBox* abbox ); + + typedef FT_Error + (*FT_Glyph_CopyFunc)( FT_Glyph source, + FT_Glyph target ); + + typedef FT_Error + (*FT_Glyph_PrepareFunc)( FT_Glyph glyph, + FT_GlyphSlot slot ); + +/* deprecated */ +#define FT_Glyph_Init_Func FT_Glyph_InitFunc +#define FT_Glyph_Done_Func FT_Glyph_DoneFunc +#define FT_Glyph_Transform_Func FT_Glyph_TransformFunc +#define FT_Glyph_BBox_Func FT_Glyph_GetBBoxFunc +#define FT_Glyph_Copy_Func FT_Glyph_CopyFunc +#define FT_Glyph_Prepare_Func FT_Glyph_PrepareFunc + + + struct FT_Glyph_Class_ + { + FT_Long glyph_size; + FT_Glyph_Format glyph_format; + + FT_Glyph_InitFunc glyph_init; + FT_Glyph_DoneFunc glyph_done; + FT_Glyph_CopyFunc glyph_copy; + FT_Glyph_TransformFunc glyph_transform; + FT_Glyph_GetBBoxFunc glyph_bbox; + FT_Glyph_PrepareFunc glyph_prepare; + }; + + + typedef FT_Error + (*FT_Renderer_RenderFunc)( FT_Renderer renderer, + FT_GlyphSlot slot, + FT_Render_Mode mode, + const FT_Vector* origin ); + + typedef FT_Error + (*FT_Renderer_TransformFunc)( FT_Renderer renderer, + FT_GlyphSlot slot, + const FT_Matrix* matrix, + const FT_Vector* delta ); + + + typedef void + (*FT_Renderer_GetCBoxFunc)( FT_Renderer renderer, + FT_GlyphSlot slot, + FT_BBox* cbox ); + + + typedef FT_Error + (*FT_Renderer_SetModeFunc)( FT_Renderer renderer, + FT_ULong mode_tag, + FT_Pointer mode_ptr ); + +/* deprecated identifiers */ +#define FTRenderer_render FT_Renderer_RenderFunc +#define FTRenderer_transform FT_Renderer_TransformFunc +#define FTRenderer_getCBox FT_Renderer_GetCBoxFunc +#define FTRenderer_setMode FT_Renderer_SetModeFunc + + + /************************************************************************** + * + * @struct: + * FT_Renderer_Class + * + * @description: + * The renderer module class descriptor. + * + * @fields: + * root :: + * The root @FT_Module_Class fields. + * + * glyph_format :: + * The glyph image format this renderer handles. + * + * render_glyph :: + * A method used to render the image that is in a given glyph slot into + * a bitmap. + * + * transform_glyph :: + * A method used to transform the image that is in a given glyph slot. + * + * get_glyph_cbox :: + * A method used to access the glyph's cbox. + * + * set_mode :: + * A method used to pass additional parameters. + * + * raster_class :: + * For @FT_GLYPH_FORMAT_OUTLINE renderers only. This is a pointer to + * its raster's class. + */ + typedef struct FT_Renderer_Class_ + { + FT_Module_Class root; + + FT_Glyph_Format glyph_format; + + FT_Renderer_RenderFunc render_glyph; + FT_Renderer_TransformFunc transform_glyph; + FT_Renderer_GetCBoxFunc get_glyph_cbox; + FT_Renderer_SetModeFunc set_mode; + + const FT_Raster_Funcs* raster_class; + + } FT_Renderer_Class; + + + /************************************************************************** + * + * @function: + * FT_Get_Renderer + * + * @description: + * Retrieve the current renderer for a given glyph format. 
+ * + * @input: + * library :: + * A handle to the library object. + * + * format :: + * The glyph format. + * + * @return: + * A renderer handle. 0~if none found. + * + * @note: + * An error will be returned if a module already exists by that name, or + * if the module requires a version of FreeType that is too great. + * + * To add a new renderer, simply use @FT_Add_Module. To retrieve a + * renderer by its name, use @FT_Get_Module. + */ + FT_EXPORT( FT_Renderer ) + FT_Get_Renderer( FT_Library library, + FT_Glyph_Format format ); + + + /************************************************************************** + * + * @function: + * FT_Set_Renderer + * + * @description: + * Set the current renderer to use, and set additional mode. + * + * @inout: + * library :: + * A handle to the library object. + * + * @input: + * renderer :: + * A handle to the renderer object. + * + * num_params :: + * The number of additional parameters. + * + * parameters :: + * Additional parameters. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * In case of success, the renderer will be used to convert glyph images + * in the renderer's known format into bitmaps. + * + * This doesn't change the current renderer for other formats. + * + * Currently, no FreeType renderer module uses `parameters`; you should + * thus always pass `NULL` as the value. + */ + FT_EXPORT( FT_Error ) + FT_Set_Renderer( FT_Library library, + FT_Renderer renderer, + FT_UInt num_params, + FT_Parameter* parameters ); + + /* */ + + +FT_END_HEADER + +#endif /* FTRENDER_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftsizes.h b/Example/include/freetype/ftsizes.h new file mode 100644 index 0000000..7bfb1ae --- /dev/null +++ b/Example/include/freetype/ftsizes.h @@ -0,0 +1,159 @@ +/**************************************************************************** + * + * ftsizes.h + * + * FreeType size objects management (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + + /************************************************************************** + * + * Typical application would normally not need to use these functions. + * However, they have been placed in a public API for the rare cases where + * they are needed. + * + */ + + +#ifndef FTSIZES_H_ +#define FTSIZES_H_ + + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * sizes_management + * + * @title: + * Size Management + * + * @abstract: + * Managing multiple sizes per face. + * + * @description: + * When creating a new face object (e.g., with @FT_New_Face), an @FT_Size + * object is automatically created and used to store all pixel-size + * dependent information, available in the `face->size` field. + * + * It is however possible to create more sizes for a given face, mostly + * in order to manage several character pixel sizes of the same font + * family and style. See @FT_New_Size and @FT_Done_Size. 
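+   *
+   *   As an illustration only (a sketch that assumes a valid `face`
+   *   handle and omits error handling), an application can keep two
+   *   independent pixel sizes around and switch between them:
+   *
+   *   ```
+   *     FT_Size  size12, size48;
+   *
+   *
+   *     FT_New_Size( face, &size12 );
+   *     FT_New_Size( face, &size48 );
+   *
+   *     FT_Activate_Size( size12 );
+   *     FT_Set_Pixel_Sizes( face, 0, 12 );
+   *
+   *     FT_Activate_Size( size48 );
+   *     FT_Set_Pixel_Sizes( face, 0, 48 );
+   *
+   *     FT_Activate_Size( size12 );
+   *     FT_Load_Char( face, 'A', FT_LOAD_RENDER );
+   *
+   *     FT_Done_Size( size12 );
+   *     FT_Done_Size( size48 );
+   *   ```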
+ * + * Note that @FT_Set_Pixel_Sizes and @FT_Set_Char_Size only modify the + * contents of the current 'active' size; you thus need to use + * @FT_Activate_Size to change it. + * + * 99% of applications won't need the functions provided here, especially + * if they use the caching sub-system, so be cautious when using these. + * + */ + + + /************************************************************************** + * + * @function: + * FT_New_Size + * + * @description: + * Create a new size object from a given face object. + * + * @input: + * face :: + * A handle to a parent face object. + * + * @output: + * asize :: + * A handle to a new size object. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * You need to call @FT_Activate_Size in order to select the new size for + * upcoming calls to @FT_Set_Pixel_Sizes, @FT_Set_Char_Size, + * @FT_Load_Glyph, @FT_Load_Char, etc. + */ + FT_EXPORT( FT_Error ) + FT_New_Size( FT_Face face, + FT_Size* size ); + + + /************************************************************************** + * + * @function: + * FT_Done_Size + * + * @description: + * Discard a given size object. Note that @FT_Done_Face automatically + * discards all size objects allocated with @FT_New_Size. + * + * @input: + * size :: + * A handle to a target size object. + * + * @return: + * FreeType error code. 0~means success. + */ + FT_EXPORT( FT_Error ) + FT_Done_Size( FT_Size size ); + + + /************************************************************************** + * + * @function: + * FT_Activate_Size + * + * @description: + * Even though it is possible to create several size objects for a given + * face (see @FT_New_Size for details), functions like @FT_Load_Glyph or + * @FT_Load_Char only use the one that has been activated last to + * determine the 'current character pixel size'. + * + * This function can be used to 'activate' a previously created size + * object. + * + * @input: + * size :: + * A handle to a target size object. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * If `face` is the size's parent face object, this function changes the + * value of `face->size` to the input size handle. + */ + FT_EXPORT( FT_Error ) + FT_Activate_Size( FT_Size size ); + + /* */ + + +FT_END_HEADER + +#endif /* FTSIZES_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftsnames.h b/Example/include/freetype/ftsnames.h new file mode 100644 index 0000000..9d5d22b --- /dev/null +++ b/Example/include/freetype/ftsnames.h @@ -0,0 +1,272 @@ +/**************************************************************************** + * + * ftsnames.h + * + * Simple interface to access SFNT 'name' tables (which are used + * to hold font names, copyright info, notices, etc.) (specification). + * + * This is _not_ used to retrieve glyph names! + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTSNAMES_H_ +#define FTSNAMES_H_ + + +#include +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." 
+#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * sfnt_names + * + * @title: + * SFNT Names + * + * @abstract: + * Access the names embedded in TrueType and OpenType files. + * + * @description: + * The TrueType and OpenType specifications allow the inclusion of a + * special names table ('name') in font files. This table contains + * textual (and internationalized) information regarding the font, like + * family name, copyright, version, etc. + * + * The definitions below are used to access them if available. + * + * Note that this has nothing to do with glyph names! + * + */ + + + /************************************************************************** + * + * @struct: + * FT_SfntName + * + * @description: + * A structure used to model an SFNT 'name' table entry. + * + * @fields: + * platform_id :: + * The platform ID for `string`. See @TT_PLATFORM_XXX for possible + * values. + * + * encoding_id :: + * The encoding ID for `string`. See @TT_APPLE_ID_XXX, @TT_MAC_ID_XXX, + * @TT_ISO_ID_XXX, @TT_MS_ID_XXX, and @TT_ADOBE_ID_XXX for possible + * values. + * + * language_id :: + * The language ID for `string`. See @TT_MAC_LANGID_XXX and + * @TT_MS_LANGID_XXX for possible values. + * + * Registered OpenType values for `language_id` are always smaller than + * 0x8000; values equal or larger than 0x8000 usually indicate a + * language tag string (introduced in OpenType version 1.6). Use + * function @FT_Get_Sfnt_LangTag with `language_id` as its argument to + * retrieve the associated language tag. + * + * name_id :: + * An identifier for `string`. See @TT_NAME_ID_XXX for possible + * values. + * + * string :: + * The 'name' string. Note that its format differs depending on the + * (platform,encoding) pair, being either a string of bytes (without a + * terminating `NULL` byte) or containing UTF-16BE entities. + * + * string_len :: + * The length of `string` in bytes. + * + * @note: + * Please refer to the TrueType or OpenType specification for more + * details. + */ + typedef struct FT_SfntName_ + { + FT_UShort platform_id; + FT_UShort encoding_id; + FT_UShort language_id; + FT_UShort name_id; + + FT_Byte* string; /* this string is *not* null-terminated! */ + FT_UInt string_len; /* in bytes */ + + } FT_SfntName; + + + /************************************************************************** + * + * @function: + * FT_Get_Sfnt_Name_Count + * + * @description: + * Retrieve the number of name strings in the SFNT 'name' table. + * + * @input: + * face :: + * A handle to the source face. + * + * @return: + * The number of strings in the 'name' table. + * + * @note: + * This function always returns an error if the config macro + * `TT_CONFIG_OPTION_SFNT_NAMES` is not defined in `ftoption.h`. + */ + FT_EXPORT( FT_UInt ) + FT_Get_Sfnt_Name_Count( FT_Face face ); + + + /************************************************************************** + * + * @function: + * FT_Get_Sfnt_Name + * + * @description: + * Retrieve a string of the SFNT 'name' table for a given index. + * + * @input: + * face :: + * A handle to the source face. + * + * idx :: + * The index of the 'name' string. + * + * @output: + * aname :: + * The indexed @FT_SfntName structure. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The `string` array returned in the `aname` structure is not + * null-terminated. Note that you don't have to deallocate `string` by + * yourself; FreeType takes care of it if you call @FT_Done_Face. 
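+   *
+   *   For instance, the following sketch (an illustrative addition; it
+   *   assumes a valid `face`, that `TT_CONFIG_OPTION_SFNT_NAMES` is
+   *   enabled, and that the `TT_PLATFORM_XXX` and `TT_NAME_ID_XXX`
+   *   macros from `ttnameid.h` are available) implements the loop
+   *   described below to find the Windows family name entry, whose
+   *   `string` is then typically UTF-16BE encoded and not
+   *   null-terminated:
+   *
+   *   ```
+   *     FT_SfntName  name;
+   *     FT_UInt      count = FT_Get_Sfnt_Name_Count( face );
+   *     FT_UInt      i;
+   *
+   *
+   *     for ( i = 0; i < count; i++ )
+   *     {
+   *       if ( FT_Get_Sfnt_Name( face, i, &name ) )
+   *         continue;
+   *
+   *       if ( name.platform_id == TT_PLATFORM_MICROSOFT &&
+   *            name.name_id     == TT_NAME_ID_FONT_FAMILY )
+   *         break;
+   *     }
+   *   ```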
+ * + * Use @FT_Get_Sfnt_Name_Count to get the total number of available + * 'name' table entries, then do a loop until you get the right platform, + * encoding, and name ID. + * + * 'name' table format~1 entries can use language tags also, see + * @FT_Get_Sfnt_LangTag. + * + * This function always returns an error if the config macro + * `TT_CONFIG_OPTION_SFNT_NAMES` is not defined in `ftoption.h`. + */ + FT_EXPORT( FT_Error ) + FT_Get_Sfnt_Name( FT_Face face, + FT_UInt idx, + FT_SfntName *aname ); + + + /************************************************************************** + * + * @struct: + * FT_SfntLangTag + * + * @description: + * A structure to model a language tag entry from an SFNT 'name' table. + * + * @fields: + * string :: + * The language tag string, encoded in UTF-16BE (without trailing + * `NULL` bytes). + * + * string_len :: + * The length of `string` in **bytes**. + * + * @note: + * Please refer to the TrueType or OpenType specification for more + * details. + * + * @since: + * 2.8 + */ + typedef struct FT_SfntLangTag_ + { + FT_Byte* string; /* this string is *not* null-terminated! */ + FT_UInt string_len; /* in bytes */ + + } FT_SfntLangTag; + + + /************************************************************************** + * + * @function: + * FT_Get_Sfnt_LangTag + * + * @description: + * Retrieve the language tag associated with a language ID of an SFNT + * 'name' table entry. + * + * @input: + * face :: + * A handle to the source face. + * + * langID :: + * The language ID, as returned by @FT_Get_Sfnt_Name. This is always a + * value larger than 0x8000. + * + * @output: + * alangTag :: + * The language tag associated with the 'name' table entry's language + * ID. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The `string` array returned in the `alangTag` structure is not + * null-terminated. Note that you don't have to deallocate `string` by + * yourself; FreeType takes care of it if you call @FT_Done_Face. + * + * Only 'name' table format~1 supports language tags. For format~0 + * tables, this function always returns FT_Err_Invalid_Table. For + * invalid format~1 language ID values, FT_Err_Invalid_Argument is + * returned. + * + * This function always returns an error if the config macro + * `TT_CONFIG_OPTION_SFNT_NAMES` is not defined in `ftoption.h`. + * + * @since: + * 2.8 + */ + FT_EXPORT( FT_Error ) + FT_Get_Sfnt_LangTag( FT_Face face, + FT_UInt langID, + FT_SfntLangTag *alangTag ); + + + /* */ + + +FT_END_HEADER + +#endif /* FTSNAMES_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftstroke.h b/Example/include/freetype/ftstroke.h new file mode 100644 index 0000000..b3d9080 --- /dev/null +++ b/Example/include/freetype/ftstroke.h @@ -0,0 +1,773 @@ +/**************************************************************************** + * + * ftstroke.h + * + * FreeType path stroker (specification). + * + * Copyright (C) 2002-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. 
+ * + */ + + +#ifndef FTSTROKE_H_ +#define FTSTROKE_H_ + +#include +#include + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * glyph_stroker + * + * @title: + * Glyph Stroker + * + * @abstract: + * Generating bordered and stroked glyphs. + * + * @description: + * This component generates stroked outlines of a given vectorial glyph. + * It also allows you to retrieve the 'outside' and/or the 'inside' + * borders of the stroke. + * + * This can be useful to generate 'bordered' glyph, i.e., glyphs + * displayed with a colored (and anti-aliased) border around their + * shape. + * + * @order: + * FT_Stroker + * + * FT_Stroker_LineJoin + * FT_Stroker_LineCap + * FT_StrokerBorder + * + * FT_Outline_GetInsideBorder + * FT_Outline_GetOutsideBorder + * + * FT_Glyph_Stroke + * FT_Glyph_StrokeBorder + * + * FT_Stroker_New + * FT_Stroker_Set + * FT_Stroker_Rewind + * FT_Stroker_ParseOutline + * FT_Stroker_Done + * + * FT_Stroker_BeginSubPath + * FT_Stroker_EndSubPath + * + * FT_Stroker_LineTo + * FT_Stroker_ConicTo + * FT_Stroker_CubicTo + * + * FT_Stroker_GetBorderCounts + * FT_Stroker_ExportBorder + * FT_Stroker_GetCounts + * FT_Stroker_Export + * + */ + + + /************************************************************************** + * + * @type: + * FT_Stroker + * + * @description: + * Opaque handle to a path stroker object. + */ + typedef struct FT_StrokerRec_* FT_Stroker; + + + /************************************************************************** + * + * @enum: + * FT_Stroker_LineJoin + * + * @description: + * These values determine how two joining lines are rendered in a + * stroker. + * + * @values: + * FT_STROKER_LINEJOIN_ROUND :: + * Used to render rounded line joins. Circular arcs are used to join + * two lines smoothly. + * + * FT_STROKER_LINEJOIN_BEVEL :: + * Used to render beveled line joins. The outer corner of the joined + * lines is filled by enclosing the triangular region of the corner + * with a straight line between the outer corners of each stroke. + * + * FT_STROKER_LINEJOIN_MITER_FIXED :: + * Used to render mitered line joins, with fixed bevels if the miter + * limit is exceeded. The outer edges of the strokes for the two + * segments are extended until they meet at an angle. A bevel join + * (see above) is used if the segments meet at too sharp an angle and + * the outer edges meet beyond a distance corresponding to the meter + * limit. This prevents long spikes being created. + * `FT_STROKER_LINEJOIN_MITER_FIXED` generates a miter line join as + * used in PostScript and PDF. + * + * FT_STROKER_LINEJOIN_MITER_VARIABLE :: + * FT_STROKER_LINEJOIN_MITER :: + * Used to render mitered line joins, with variable bevels if the miter + * limit is exceeded. The intersection of the strokes is clipped + * perpendicularly to the bisector, at a distance corresponding to + * the miter limit. This prevents long spikes being created. + * `FT_STROKER_LINEJOIN_MITER_VARIABLE` generates a mitered line join + * as used in XPS. `FT_STROKER_LINEJOIN_MITER` is an alias for + * `FT_STROKER_LINEJOIN_MITER_VARIABLE`, retained for backward + * compatibility. 
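+   *
+   * @note:
+   *   The join style is selected with @FT_Stroker_Set.  As an
+   *   illustration only (it assumes an existing `stroker` and outline
+   *   coordinates in 26.6 format, hence `2 * 64` for a two-pixel
+   *   border), fixed miter joins clipped at a miter limit of 4.0,
+   *   expressed as a 16.16 value, can be requested like this:
+   *
+   *   ```
+   *     FT_Stroker_Set( stroker,
+   *                     2 * 64,
+   *                     FT_STROKER_LINECAP_ROUND,
+   *                     FT_STROKER_LINEJOIN_MITER_FIXED,
+   *                     4 << 16 );
+   *   ```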
+ */ + typedef enum FT_Stroker_LineJoin_ + { + FT_STROKER_LINEJOIN_ROUND = 0, + FT_STROKER_LINEJOIN_BEVEL = 1, + FT_STROKER_LINEJOIN_MITER_VARIABLE = 2, + FT_STROKER_LINEJOIN_MITER = FT_STROKER_LINEJOIN_MITER_VARIABLE, + FT_STROKER_LINEJOIN_MITER_FIXED = 3 + + } FT_Stroker_LineJoin; + + + /************************************************************************** + * + * @enum: + * FT_Stroker_LineCap + * + * @description: + * These values determine how the end of opened sub-paths are rendered in + * a stroke. + * + * @values: + * FT_STROKER_LINECAP_BUTT :: + * The end of lines is rendered as a full stop on the last point + * itself. + * + * FT_STROKER_LINECAP_ROUND :: + * The end of lines is rendered as a half-circle around the last point. + * + * FT_STROKER_LINECAP_SQUARE :: + * The end of lines is rendered as a square around the last point. + */ + typedef enum FT_Stroker_LineCap_ + { + FT_STROKER_LINECAP_BUTT = 0, + FT_STROKER_LINECAP_ROUND, + FT_STROKER_LINECAP_SQUARE + + } FT_Stroker_LineCap; + + + /************************************************************************** + * + * @enum: + * FT_StrokerBorder + * + * @description: + * These values are used to select a given stroke border in + * @FT_Stroker_GetBorderCounts and @FT_Stroker_ExportBorder. + * + * @values: + * FT_STROKER_BORDER_LEFT :: + * Select the left border, relative to the drawing direction. + * + * FT_STROKER_BORDER_RIGHT :: + * Select the right border, relative to the drawing direction. + * + * @note: + * Applications are generally interested in the 'inside' and 'outside' + * borders. However, there is no direct mapping between these and the + * 'left' and 'right' ones, since this really depends on the glyph's + * drawing orientation, which varies between font formats. + * + * You can however use @FT_Outline_GetInsideBorder and + * @FT_Outline_GetOutsideBorder to get these. + */ + typedef enum FT_StrokerBorder_ + { + FT_STROKER_BORDER_LEFT = 0, + FT_STROKER_BORDER_RIGHT + + } FT_StrokerBorder; + + + /************************************************************************** + * + * @function: + * FT_Outline_GetInsideBorder + * + * @description: + * Retrieve the @FT_StrokerBorder value corresponding to the 'inside' + * borders of a given outline. + * + * @input: + * outline :: + * The source outline handle. + * + * @return: + * The border index. @FT_STROKER_BORDER_RIGHT for empty or invalid + * outlines. + */ + FT_EXPORT( FT_StrokerBorder ) + FT_Outline_GetInsideBorder( FT_Outline* outline ); + + + /************************************************************************** + * + * @function: + * FT_Outline_GetOutsideBorder + * + * @description: + * Retrieve the @FT_StrokerBorder value corresponding to the 'outside' + * borders of a given outline. + * + * @input: + * outline :: + * The source outline handle. + * + * @return: + * The border index. @FT_STROKER_BORDER_LEFT for empty or invalid + * outlines. + */ + FT_EXPORT( FT_StrokerBorder ) + FT_Outline_GetOutsideBorder( FT_Outline* outline ); + + + /************************************************************************** + * + * @function: + * FT_Stroker_New + * + * @description: + * Create a new stroker object. + * + * @input: + * library :: + * FreeType library handle. + * + * @output: + * astroker :: + * A new stroker object handle. `NULL` in case of error. + * + * @return: + * FreeType error code. 0~means success. 
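+   *
+   * @note:
+   *   A typical lifecycle, sketched here for illustration only (it
+   *   assumes a valid `library` handle and an outline `glyph` obtained
+   *   with @FT_Get_Glyph; error handling is omitted, and the radius of
+   *   `32` means half a pixel in 26.6 coordinates):
+   *
+   *   ```
+   *     FT_Stroker  stroker;
+   *
+   *
+   *     FT_Stroker_New( library, &stroker );
+   *     FT_Stroker_Set( stroker, 32,
+   *                     FT_STROKER_LINECAP_ROUND,
+   *                     FT_STROKER_LINEJOIN_ROUND, 0 );
+   *
+   *     FT_Glyph_Stroke( &glyph, stroker, 1 );
+   *
+   *     FT_Stroker_Done( stroker );
+   *   ```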
+ */ + FT_EXPORT( FT_Error ) + FT_Stroker_New( FT_Library library, + FT_Stroker *astroker ); + + + /************************************************************************** + * + * @function: + * FT_Stroker_Set + * + * @description: + * Reset a stroker object's attributes. + * + * @input: + * stroker :: + * The target stroker handle. + * + * radius :: + * The border radius. + * + * line_cap :: + * The line cap style. + * + * line_join :: + * The line join style. + * + * miter_limit :: + * The maximum reciprocal sine of half-angle at the miter join, + * expressed as 16.16 fixed-point value. + * + * @note: + * The `radius` is expressed in the same units as the outline + * coordinates. + * + * The `miter_limit` multiplied by the `radius` gives the maximum size + * of a miter spike, at which it is clipped for + * @FT_STROKER_LINEJOIN_MITER_VARIABLE or replaced with a bevel join for + * @FT_STROKER_LINEJOIN_MITER_FIXED. + * + * This function calls @FT_Stroker_Rewind automatically. + */ + FT_EXPORT( void ) + FT_Stroker_Set( FT_Stroker stroker, + FT_Fixed radius, + FT_Stroker_LineCap line_cap, + FT_Stroker_LineJoin line_join, + FT_Fixed miter_limit ); + + + /************************************************************************** + * + * @function: + * FT_Stroker_Rewind + * + * @description: + * Reset a stroker object without changing its attributes. You should + * call this function before beginning a new series of calls to + * @FT_Stroker_BeginSubPath or @FT_Stroker_EndSubPath. + * + * @input: + * stroker :: + * The target stroker handle. + */ + FT_EXPORT( void ) + FT_Stroker_Rewind( FT_Stroker stroker ); + + + /************************************************************************** + * + * @function: + * FT_Stroker_ParseOutline + * + * @description: + * A convenience function used to parse a whole outline with the stroker. + * The resulting outline(s) can be retrieved later by functions like + * @FT_Stroker_GetCounts and @FT_Stroker_Export. + * + * @input: + * stroker :: + * The target stroker handle. + * + * outline :: + * The source outline. + * + * opened :: + * A boolean. If~1, the outline is treated as an open path instead of + * a closed one. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * If `opened` is~0 (the default), the outline is treated as a closed + * path, and the stroker generates two distinct 'border' outlines. + * + * If `opened` is~1, the outline is processed as an open path, and the + * stroker generates a single 'stroke' outline. + * + * This function calls @FT_Stroker_Rewind automatically. + */ + FT_EXPORT( FT_Error ) + FT_Stroker_ParseOutline( FT_Stroker stroker, + FT_Outline* outline, + FT_Bool opened ); + + + /************************************************************************** + * + * @function: + * FT_Stroker_BeginSubPath + * + * @description: + * Start a new sub-path in the stroker. + * + * @input: + * stroker :: + * The target stroker handle. + * + * to :: + * A pointer to the start vector. + * + * open :: + * A boolean. If~1, the sub-path is treated as an open one. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function is useful when you need to stroke a path that is not + * stored as an @FT_Outline object. 
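+   *
+   *   For example (an illustrative sketch; the stroker is assumed to be
+   *   already configured with @FT_Stroker_Set, coordinates are in 26.6
+   *   format, and error handling is omitted), an open polyline can be
+   *   fed to the stroker with the functions documented below:
+   *
+   *   ```
+   *     FT_Vector  a = {   0,   0 };
+   *     FT_Vector  b = { 320, 640 };
+   *     FT_Vector  c = { 640,   0 };
+   *
+   *
+   *     FT_Stroker_BeginSubPath( stroker, &a, 1 );
+   *     FT_Stroker_LineTo( stroker, &b );
+   *     FT_Stroker_LineTo( stroker, &c );
+   *     FT_Stroker_EndSubPath( stroker );
+   *   ```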
+ */ + FT_EXPORT( FT_Error ) + FT_Stroker_BeginSubPath( FT_Stroker stroker, + FT_Vector* to, + FT_Bool open ); + + + /************************************************************************** + * + * @function: + * FT_Stroker_EndSubPath + * + * @description: + * Close the current sub-path in the stroker. + * + * @input: + * stroker :: + * The target stroker handle. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * You should call this function after @FT_Stroker_BeginSubPath. If the + * subpath was not 'opened', this function 'draws' a single line segment + * to the start position when needed. + */ + FT_EXPORT( FT_Error ) + FT_Stroker_EndSubPath( FT_Stroker stroker ); + + + /************************************************************************** + * + * @function: + * FT_Stroker_LineTo + * + * @description: + * 'Draw' a single line segment in the stroker's current sub-path, from + * the last position. + * + * @input: + * stroker :: + * The target stroker handle. + * + * to :: + * A pointer to the destination point. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * You should call this function between @FT_Stroker_BeginSubPath and + * @FT_Stroker_EndSubPath. + */ + FT_EXPORT( FT_Error ) + FT_Stroker_LineTo( FT_Stroker stroker, + FT_Vector* to ); + + + /************************************************************************** + * + * @function: + * FT_Stroker_ConicTo + * + * @description: + * 'Draw' a single quadratic Bezier in the stroker's current sub-path, + * from the last position. + * + * @input: + * stroker :: + * The target stroker handle. + * + * control :: + * A pointer to a Bezier control point. + * + * to :: + * A pointer to the destination point. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * You should call this function between @FT_Stroker_BeginSubPath and + * @FT_Stroker_EndSubPath. + */ + FT_EXPORT( FT_Error ) + FT_Stroker_ConicTo( FT_Stroker stroker, + FT_Vector* control, + FT_Vector* to ); + + + /************************************************************************** + * + * @function: + * FT_Stroker_CubicTo + * + * @description: + * 'Draw' a single cubic Bezier in the stroker's current sub-path, from + * the last position. + * + * @input: + * stroker :: + * The target stroker handle. + * + * control1 :: + * A pointer to the first Bezier control point. + * + * control2 :: + * A pointer to second Bezier control point. + * + * to :: + * A pointer to the destination point. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * You should call this function between @FT_Stroker_BeginSubPath and + * @FT_Stroker_EndSubPath. + */ + FT_EXPORT( FT_Error ) + FT_Stroker_CubicTo( FT_Stroker stroker, + FT_Vector* control1, + FT_Vector* control2, + FT_Vector* to ); + + + /************************************************************************** + * + * @function: + * FT_Stroker_GetBorderCounts + * + * @description: + * Call this function once you have finished parsing your paths with the + * stroker. It returns the number of points and contours necessary to + * export one of the 'border' or 'stroke' outlines generated by the + * stroker. + * + * @input: + * stroker :: + * The target stroker handle. + * + * border :: + * The border index. + * + * @output: + * anum_points :: + * The number of points. + * + * anum_contours :: + * The number of contours. + * + * @return: + * FreeType error code. 0~means success. 
+ * + * @note: + * When an outline, or a sub-path, is 'closed', the stroker generates two + * independent 'border' outlines, named 'left' and 'right'. + * + * When the outline, or a sub-path, is 'opened', the stroker merges the + * 'border' outlines with caps. The 'left' border receives all points, + * while the 'right' border becomes empty. + * + * Use the function @FT_Stroker_GetCounts instead if you want to retrieve + * the counts associated to both borders. + */ + FT_EXPORT( FT_Error ) + FT_Stroker_GetBorderCounts( FT_Stroker stroker, + FT_StrokerBorder border, + FT_UInt *anum_points, + FT_UInt *anum_contours ); + + + /************************************************************************** + * + * @function: + * FT_Stroker_ExportBorder + * + * @description: + * Call this function after @FT_Stroker_GetBorderCounts to export the + * corresponding border to your own @FT_Outline structure. + * + * Note that this function appends the border points and contours to your + * outline, but does not try to resize its arrays. + * + * @input: + * stroker :: + * The target stroker handle. + * + * border :: + * The border index. + * + * outline :: + * The target outline handle. + * + * @note: + * Always call this function after @FT_Stroker_GetBorderCounts to get + * sure that there is enough room in your @FT_Outline object to receive + * all new data. + * + * When an outline, or a sub-path, is 'closed', the stroker generates two + * independent 'border' outlines, named 'left' and 'right'. + * + * When the outline, or a sub-path, is 'opened', the stroker merges the + * 'border' outlines with caps. The 'left' border receives all points, + * while the 'right' border becomes empty. + * + * Use the function @FT_Stroker_Export instead if you want to retrieve + * all borders at once. + */ + FT_EXPORT( void ) + FT_Stroker_ExportBorder( FT_Stroker stroker, + FT_StrokerBorder border, + FT_Outline* outline ); + + + /************************************************************************** + * + * @function: + * FT_Stroker_GetCounts + * + * @description: + * Call this function once you have finished parsing your paths with the + * stroker. It returns the number of points and contours necessary to + * export all points/borders from the stroked outline/path. + * + * @input: + * stroker :: + * The target stroker handle. + * + * @output: + * anum_points :: + * The number of points. + * + * anum_contours :: + * The number of contours. + * + * @return: + * FreeType error code. 0~means success. + */ + FT_EXPORT( FT_Error ) + FT_Stroker_GetCounts( FT_Stroker stroker, + FT_UInt *anum_points, + FT_UInt *anum_contours ); + + + /************************************************************************** + * + * @function: + * FT_Stroker_Export + * + * @description: + * Call this function after @FT_Stroker_GetBorderCounts to export all + * borders to your own @FT_Outline structure. + * + * Note that this function appends the border points and contours to your + * outline, but does not try to resize its arrays. + * + * @input: + * stroker :: + * The target stroker handle. + * + * outline :: + * The target outline handle. + */ + FT_EXPORT( void ) + FT_Stroker_Export( FT_Stroker stroker, + FT_Outline* outline ); + + + /************************************************************************** + * + * @function: + * FT_Stroker_Done + * + * @description: + * Destroy a stroker object. + * + * @input: + * stroker :: + * A stroker handle. Can be `NULL`. 
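+   *
+   * @note:
+   *   Before the stroker is destroyed, its accumulated result is
+   *   usually exported.  A minimal sketch (an illustration only; it
+   *   assumes a valid `library` handle and omits error handling):
+   *
+   *   ```
+   *     FT_UInt     num_points, num_contours;
+   *     FT_Outline  outline;
+   *
+   *
+   *     FT_Stroker_GetCounts( stroker, &num_points, &num_contours );
+   *     FT_Outline_New( library, num_points, num_contours, &outline );
+   *
+   *     outline.n_points   = 0;
+   *     outline.n_contours = 0;
+   *     FT_Stroker_Export( stroker, &outline );
+   *
+   *     FT_Stroker_Done( stroker );
+   *   ```
+   *
+   *   The point and contour counters are reset to zero after allocation
+   *   because @FT_Stroker_Export appends to the target outline.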
+ */ + FT_EXPORT( void ) + FT_Stroker_Done( FT_Stroker stroker ); + + + /************************************************************************** + * + * @function: + * FT_Glyph_Stroke + * + * @description: + * Stroke a given outline glyph object with a given stroker. + * + * @inout: + * pglyph :: + * Source glyph handle on input, new glyph handle on output. + * + * @input: + * stroker :: + * A stroker handle. + * + * destroy :: + * A Boolean. If~1, the source glyph object is destroyed on success. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The source glyph is untouched in case of error. + * + * Adding stroke may yield a significantly wider and taller glyph + * depending on how large of a radius was used to stroke the glyph. You + * may need to manually adjust horizontal and vertical advance amounts to + * account for this added size. + */ + FT_EXPORT( FT_Error ) + FT_Glyph_Stroke( FT_Glyph *pglyph, + FT_Stroker stroker, + FT_Bool destroy ); + + + /************************************************************************** + * + * @function: + * FT_Glyph_StrokeBorder + * + * @description: + * Stroke a given outline glyph object with a given stroker, but only + * return either its inside or outside border. + * + * @inout: + * pglyph :: + * Source glyph handle on input, new glyph handle on output. + * + * @input: + * stroker :: + * A stroker handle. + * + * inside :: + * A Boolean. If~1, return the inside border, otherwise the outside + * border. + * + * destroy :: + * A Boolean. If~1, the source glyph object is destroyed on success. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The source glyph is untouched in case of error. + * + * Adding stroke may yield a significantly wider and taller glyph + * depending on how large of a radius was used to stroke the glyph. You + * may need to manually adjust horizontal and vertical advance amounts to + * account for this added size. + */ + FT_EXPORT( FT_Error ) + FT_Glyph_StrokeBorder( FT_Glyph *pglyph, + FT_Stroker stroker, + FT_Bool inside, + FT_Bool destroy ); + + /* */ + +FT_END_HEADER + +#endif /* FTSTROKE_H_ */ + + +/* END */ + + +/* Local Variables: */ +/* coding: utf-8 */ +/* End: */ diff --git a/Example/include/freetype/ftsynth.h b/Example/include/freetype/ftsynth.h new file mode 100644 index 0000000..af90967 --- /dev/null +++ b/Example/include/freetype/ftsynth.h @@ -0,0 +1,104 @@ +/**************************************************************************** + * + * ftsynth.h + * + * FreeType synthesizing code for emboldening and slanting + * (specification). + * + * Copyright (C) 2000-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /********* *********/ + /********* WARNING, THIS IS ALPHA CODE! 
THIS API *********/ + /********* IS DUE TO CHANGE UNTIL STRICTLY NOTIFIED BY THE *********/ + /********* FREETYPE DEVELOPMENT TEAM *********/ + /********* *********/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + + /* Main reason for not lifting the functions in this module to a */ + /* 'standard' API is that the used parameters for emboldening and */ + /* slanting are not configurable. Consider the functions as a */ + /* code resource that should be copied into the application and */ + /* adapted to the particular needs. */ + + +#ifndef FTSYNTH_H_ +#define FTSYNTH_H_ + + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + /* Embolden a glyph by a 'reasonable' value (which is highly a matter of */ + /* taste). This function is actually a convenience function, providing */ + /* a wrapper for @FT_Outline_Embolden and @FT_Bitmap_Embolden. */ + /* */ + /* For emboldened outlines the height, width, and advance metrics are */ + /* increased by the strength of the emboldening -- this even affects */ + /* mono-width fonts! */ + /* */ + /* You can also call @FT_Outline_Get_CBox to get precise values. */ + FT_EXPORT( void ) + FT_GlyphSlot_Embolden( FT_GlyphSlot slot ); + + /* Precisely adjust the glyph weight either horizontally or vertically. */ + /* The `xdelta` and `ydelta` values are fractions of the face Em size */ + /* (in fixed-point format). Considering that a regular face would have */ + /* stem widths on the order of 0.1 Em, a delta of 0.05 (0x0CCC) should */ + /* be very noticeable. To increase or decrease the weight, use positive */ + /* or negative values, respectively. */ + FT_EXPORT( void ) + FT_GlyphSlot_AdjustWeight( FT_GlyphSlot slot, + FT_Fixed xdelta, + FT_Fixed ydelta ); + + + /* Slant an outline glyph to the right by about 12 degrees. */ + FT_EXPORT( void ) + FT_GlyphSlot_Oblique( FT_GlyphSlot slot ); + + /* Slant an outline glyph by a given sine of an angle. You can apply */ + /* slant along either x- or y-axis by choosing a corresponding non-zero */ + /* argument. If both slants are non-zero, some affine transformation */ + /* will result. */ + FT_EXPORT( void ) + FT_GlyphSlot_Slant( FT_GlyphSlot slot, + FT_Fixed xslant, + FT_Fixed yslant ); + + /* */ + + +FT_END_HEADER + +#endif /* FTSYNTH_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftsystem.h b/Example/include/freetype/ftsystem.h new file mode 100644 index 0000000..3a08f49 --- /dev/null +++ b/Example/include/freetype/ftsystem.h @@ -0,0 +1,350 @@ +/**************************************************************************** + * + * ftsystem.h + * + * FreeType low-level system interface definition (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. 
By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTSYSTEM_H_ +#define FTSYSTEM_H_ + + + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * system_interface + * + * @title: + * System Interface + * + * @abstract: + * How FreeType manages memory and i/o. + * + * @description: + * This section contains various definitions related to memory management + * and i/o access. You need to understand this information if you want to + * use a custom memory manager or you own i/o streams. + * + */ + + + /************************************************************************** + * + * M E M O R Y M A N A G E M E N T + * + */ + + + /************************************************************************** + * + * @type: + * FT_Memory + * + * @description: + * A handle to a given memory manager object, defined with an + * @FT_MemoryRec structure. + * + */ + typedef struct FT_MemoryRec_* FT_Memory; + + + /************************************************************************** + * + * @functype: + * FT_Alloc_Func + * + * @description: + * A function used to allocate `size` bytes from `memory`. + * + * @input: + * memory :: + * A handle to the source memory manager. + * + * size :: + * The size in bytes to allocate. + * + * @return: + * Address of new memory block. 0~in case of failure. + * + */ + typedef void* + (*FT_Alloc_Func)( FT_Memory memory, + long size ); + + + /************************************************************************** + * + * @functype: + * FT_Free_Func + * + * @description: + * A function used to release a given block of memory. + * + * @input: + * memory :: + * A handle to the source memory manager. + * + * block :: + * The address of the target memory block. + * + */ + typedef void + (*FT_Free_Func)( FT_Memory memory, + void* block ); + + + /************************************************************************** + * + * @functype: + * FT_Realloc_Func + * + * @description: + * A function used to re-allocate a given block of memory. + * + * @input: + * memory :: + * A handle to the source memory manager. + * + * cur_size :: + * The block's current size in bytes. + * + * new_size :: + * The block's requested new size. + * + * block :: + * The block's current address. + * + * @return: + * New block address. 0~in case of memory shortage. + * + * @note: + * In case of error, the old block must still be available. + * + */ + typedef void* + (*FT_Realloc_Func)( FT_Memory memory, + long cur_size, + long new_size, + void* block ); + + + /************************************************************************** + * + * @struct: + * FT_MemoryRec + * + * @description: + * A structure used to describe a given memory manager to FreeType~2. + * + * @fields: + * user :: + * A generic typeless pointer for user data. + * + * alloc :: + * A pointer type to an allocation function. + * + * free :: + * A pointer type to an memory freeing function. + * + * realloc :: + * A pointer type to a reallocation function. 
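+   *
+   * @note:
+   *   As an illustration only (a sketch wrapping the standard C
+   *   allocator from `stdlib.h`; the `my_` names are hypothetical), a
+   *   custom manager can be filled in like this and then passed to
+   *   @FT_New_Library, which is declared in `ftmodapi.h`:
+   *
+   *   ```
+   *     static void*
+   *     my_alloc( FT_Memory  memory, long  size )
+   *     {
+   *       return malloc( (size_t)size );
+   *     }
+   *
+   *     static void
+   *     my_free( FT_Memory  memory, void*  block )
+   *     {
+   *       free( block );
+   *     }
+   *
+   *     static void*
+   *     my_realloc( FT_Memory  memory, long  cur_size,
+   *                 long  new_size, void*  block )
+   *     {
+   *       return realloc( block, (size_t)new_size );
+   *     }
+   *
+   *     struct FT_MemoryRec_  my_memory =
+   *       { NULL, my_alloc, my_free, my_realloc };
+   *   ```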
+ * + */ + struct FT_MemoryRec_ + { + void* user; + FT_Alloc_Func alloc; + FT_Free_Func free; + FT_Realloc_Func realloc; + }; + + + /************************************************************************** + * + * I / O M A N A G E M E N T + * + */ + + + /************************************************************************** + * + * @type: + * FT_Stream + * + * @description: + * A handle to an input stream. + * + * @also: + * See @FT_StreamRec for the publicly accessible fields of a given stream + * object. + * + */ + typedef struct FT_StreamRec_* FT_Stream; + + + /************************************************************************** + * + * @struct: + * FT_StreamDesc + * + * @description: + * A union type used to store either a long or a pointer. This is used + * to store a file descriptor or a `FILE*` in an input stream. + * + */ + typedef union FT_StreamDesc_ + { + long value; + void* pointer; + + } FT_StreamDesc; + + + /************************************************************************** + * + * @functype: + * FT_Stream_IoFunc + * + * @description: + * A function used to seek and read data from a given input stream. + * + * @input: + * stream :: + * A handle to the source stream. + * + * offset :: + * The offset from the start of the stream to seek to. + * + * buffer :: + * The address of the read buffer. + * + * count :: + * The number of bytes to read from the stream. + * + * @return: + * If count >~0, return the number of bytes effectively read by the + * stream (after seeking to `offset`). If count ==~0, return the status + * of the seek operation (non-zero indicates an error). + * + */ + typedef unsigned long + (*FT_Stream_IoFunc)( FT_Stream stream, + unsigned long offset, + unsigned char* buffer, + unsigned long count ); + + + /************************************************************************** + * + * @functype: + * FT_Stream_CloseFunc + * + * @description: + * A function used to close a given input stream. + * + * @input: + * stream :: + * A handle to the target stream. + * + */ + typedef void + (*FT_Stream_CloseFunc)( FT_Stream stream ); + + + /************************************************************************** + * + * @struct: + * FT_StreamRec + * + * @description: + * A structure used to describe an input stream. + * + * @input: + * base :: + * For memory-based streams, this is the address of the first stream + * byte in memory. This field should always be set to `NULL` for + * disk-based streams. + * + * size :: + * The stream size in bytes. + * + * In case of compressed streams where the size is unknown before + * actually doing the decompression, the value is set to 0x7FFFFFFF. + * (Note that this size value can occur for normal streams also; it is + * thus just a hint.) + * + * pos :: + * The current position within the stream. + * + * descriptor :: + * This field is a union that can hold an integer or a pointer. It is + * used by stream implementations to store file descriptors or `FILE*` + * pointers. + * + * pathname :: + * This field is completely ignored by FreeType. However, it is often + * useful during debugging to use it to store the stream's filename + * (where available). + * + * read :: + * The stream's input function. + * + * close :: + * The stream's close function. + * + * memory :: + * The memory manager to use to preload frames. This is set internally + * by FreeType and shouldn't be touched by stream implementations. + * + * cursor :: + * This field is set and used internally by FreeType when parsing + * frames. 
In particular, the `FT_GET_XXX` macros use this instead of + * the `pos` field. + * + * limit :: + * This field is set and used internally by FreeType when parsing + * frames. + * + */ + typedef struct FT_StreamRec_ + { + unsigned char* base; + unsigned long size; + unsigned long pos; + + FT_StreamDesc descriptor; + FT_StreamDesc pathname; + FT_Stream_IoFunc read; + FT_Stream_CloseFunc close; + + FT_Memory memory; + unsigned char* cursor; + unsigned char* limit; + + } FT_StreamRec; + + /* */ + + +FT_END_HEADER + +#endif /* FTSYSTEM_H_ */ + + +/* END */ diff --git a/Example/include/freetype/fttrigon.h b/Example/include/freetype/fttrigon.h new file mode 100644 index 0000000..294981a --- /dev/null +++ b/Example/include/freetype/fttrigon.h @@ -0,0 +1,350 @@ +/**************************************************************************** + * + * fttrigon.h + * + * FreeType trigonometric functions (specification). + * + * Copyright (C) 2001-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTTRIGON_H_ +#define FTTRIGON_H_ + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * computations + * + */ + + + /************************************************************************** + * + * @type: + * FT_Angle + * + * @description: + * This type is used to model angle values in FreeType. Note that the + * angle is a 16.16 fixed-point value expressed in degrees. + * + */ + typedef FT_Fixed FT_Angle; + + + /************************************************************************** + * + * @macro: + * FT_ANGLE_PI + * + * @description: + * The angle pi expressed in @FT_Angle units. + * + */ +#define FT_ANGLE_PI ( 180L << 16 ) + + + /************************************************************************** + * + * @macro: + * FT_ANGLE_2PI + * + * @description: + * The angle 2*pi expressed in @FT_Angle units. + * + */ +#define FT_ANGLE_2PI ( FT_ANGLE_PI * 2 ) + + + /************************************************************************** + * + * @macro: + * FT_ANGLE_PI2 + * + * @description: + * The angle pi/2 expressed in @FT_Angle units. + * + */ +#define FT_ANGLE_PI2 ( FT_ANGLE_PI / 2 ) + + + /************************************************************************** + * + * @macro: + * FT_ANGLE_PI4 + * + * @description: + * The angle pi/4 expressed in @FT_Angle units. + * + */ +#define FT_ANGLE_PI4 ( FT_ANGLE_PI / 4 ) + + + /************************************************************************** + * + * @function: + * FT_Sin + * + * @description: + * Return the sinus of a given angle in fixed-point format. + * + * @input: + * angle :: + * The input angle. + * + * @return: + * The sinus value. + * + * @note: + * If you need both the sinus and cosinus for a given angle, use the + * function @FT_Vector_Unit. 
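+   *
+   *   Remember that @FT_Angle values are 16.16 fixed-point degrees.
+   *   For example (illustrative values; @FT_Cos is described below):
+   *
+   *   ```
+   *     FT_Fixed  s = FT_Sin( 30L << 16 );
+   *     FT_Fixed  c = FT_Cos( FT_ANGLE_PI4 );
+   *   ```
+   *
+   *   Here `s` is approximately 0.5 in 16.16 format (0x8000), and `c`
+   *   approximately 0.7071 (about 0xB505).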
+ * + */ + FT_EXPORT( FT_Fixed ) + FT_Sin( FT_Angle angle ); + + + /************************************************************************** + * + * @function: + * FT_Cos + * + * @description: + * Return the cosinus of a given angle in fixed-point format. + * + * @input: + * angle :: + * The input angle. + * + * @return: + * The cosinus value. + * + * @note: + * If you need both the sinus and cosinus for a given angle, use the + * function @FT_Vector_Unit. + * + */ + FT_EXPORT( FT_Fixed ) + FT_Cos( FT_Angle angle ); + + + /************************************************************************** + * + * @function: + * FT_Tan + * + * @description: + * Return the tangent of a given angle in fixed-point format. + * + * @input: + * angle :: + * The input angle. + * + * @return: + * The tangent value. + * + */ + FT_EXPORT( FT_Fixed ) + FT_Tan( FT_Angle angle ); + + + /************************************************************************** + * + * @function: + * FT_Atan2 + * + * @description: + * Return the arc-tangent corresponding to a given vector (x,y) in the 2d + * plane. + * + * @input: + * x :: + * The horizontal vector coordinate. + * + * y :: + * The vertical vector coordinate. + * + * @return: + * The arc-tangent value (i.e. angle). + * + */ + FT_EXPORT( FT_Angle ) + FT_Atan2( FT_Fixed x, + FT_Fixed y ); + + + /************************************************************************** + * + * @function: + * FT_Angle_Diff + * + * @description: + * Return the difference between two angles. The result is always + * constrained to the ]-PI..PI] interval. + * + * @input: + * angle1 :: + * First angle. + * + * angle2 :: + * Second angle. + * + * @return: + * Constrained value of `angle2-angle1`. + * + */ + FT_EXPORT( FT_Angle ) + FT_Angle_Diff( FT_Angle angle1, + FT_Angle angle2 ); + + + /************************************************************************** + * + * @function: + * FT_Vector_Unit + * + * @description: + * Return the unit vector corresponding to a given angle. After the + * call, the value of `vec.x` will be `cos(angle)`, and the value of + * `vec.y` will be `sin(angle)`. + * + * This function is useful to retrieve both the sinus and cosinus of a + * given angle quickly. + * + * @output: + * vec :: + * The address of target vector. + * + * @input: + * angle :: + * The input angle. + * + */ + FT_EXPORT( void ) + FT_Vector_Unit( FT_Vector* vec, + FT_Angle angle ); + + + /************************************************************************** + * + * @function: + * FT_Vector_Rotate + * + * @description: + * Rotate a vector by a given angle. + * + * @inout: + * vec :: + * The address of target vector. + * + * @input: + * angle :: + * The input angle. + * + */ + FT_EXPORT( void ) + FT_Vector_Rotate( FT_Vector* vec, + FT_Angle angle ); + + + /************************************************************************** + * + * @function: + * FT_Vector_Length + * + * @description: + * Return the length of a given vector. + * + * @input: + * vec :: + * The address of target vector. + * + * @return: + * The vector length, expressed in the same units that the original + * vector coordinates. + * + */ + FT_EXPORT( FT_Fixed ) + FT_Vector_Length( FT_Vector* vec ); + + + /************************************************************************** + * + * @function: + * FT_Vector_Polarize + * + * @description: + * Compute both the length and angle of a given vector. + * + * @input: + * vec :: + * The address of source vector. + * + * @output: + * length :: + * The vector length. 
+ * + * angle :: + * The vector angle. + * + */ + FT_EXPORT( void ) + FT_Vector_Polarize( FT_Vector* vec, + FT_Fixed *length, + FT_Angle *angle ); + + + /************************************************************************** + * + * @function: + * FT_Vector_From_Polar + * + * @description: + * Compute vector coordinates from a length and angle. + * + * @output: + * vec :: + * The address of source vector. + * + * @input: + * length :: + * The vector length. + * + * angle :: + * The vector angle. + * + */ + FT_EXPORT( void ) + FT_Vector_From_Polar( FT_Vector* vec, + FT_Fixed length, + FT_Angle angle ); + + /* */ + + +FT_END_HEADER + +#endif /* FTTRIGON_H_ */ + + +/* END */ diff --git a/Example/include/freetype/fttypes.h b/Example/include/freetype/fttypes.h new file mode 100644 index 0000000..5b109f0 --- /dev/null +++ b/Example/include/freetype/fttypes.h @@ -0,0 +1,617 @@ +/**************************************************************************** + * + * fttypes.h + * + * FreeType simple types definitions (specification only). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTTYPES_H_ +#define FTTYPES_H_ + + +#include +#include FT_CONFIG_CONFIG_H +#include +#include + +#include + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * basic_types + * + * @title: + * Basic Data Types + * + * @abstract: + * The basic data types defined by the library. + * + * @description: + * This section contains the basic data types defined by FreeType~2, + * ranging from simple scalar types to bitmap descriptors. More + * font-specific structures are defined in a different section. Note + * that FreeType does not use floating-point data types. Fractional + * values are represented by fixed-point integers, with lower bits + * storing the fractional part. + * + * @order: + * FT_Byte + * FT_Bytes + * FT_Char + * FT_Int + * FT_UInt + * FT_Int16 + * FT_UInt16 + * FT_Int32 + * FT_UInt32 + * FT_Int64 + * FT_UInt64 + * FT_Short + * FT_UShort + * FT_Long + * FT_ULong + * FT_Bool + * FT_Offset + * FT_PtrDist + * FT_String + * FT_Tag + * FT_Error + * FT_Fixed + * FT_Pointer + * FT_Pos + * FT_Vector + * FT_BBox + * FT_Matrix + * FT_FWord + * FT_UFWord + * FT_F2Dot14 + * FT_UnitVector + * FT_F26Dot6 + * FT_Data + * + * FT_MAKE_TAG + * + * FT_Generic + * FT_Generic_Finalizer + * + * FT_Bitmap + * FT_Pixel_Mode + * FT_Palette_Mode + * FT_Glyph_Format + * FT_IMAGE_TAG + * + */ + + + /************************************************************************** + * + * @type: + * FT_Bool + * + * @description: + * A typedef of unsigned char, used for simple booleans. As usual, + * values 1 and~0 represent true and false, respectively. + */ + typedef unsigned char FT_Bool; + + + /************************************************************************** + * + * @type: + * FT_FWord + * + * @description: + * A signed 16-bit integer used to store a distance in original font + * units. 
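+   *
+   *   Such a value is converted to 26.6 pixel coordinates with the
+   *   current size's scaling factor, for example (an illustrative
+   *   sketch assuming a valid, sized `face`; the value 1229 is
+   *   arbitrary):
+   *
+   *   ```
+   *     FT_FWord  distance_fu = 1229;
+   *     FT_Pos    distance_px;
+   *
+   *
+   *     distance_px = FT_MulFix( distance_fu,
+   *                              face->size->metrics.x_scale );
+   *   ```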
+ */ + typedef signed short FT_FWord; /* distance in FUnits */ + + + /************************************************************************** + * + * @type: + * FT_UFWord + * + * @description: + * An unsigned 16-bit integer used to store a distance in original font + * units. + */ + typedef unsigned short FT_UFWord; /* unsigned distance */ + + + /************************************************************************** + * + * @type: + * FT_Char + * + * @description: + * A simple typedef for the _signed_ char type. + */ + typedef signed char FT_Char; + + + /************************************************************************** + * + * @type: + * FT_Byte + * + * @description: + * A simple typedef for the _unsigned_ char type. + */ + typedef unsigned char FT_Byte; + + + /************************************************************************** + * + * @type: + * FT_Bytes + * + * @description: + * A typedef for constant memory areas. + */ + typedef const FT_Byte* FT_Bytes; + + + /************************************************************************** + * + * @type: + * FT_Tag + * + * @description: + * A typedef for 32-bit tags (as used in the SFNT format). + */ + typedef FT_UInt32 FT_Tag; + + + /************************************************************************** + * + * @type: + * FT_String + * + * @description: + * A simple typedef for the char type, usually used for strings. + */ + typedef char FT_String; + + + /************************************************************************** + * + * @type: + * FT_Short + * + * @description: + * A typedef for signed short. + */ + typedef signed short FT_Short; + + + /************************************************************************** + * + * @type: + * FT_UShort + * + * @description: + * A typedef for unsigned short. + */ + typedef unsigned short FT_UShort; + + + /************************************************************************** + * + * @type: + * FT_Int + * + * @description: + * A typedef for the int type. + */ + typedef signed int FT_Int; + + + /************************************************************************** + * + * @type: + * FT_UInt + * + * @description: + * A typedef for the unsigned int type. + */ + typedef unsigned int FT_UInt; + + + /************************************************************************** + * + * @type: + * FT_Long + * + * @description: + * A typedef for signed long. + */ + typedef signed long FT_Long; + + + /************************************************************************** + * + * @type: + * FT_ULong + * + * @description: + * A typedef for unsigned long. + */ + typedef unsigned long FT_ULong; + + + /************************************************************************** + * + * @type: + * FT_F2Dot14 + * + * @description: + * A signed 2.14 fixed-point type used for unit vectors. + */ + typedef signed short FT_F2Dot14; + + + /************************************************************************** + * + * @type: + * FT_F26Dot6 + * + * @description: + * A signed 26.6 fixed-point type used for vectorial pixel coordinates. + */ + typedef signed long FT_F26Dot6; + + + /************************************************************************** + * + * @type: + * FT_Fixed + * + * @description: + * This type is used to store 16.16 fixed-point values, like scaling + * values or matrix coefficients. 
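+   *
+   *   For example, 1.0 is stored as 0x10000L and 1.5 as 0x18000L.  An
+   *   illustrative sketch (the shear factor 0x366AL is about 0.21,
+   *   i.e., roughly a 12-degree slant) fills an @FT_Matrix, described
+   *   below, with such values:
+   *
+   *   ```
+   *     FT_Matrix  slant;
+   *
+   *
+   *     slant.xx = 0x10000L;
+   *     slant.xy = 0x0366AL;
+   *     slant.yx = 0;
+   *     slant.yy = 0x10000L;
+   *   ```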
+ */ + typedef signed long FT_Fixed; + + + /************************************************************************** + * + * @type: + * FT_Error + * + * @description: + * The FreeType error code type. A value of~0 is always interpreted as a + * successful operation. + */ + typedef int FT_Error; + + + /************************************************************************** + * + * @type: + * FT_Pointer + * + * @description: + * A simple typedef for a typeless pointer. + */ + typedef void* FT_Pointer; + + + /************************************************************************** + * + * @type: + * FT_Offset + * + * @description: + * This is equivalent to the ANSI~C `size_t` type, i.e., the largest + * _unsigned_ integer type used to express a file size or position, or a + * memory block size. + */ + typedef size_t FT_Offset; + + + /************************************************************************** + * + * @type: + * FT_PtrDist + * + * @description: + * This is equivalent to the ANSI~C `ptrdiff_t` type, i.e., the largest + * _signed_ integer type used to express the distance between two + * pointers. + */ + typedef ft_ptrdiff_t FT_PtrDist; + + + /************************************************************************** + * + * @struct: + * FT_UnitVector + * + * @description: + * A simple structure used to store a 2D vector unit vector. Uses + * FT_F2Dot14 types. + * + * @fields: + * x :: + * Horizontal coordinate. + * + * y :: + * Vertical coordinate. + */ + typedef struct FT_UnitVector_ + { + FT_F2Dot14 x; + FT_F2Dot14 y; + + } FT_UnitVector; + + + /************************************************************************** + * + * @struct: + * FT_Matrix + * + * @description: + * A simple structure used to store a 2x2 matrix. Coefficients are in + * 16.16 fixed-point format. The computation performed is: + * + * ``` + * x' = x*xx + y*xy + * y' = x*yx + y*yy + * ``` + * + * @fields: + * xx :: + * Matrix coefficient. + * + * xy :: + * Matrix coefficient. + * + * yx :: + * Matrix coefficient. + * + * yy :: + * Matrix coefficient. + */ + typedef struct FT_Matrix_ + { + FT_Fixed xx, xy; + FT_Fixed yx, yy; + + } FT_Matrix; + + + /************************************************************************** + * + * @struct: + * FT_Data + * + * @description: + * Read-only binary data represented as a pointer and a length. + * + * @fields: + * pointer :: + * The data. + * + * length :: + * The length of the data in bytes. + */ + typedef struct FT_Data_ + { + const FT_Byte* pointer; + FT_UInt length; + + } FT_Data; + + + /************************************************************************** + * + * @functype: + * FT_Generic_Finalizer + * + * @description: + * Describe a function used to destroy the 'client' data of any FreeType + * object. See the description of the @FT_Generic type for details of + * usage. + * + * @input: + * The address of the FreeType object that is under finalization. Its + * client data is accessed through its `generic` field. + */ + typedef void (*FT_Generic_Finalizer)( void* object ); + + + /************************************************************************** + * + * @struct: + * FT_Generic + * + * @description: + * Client applications often need to associate their own data to a + * variety of FreeType core objects. For example, a text layout API + * might want to associate a glyph cache to a given size object. + * + * Some FreeType object contains a `generic` field, of type `FT_Generic`, + * which usage is left to client applications and font servers. 
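  /*
   * A sketch of applying the 2x2 matrix described above to a point,
   * using `FT_Vector_Transform' from `freetype.h'.  The coefficients
   * below encode a 90-degree counter-clockwise rotation in 16.16.
   */

#include <ft2build.h>
#include FT_FREETYPE_H

  static void
  rotate_90_ccw( FT_Vector*  point )
  {
    FT_Matrix  m;


    m.xx = 0;          m.xy = -0x10000L;
    m.yx = 0x10000L;   m.yy = 0;

    /* apply x' = x*xx + y*xy and y' = x*yx + y*yy, as documented above */
    FT_Vector_Transform( point, &m );
  }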
+ * + * It can be used to store a pointer to client-specific data, as well as + * the address of a 'finalizer' function, which will be called by + * FreeType when the object is destroyed (for example, the previous + * client example would put the address of the glyph cache destructor in + * the `finalizer` field). + * + * @fields: + * data :: + * A typeless pointer to any client-specified data. This field is + * completely ignored by the FreeType library. + * + * finalizer :: + * A pointer to a 'generic finalizer' function, which will be called + * when the object is destroyed. If this field is set to `NULL`, no + * code will be called. + */ + typedef struct FT_Generic_ + { + void* data; + FT_Generic_Finalizer finalizer; + + } FT_Generic; + + + /************************************************************************** + * + * @macro: + * FT_MAKE_TAG + * + * @description: + * This macro converts four-letter tags that are used to label TrueType + * tables into an `FT_Tag` type, to be used within FreeType. + * + * @note: + * The produced values **must** be 32-bit integers. Don't redefine this + * macro. + */ +#define FT_MAKE_TAG( _x1, _x2, _x3, _x4 ) \ + ( ( FT_STATIC_BYTE_CAST( FT_Tag, _x1 ) << 24 ) | \ + ( FT_STATIC_BYTE_CAST( FT_Tag, _x2 ) << 16 ) | \ + ( FT_STATIC_BYTE_CAST( FT_Tag, _x3 ) << 8 ) | \ + FT_STATIC_BYTE_CAST( FT_Tag, _x4 ) ) + + + /*************************************************************************/ + /*************************************************************************/ + /* */ + /* L I S T M A N A G E M E N T */ + /* */ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * @section: + * list_processing + * + */ + + + /************************************************************************** + * + * @type: + * FT_ListNode + * + * @description: + * Many elements and objects in FreeType are listed through an @FT_List + * record (see @FT_ListRec). As its name suggests, an FT_ListNode is a + * handle to a single list element. + */ + typedef struct FT_ListNodeRec_* FT_ListNode; + + + /************************************************************************** + * + * @type: + * FT_List + * + * @description: + * A handle to a list record (see @FT_ListRec). + */ + typedef struct FT_ListRec_* FT_List; + + + /************************************************************************** + * + * @struct: + * FT_ListNodeRec + * + * @description: + * A structure used to hold a single list element. + * + * @fields: + * prev :: + * The previous element in the list. `NULL` if first. + * + * next :: + * The next element in the list. `NULL` if last. + * + * data :: + * A typeless pointer to the listed object. + */ + typedef struct FT_ListNodeRec_ + { + FT_ListNode prev; + FT_ListNode next; + void* data; + + } FT_ListNodeRec; + + + /************************************************************************** + * + * @struct: + * FT_ListRec + * + * @description: + * A structure used to hold a simple doubly-linked list. These are used + * in many parts of FreeType. + * + * @fields: + * head :: + * The head (first element) of doubly-linked list. + * + * tail :: + * The tail (last element) of doubly-linked list. 
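  /*
   * A sketch of attaching client data and a finalizer through a
   * `generic' field, as described above.  `FT_Face' (from `freetype.h')
   * is one object that carries such a field; `MyCache' is a purely
   * hypothetical client-side type.
   */

#include <stdlib.h>

#include <ft2build.h>
#include FT_FREETYPE_H

  typedef struct  MyCache_
  {
    int  placeholder;   /* hypothetical client data */

  } MyCache;


  static void
  my_cache_finalizer( void*  object )
  {
    FT_Face  face = (FT_Face)object;


    /* called by FreeType when the face is destroyed */
    free( face->generic.data );
  }


  static void
  attach_cache( FT_Face  face )
  {
    face->generic.data      = calloc( 1, sizeof ( MyCache ) );
    face->generic.finalizer = my_cache_finalizer;
  }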
+ */ + typedef struct FT_ListRec_ + { + FT_ListNode head; + FT_ListNode tail; + + } FT_ListRec; + + /* */ + + +#define FT_IS_EMPTY( list ) ( (list).head == 0 ) +#define FT_BOOL( x ) FT_STATIC_CAST( FT_Bool, (x) != 0 ) + + /* concatenate C tokens */ +#define FT_ERR_XCAT( x, y ) x ## y +#define FT_ERR_CAT( x, y ) FT_ERR_XCAT( x, y ) + + /* see `ftmoderr.h` for descriptions of the following macros */ + +#define FT_ERR( e ) FT_ERR_CAT( FT_ERR_PREFIX, e ) + +#define FT_ERROR_BASE( x ) ( (x) & 0xFF ) +#define FT_ERROR_MODULE( x ) ( (x) & 0xFF00U ) + +#define FT_ERR_EQ( x, e ) \ + ( FT_ERROR_BASE( x ) == FT_ERROR_BASE( FT_ERR( e ) ) ) +#define FT_ERR_NEQ( x, e ) \ + ( FT_ERROR_BASE( x ) != FT_ERROR_BASE( FT_ERR( e ) ) ) + + +FT_END_HEADER + +#endif /* FTTYPES_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ftwinfnt.h b/Example/include/freetype/ftwinfnt.h new file mode 100644 index 0000000..7b701ea --- /dev/null +++ b/Example/include/freetype/ftwinfnt.h @@ -0,0 +1,276 @@ +/**************************************************************************** + * + * ftwinfnt.h + * + * FreeType API for accessing Windows fnt-specific data. + * + * Copyright (C) 2003-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTWINFNT_H_ +#define FTWINFNT_H_ + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * winfnt_fonts + * + * @title: + * Window FNT Files + * + * @abstract: + * Windows FNT-specific API. + * + * @description: + * This section contains the declaration of Windows FNT-specific + * functions. + * + */ + + + /************************************************************************** + * + * @enum: + * FT_WinFNT_ID_XXX + * + * @description: + * A list of valid values for the `charset` byte in @FT_WinFNT_HeaderRec. + * Exact mapping tables for the various 'cpXXXX' encodings (except for + * 'cp1361') can be found at 'ftp://ftp.unicode.org/Public/' in the + * `MAPPINGS/VENDORS/MICSFT/WINDOWS` subdirectory. 'cp1361' is roughly a + * superset of `MAPPINGS/OBSOLETE/EASTASIA/KSC/JOHAB.TXT`. + * + * @values: + * FT_WinFNT_ID_DEFAULT :: + * This is used for font enumeration and font creation as a 'don't + * care' value. Valid font files don't contain this value. When + * querying for information about the character set of the font that is + * currently selected into a specified device context, this return + * value (of the related Windows API) simply denotes failure. + * + * FT_WinFNT_ID_SYMBOL :: + * There is no known mapping table available. + * + * FT_WinFNT_ID_MAC :: + * Mac Roman encoding. + * + * FT_WinFNT_ID_OEM :: + * From Michael Poettgen : + * + * The 'Windows Font Mapping' article says that `FT_WinFNT_ID_OEM` is + * used for the charset of vector fonts, like `modern.fon`, + * `roman.fon`, and `script.fon` on Windows. + * + * The 'CreateFont' documentation says: The `FT_WinFNT_ID_OEM` value + * specifies a character set that is operating-system dependent. 
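  /*
   * Returning to the list types from `fttypes.h' above: a sketch of a
   * manual traversal using the `head', `next', and `data' fields (the
   * public `FT_List_Iterate' from `ftlist.h' serves the same purpose).
   * Illustrative only.
   */

  static FT_UInt
  count_list_elements( FT_List  list )
  {
    FT_ListNode  node;
    FT_UInt      count = 0;


    if ( FT_IS_EMPTY( *list ) )
      return 0;

    for ( node = list->head; node; node = node->next )
      count++;                        /* `node->data' is the payload */

    return count;
  }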
+ * + * The 'IFIMETRICS' documentation from the 'Windows Driver Development + * Kit' says: This font supports an OEM-specific character set. The + * OEM character set is system dependent. + * + * In general OEM, as opposed to ANSI (i.e., 'cp1252'), denotes the + * second default codepage that most international versions of Windows + * have. It is one of the OEM codepages from + * + * https://docs.microsoft.com/en-us/windows/desktop/intl/code-page-identifiers + * , + * + * and is used for the 'DOS boxes', to support legacy applications. A + * German Windows version for example usually uses ANSI codepage 1252 + * and OEM codepage 850. + * + * FT_WinFNT_ID_CP874 :: + * A superset of Thai TIS 620 and ISO 8859-11. + * + * FT_WinFNT_ID_CP932 :: + * A superset of Japanese Shift-JIS (with minor deviations). + * + * FT_WinFNT_ID_CP936 :: + * A superset of simplified Chinese GB 2312-1980 (with different + * ordering and minor deviations). + * + * FT_WinFNT_ID_CP949 :: + * A superset of Korean Hangul KS~C 5601-1987 (with different ordering + * and minor deviations). + * + * FT_WinFNT_ID_CP950 :: + * A superset of traditional Chinese Big~5 ETen (with different + * ordering and minor deviations). + * + * FT_WinFNT_ID_CP1250 :: + * A superset of East European ISO 8859-2 (with slightly different + * ordering). + * + * FT_WinFNT_ID_CP1251 :: + * A superset of Russian ISO 8859-5 (with different ordering). + * + * FT_WinFNT_ID_CP1252 :: + * ANSI encoding. A superset of ISO 8859-1. + * + * FT_WinFNT_ID_CP1253 :: + * A superset of Greek ISO 8859-7 (with minor modifications). + * + * FT_WinFNT_ID_CP1254 :: + * A superset of Turkish ISO 8859-9. + * + * FT_WinFNT_ID_CP1255 :: + * A superset of Hebrew ISO 8859-8 (with some modifications). + * + * FT_WinFNT_ID_CP1256 :: + * A superset of Arabic ISO 8859-6 (with different ordering). + * + * FT_WinFNT_ID_CP1257 :: + * A superset of Baltic ISO 8859-13 (with some deviations). + * + * FT_WinFNT_ID_CP1258 :: + * For Vietnamese. This encoding doesn't cover all necessary + * characters. + * + * FT_WinFNT_ID_CP1361 :: + * Korean (Johab). + */ + +#define FT_WinFNT_ID_CP1252 0 +#define FT_WinFNT_ID_DEFAULT 1 +#define FT_WinFNT_ID_SYMBOL 2 +#define FT_WinFNT_ID_MAC 77 +#define FT_WinFNT_ID_CP932 128 +#define FT_WinFNT_ID_CP949 129 +#define FT_WinFNT_ID_CP1361 130 +#define FT_WinFNT_ID_CP936 134 +#define FT_WinFNT_ID_CP950 136 +#define FT_WinFNT_ID_CP1253 161 +#define FT_WinFNT_ID_CP1254 162 +#define FT_WinFNT_ID_CP1258 163 +#define FT_WinFNT_ID_CP1255 177 +#define FT_WinFNT_ID_CP1256 178 +#define FT_WinFNT_ID_CP1257 186 +#define FT_WinFNT_ID_CP1251 204 +#define FT_WinFNT_ID_CP874 222 +#define FT_WinFNT_ID_CP1250 238 +#define FT_WinFNT_ID_OEM 255 + + + /************************************************************************** + * + * @struct: + * FT_WinFNT_HeaderRec + * + * @description: + * Windows FNT Header info. 
+ */ + typedef struct FT_WinFNT_HeaderRec_ + { + FT_UShort version; + FT_ULong file_size; + FT_Byte copyright[60]; + FT_UShort file_type; + FT_UShort nominal_point_size; + FT_UShort vertical_resolution; + FT_UShort horizontal_resolution; + FT_UShort ascent; + FT_UShort internal_leading; + FT_UShort external_leading; + FT_Byte italic; + FT_Byte underline; + FT_Byte strike_out; + FT_UShort weight; + FT_Byte charset; + FT_UShort pixel_width; + FT_UShort pixel_height; + FT_Byte pitch_and_family; + FT_UShort avg_width; + FT_UShort max_width; + FT_Byte first_char; + FT_Byte last_char; + FT_Byte default_char; + FT_Byte break_char; + FT_UShort bytes_per_row; + FT_ULong device_offset; + FT_ULong face_name_offset; + FT_ULong bits_pointer; + FT_ULong bits_offset; + FT_Byte reserved; + FT_ULong flags; + FT_UShort A_space; + FT_UShort B_space; + FT_UShort C_space; + FT_UShort color_table_offset; + FT_ULong reserved1[4]; + + } FT_WinFNT_HeaderRec; + + + /************************************************************************** + * + * @struct: + * FT_WinFNT_Header + * + * @description: + * A handle to an @FT_WinFNT_HeaderRec structure. + */ + typedef struct FT_WinFNT_HeaderRec_* FT_WinFNT_Header; + + + /************************************************************************** + * + * @function: + * FT_Get_WinFNT_Header + * + * @description: + * Retrieve a Windows FNT font info header. + * + * @input: + * face :: + * A handle to the input face. + * + * @output: + * aheader :: + * The WinFNT header. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * This function only works with Windows FNT faces, returning an error + * otherwise. + */ + FT_EXPORT( FT_Error ) + FT_Get_WinFNT_Header( FT_Face face, + FT_WinFNT_HeaderRec *aheader ); + + /* */ + + +FT_END_HEADER + +#endif /* FTWINFNT_H_ */ + + +/* END */ + + +/* Local Variables: */ +/* coding: utf-8 */ +/* End: */ diff --git a/Example/include/freetype/internal/autohint.h b/Example/include/freetype/internal/autohint.h new file mode 100644 index 0000000..bf9c8b7 --- /dev/null +++ b/Example/include/freetype/internal/autohint.h @@ -0,0 +1,234 @@ +/**************************************************************************** + * + * autohint.h + * + * High-level 'autohint' module-specific interface (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + + /************************************************************************** + * + * The auto-hinter is used to load and automatically hint glyphs if a + * format-specific hinter isn't available. + * + */ + + +#ifndef AUTOHINT_H_ +#define AUTOHINT_H_ + + + /************************************************************************** + * + * A small technical note regarding automatic hinting in order to clarify + * this module interface. + * + * An automatic hinter might compute two kinds of data for a given face: + * + * - global hints: Usually some metrics that describe global properties + * of the face. It is computed by scanning more or less + * aggressively the glyphs in the face, and thus can be + * very slow to compute (even if the size of global hints + * is really small). 
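  /*
   * A sketch of retrieving the Windows FNT header via
   * `FT_Get_WinFNT_Header' declared in `ftwinfnt.h' above; only faces
   * loaded by the Windows FNT driver succeed here.  Assumes the usual
   * FreeType public includes.
   */

#include <ft2build.h>
#include FT_WINFONTS_H

  static void
  inspect_fnt( FT_Face  face )
  {
    FT_WinFNT_HeaderRec  header;


    if ( !FT_Get_WinFNT_Header( face, &header ) )   /* 0 means success */
    {
      if ( header.charset == FT_WinFNT_ID_CP1252 )
      {
        /* an ANSI-encoded bitmap font, `header.pixel_height' tall */
      }
    }
  }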
+ * + * - glyph hints: These describe some important features of the glyph + * outline, as well as how to align them. They are + * generally much faster to compute than global hints. + * + * The current FreeType auto-hinter does a pretty good job while performing + * fast computations for both global and glyph hints. However, we might be + * interested in introducing more complex and powerful algorithms in the + * future, like the one described in the John D. Hobby paper, which + * unfortunately requires a lot more horsepower. + * + * Because a sufficiently sophisticated font management system would + * typically implement an LRU cache of opened face objects to reduce memory + * usage, it is a good idea to be able to avoid recomputing global hints + * every time the same face is re-opened. + * + * We thus provide the ability to cache global hints outside of the face + * object, in order to speed up font re-opening time. Of course, this + * feature is purely optional, so most client programs won't even notice + * it. + * + * I initially thought that it would be a good idea to cache the glyph + * hints too. However, my general idea now is that if you really need to + * cache these too, you are simply in need of a new font format, where all + * this information could be stored within the font file and decoded on the + * fly. + * + */ + + +#include + + +FT_BEGIN_HEADER + + + typedef struct FT_AutoHinterRec_ *FT_AutoHinter; + + + /************************************************************************** + * + * @functype: + * FT_AutoHinter_GlobalGetFunc + * + * @description: + * Retrieve the global hints computed for a given face object. The + * resulting data is dissociated from the face and will survive a call to + * FT_Done_Face(). It must be discarded through the API + * FT_AutoHinter_GlobalDoneFunc(). + * + * @input: + * hinter :: + * A handle to the source auto-hinter. + * + * face :: + * A handle to the source face object. + * + * @output: + * global_hints :: + * A typeless pointer to the global hints. + * + * global_len :: + * The size in bytes of the global hints. + */ + typedef void + (*FT_AutoHinter_GlobalGetFunc)( FT_AutoHinter hinter, + FT_Face face, + void** global_hints, + long* global_len ); + + + /************************************************************************** + * + * @functype: + * FT_AutoHinter_GlobalDoneFunc + * + * @description: + * Discard the global hints retrieved through + * FT_AutoHinter_GlobalGetFunc(). This is the only way these hints are + * freed from memory. + * + * @input: + * hinter :: + * A handle to the auto-hinter module. + * + * global :: + * A pointer to retrieved global hints to discard. + */ + typedef void + (*FT_AutoHinter_GlobalDoneFunc)( FT_AutoHinter hinter, + void* global ); + + + /************************************************************************** + * + * @functype: + * FT_AutoHinter_GlobalResetFunc + * + * @description: + * This function is used to recompute the global metrics in a given font. + * This is useful when global font data changes (e.g. Multiple Masters + * fonts where blend coordinates change). + * + * @input: + * hinter :: + * A handle to the source auto-hinter. + * + * face :: + * A handle to the face. 
+ */ + typedef void + (*FT_AutoHinter_GlobalResetFunc)( FT_AutoHinter hinter, + FT_Face face ); + + + /************************************************************************** + * + * @functype: + * FT_AutoHinter_GlyphLoadFunc + * + * @description: + * This function is used to load, scale, and automatically hint a glyph + * from a given face. + * + * @input: + * face :: + * A handle to the face. + * + * glyph_index :: + * The glyph index. + * + * load_flags :: + * The load flags. + * + * @note: + * This function is capable of loading composite glyphs by hinting each + * sub-glyph independently (which improves quality). + * + * It will call the font driver with @FT_Load_Glyph, with + * @FT_LOAD_NO_SCALE set. + */ + typedef FT_Error + (*FT_AutoHinter_GlyphLoadFunc)( FT_AutoHinter hinter, + FT_GlyphSlot slot, + FT_Size size, + FT_UInt glyph_index, + FT_Int32 load_flags ); + + + /************************************************************************** + * + * @struct: + * FT_AutoHinter_InterfaceRec + * + * @description: + * The auto-hinter module's interface. + */ + typedef struct FT_AutoHinter_InterfaceRec_ + { + FT_AutoHinter_GlobalResetFunc reset_face; + FT_AutoHinter_GlobalGetFunc get_global_hints; + FT_AutoHinter_GlobalDoneFunc done_global_hints; + FT_AutoHinter_GlyphLoadFunc load_glyph; + + } FT_AutoHinter_InterfaceRec, *FT_AutoHinter_Interface; + + +#define FT_DECLARE_AUTOHINTER_INTERFACE( class_ ) \ + FT_CALLBACK_TABLE const FT_AutoHinter_InterfaceRec class_; + +#define FT_DEFINE_AUTOHINTER_INTERFACE( \ + class_, \ + reset_face_, \ + get_global_hints_, \ + done_global_hints_, \ + load_glyph_ ) \ + FT_CALLBACK_TABLE_DEF \ + const FT_AutoHinter_InterfaceRec class_ = \ + { \ + reset_face_, \ + get_global_hints_, \ + done_global_hints_, \ + load_glyph_ \ + }; + + +FT_END_HEADER + +#endif /* AUTOHINT_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/cffotypes.h b/Example/include/freetype/internal/cffotypes.h new file mode 100644 index 0000000..50d5353 --- /dev/null +++ b/Example/include/freetype/internal/cffotypes.h @@ -0,0 +1,107 @@ +/**************************************************************************** + * + * cffotypes.h + * + * Basic OpenType/CFF object type definitions (specification). + * + * Copyright (C) 2017-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef CFFOTYPES_H_ +#define CFFOTYPES_H_ + +#include +#include +#include +#include +#include + + +FT_BEGIN_HEADER + + + typedef TT_Face CFF_Face; + + + /************************************************************************** + * + * @type: + * CFF_Size + * + * @description: + * A handle to an OpenType size object. + */ + typedef struct CFF_SizeRec_ + { + FT_SizeRec root; + FT_ULong strike_index; /* 0xFFFFFFFF to indicate invalid */ + + } CFF_SizeRec, *CFF_Size; + + + /************************************************************************** + * + * @type: + * CFF_GlyphSlot + * + * @description: + * A handle to an OpenType glyph slot object. 
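  /*
   * Returning to the auto-hinter interface from `autohint.h' above: a
   * sketch of how a hinter module might publish its callbacks with
   * `FT_DEFINE_AUTOHINTER_INTERFACE'.  All `my_*' names are
   * hypothetical stubs, present only to show the expected signatures.
   */

  static void
  my_reset_face( FT_AutoHinter  hinter,
                 FT_Face        face )
  {
    FT_UNUSED( hinter );
    FT_UNUSED( face );
  }

  static void
  my_get_global_hints( FT_AutoHinter  hinter,
                       FT_Face        face,
                       void**         global_hints,
                       long*          global_len )
  {
    FT_UNUSED( hinter );
    FT_UNUSED( face );

    *global_hints = NULL;
    *global_len   = 0;
  }

  static void
  my_done_global_hints( FT_AutoHinter  hinter,
                        void*          global )
  {
    FT_UNUSED( hinter );
    FT_UNUSED( global );
  }

  static FT_Error
  my_load_glyph( FT_AutoHinter  hinter,
                 FT_GlyphSlot   slot,
                 FT_Size        size,
                 FT_UInt        glyph_index,
                 FT_Int32       load_flags )
  {
    FT_UNUSED( hinter );
    FT_UNUSED( slot );
    FT_UNUSED( size );
    FT_UNUSED( glyph_index );
    FT_UNUSED( load_flags );

    return 0;   /* FT_Err_Ok */
  }


  FT_DEFINE_AUTOHINTER_INTERFACE(
    my_autohinter_interface,
    my_reset_face,
    my_get_global_hints,
    my_done_global_hints,
    my_load_glyph )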
+ */ + typedef struct CFF_GlyphSlotRec_ + { + FT_GlyphSlotRec root; + + FT_Bool hint; + FT_Bool scaled; + + FT_Fixed x_scale; + FT_Fixed y_scale; + + } CFF_GlyphSlotRec, *CFF_GlyphSlot; + + + /************************************************************************** + * + * @type: + * CFF_Internal + * + * @description: + * The interface to the 'internal' field of `FT_Size`. + */ + typedef struct CFF_InternalRec_ + { + PSH_Globals topfont; + PSH_Globals subfonts[CFF_MAX_CID_FONTS]; + + } CFF_InternalRec, *CFF_Internal; + + + /************************************************************************** + * + * Subglyph transformation record. + */ + typedef struct CFF_Transform_ + { + FT_Fixed xx, xy; /* transformation matrix coefficients */ + FT_Fixed yx, yy; + FT_F26Dot6 ox, oy; /* offsets */ + + } CFF_Transform; + + +FT_END_HEADER + + +#endif /* CFFOTYPES_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/cfftypes.h b/Example/include/freetype/internal/cfftypes.h new file mode 100644 index 0000000..c252176 --- /dev/null +++ b/Example/include/freetype/internal/cfftypes.h @@ -0,0 +1,416 @@ +/**************************************************************************** + * + * cfftypes.h + * + * Basic OpenType/CFF type definitions and interface (specification + * only). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef CFFTYPES_H_ +#define CFFTYPES_H_ + + +#include +#include +#include +#include +#include +#include + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @struct: + * CFF_IndexRec + * + * @description: + * A structure used to model a CFF Index table. + * + * @fields: + * stream :: + * The source input stream. + * + * start :: + * The position of the first index byte in the input stream. + * + * count :: + * The number of elements in the index. + * + * off_size :: + * The size in bytes of object offsets in index. + * + * data_offset :: + * The position of first data byte in the index's bytes. + * + * data_size :: + * The size of the data table in this index. + * + * offsets :: + * A table of element offsets in the index. Must be loaded explicitly. + * + * bytes :: + * If the index is loaded in memory, its bytes. + */ + typedef struct CFF_IndexRec_ + { + FT_Stream stream; + FT_ULong start; + FT_UInt hdr_size; + FT_UInt count; + FT_Byte off_size; + FT_ULong data_offset; + FT_ULong data_size; + + FT_ULong* offsets; + FT_Byte* bytes; + + } CFF_IndexRec, *CFF_Index; + + + typedef struct CFF_EncodingRec_ + { + FT_UInt format; + FT_ULong offset; + + FT_UInt count; + FT_UShort sids [256]; /* avoid dynamic allocations */ + FT_UShort codes[256]; + + } CFF_EncodingRec, *CFF_Encoding; + + + typedef struct CFF_CharsetRec_ + { + + FT_UInt format; + FT_ULong offset; + + FT_UShort* sids; + FT_UShort* cids; /* the inverse mapping of `sids'; only needed */ + /* for CID-keyed fonts */ + FT_UInt max_cid; + FT_UInt num_glyphs; + + } CFF_CharsetRec, *CFF_Charset; + + + /* cf. 
similar fields in file `ttgxvar.h' from the `truetype' module */ + + typedef struct CFF_VarData_ + { +#if 0 + FT_UInt itemCount; /* not used; always zero */ + FT_UInt shortDeltaCount; /* not used; always zero */ +#endif + + FT_UInt regionIdxCount; /* number of region indexes */ + FT_UInt* regionIndices; /* array of `regionIdxCount' indices; */ + /* these index `varRegionList' */ + } CFF_VarData; + + + /* contribution of one axis to a region */ + typedef struct CFF_AxisCoords_ + { + FT_Fixed startCoord; + FT_Fixed peakCoord; /* zero peak means no effect (factor = 1) */ + FT_Fixed endCoord; + + } CFF_AxisCoords; + + + typedef struct CFF_VarRegion_ + { + CFF_AxisCoords* axisList; /* array of axisCount records */ + + } CFF_VarRegion; + + + typedef struct CFF_VStoreRec_ + { + FT_UInt dataCount; + CFF_VarData* varData; /* array of dataCount records */ + /* vsindex indexes this array */ + FT_UShort axisCount; + FT_UInt regionCount; /* total number of regions defined */ + CFF_VarRegion* varRegionList; + + } CFF_VStoreRec, *CFF_VStore; + + + /* forward reference */ + typedef struct CFF_FontRec_* CFF_Font; + + + /* This object manages one cached blend vector. */ + /* */ + /* There is a BlendRec for Private DICT parsing in each subfont */ + /* and a BlendRec for charstrings in CF2_Font instance data. */ + /* A cached BV may be used across DICTs or Charstrings if inputs */ + /* have not changed. */ + /* */ + /* `usedBV' is reset at the start of each parse or charstring. */ + /* vsindex cannot be changed after a BV is used. */ + /* */ + /* Note: NDV is long (32/64 bit), while BV is 16.16 (FT_Int32). */ + typedef struct CFF_BlendRec_ + { + FT_Bool builtBV; /* blendV has been built */ + FT_Bool usedBV; /* blendV has been used */ + CFF_Font font; /* top level font struct */ + FT_UInt lastVsindex; /* last vsindex used */ + FT_UInt lenNDV; /* normDV length (aka numAxes) */ + FT_Fixed* lastNDV; /* last NDV used */ + FT_UInt lenBV; /* BlendV length (aka numMasters) */ + FT_Int32* BV; /* current blendV (per DICT/glyph) */ + + } CFF_BlendRec, *CFF_Blend; + + + typedef struct CFF_FontRecDictRec_ + { + FT_UInt version; + FT_UInt notice; + FT_UInt copyright; + FT_UInt full_name; + FT_UInt family_name; + FT_UInt weight; + FT_Bool is_fixed_pitch; + FT_Fixed italic_angle; + FT_Fixed underline_position; + FT_Fixed underline_thickness; + FT_Int paint_type; + FT_Int charstring_type; + FT_Matrix font_matrix; + FT_Bool has_font_matrix; + FT_ULong units_per_em; /* temporarily used as scaling value also */ + FT_Vector font_offset; + FT_ULong unique_id; + FT_BBox font_bbox; + FT_Pos stroke_width; + FT_ULong charset_offset; + FT_ULong encoding_offset; + FT_ULong charstrings_offset; + FT_ULong private_offset; + FT_ULong private_size; + FT_Long synthetic_base; + FT_UInt embedded_postscript; + + /* these should only be used for the top-level font dictionary */ + FT_UInt cid_registry; + FT_UInt cid_ordering; + FT_Long cid_supplement; + + FT_Long cid_font_version; + FT_Long cid_font_revision; + FT_Long cid_font_type; + FT_ULong cid_count; + FT_ULong cid_uid_base; + FT_ULong cid_fd_array_offset; + FT_ULong cid_fd_select_offset; + FT_UInt cid_font_name; + + /* the next fields come from the data of the deprecated */ + /* `MultipleMaster' operator; they are needed to parse the (also */ + /* deprecated) `blend' operator in Type 2 charstrings */ + FT_UShort num_designs; + FT_UShort num_axes; + + /* fields for CFF2 */ + FT_ULong vstore_offset; + FT_UInt maxstack; + + } CFF_FontRecDictRec, *CFF_FontRecDict; + + + /* forward reference */ + 
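  /*
   * A sketch of the per-axis contribution implied by `CFF_AxisCoords',
   * following the (simplified) OpenType item-variation-store model;
   * several edge cases from the specification are omitted here.
   * `FT_DivFix' is the public 16.16 division helper from `freetype.h'.
   */

  static FT_Fixed
  axis_factor( const CFF_AxisCoords*  axis,
               FT_Fixed               coord )
  {
    if ( axis->peakCoord == 0 )                 /* no effect         */
      return 0x10000L;                          /* factor = 1.0      */

    if ( coord < axis->startCoord ||
         coord > axis->endCoord   )             /* outside support   */
      return 0;

    if ( coord == axis->peakCoord )
      return 0x10000L;

    if ( coord < axis->peakCoord )              /* rising slope      */
      return FT_DivFix( coord - axis->startCoord,
                        axis->peakCoord - axis->startCoord );

    return FT_DivFix( axis->endCoord - coord,   /* falling slope     */
                      axis->endCoord - axis->peakCoord );
  }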
typedef struct CFF_SubFontRec_* CFF_SubFont; + + + typedef struct CFF_PrivateRec_ + { + FT_Byte num_blue_values; + FT_Byte num_other_blues; + FT_Byte num_family_blues; + FT_Byte num_family_other_blues; + + FT_Pos blue_values[14]; + FT_Pos other_blues[10]; + FT_Pos family_blues[14]; + FT_Pos family_other_blues[10]; + + FT_Fixed blue_scale; + FT_Pos blue_shift; + FT_Pos blue_fuzz; + FT_Pos standard_width; + FT_Pos standard_height; + + FT_Byte num_snap_widths; + FT_Byte num_snap_heights; + FT_Pos snap_widths[13]; + FT_Pos snap_heights[13]; + FT_Bool force_bold; + FT_Fixed force_bold_threshold; + FT_Int lenIV; + FT_Int language_group; + FT_Fixed expansion_factor; + FT_Long initial_random_seed; + FT_ULong local_subrs_offset; + FT_Pos default_width; + FT_Pos nominal_width; + + /* fields for CFF2 */ + FT_UInt vsindex; + CFF_SubFont subfont; + + } CFF_PrivateRec, *CFF_Private; + + + typedef struct CFF_FDSelectRec_ + { + FT_Byte format; + FT_UInt range_count; + + /* that's the table, taken from the file `as is' */ + FT_Byte* data; + FT_UInt data_size; + + /* small cache for format 3 only */ + FT_UInt cache_first; + FT_UInt cache_count; + FT_Byte cache_fd; + + } CFF_FDSelectRec, *CFF_FDSelect; + + + /* A SubFont packs a font dict and a private dict together. They are */ + /* needed to support CID-keyed CFF fonts. */ + typedef struct CFF_SubFontRec_ + { + CFF_FontRecDictRec font_dict; + CFF_PrivateRec private_dict; + + /* fields for CFF2 */ + CFF_BlendRec blend; /* current blend vector */ + FT_UInt lenNDV; /* current length NDV or zero */ + FT_Fixed* NDV; /* ptr to current NDV or NULL */ + + /* `blend_stack' is a writable buffer to hold blend results. */ + /* This buffer is to the side of the normal cff parser stack; */ + /* `cff_parse_blend' and `cff_blend_doBlend' push blend results here. */ + /* The normal stack then points to these values instead of the DICT */ + /* because all other operators in Private DICT clear the stack. */ + /* `blend_stack' could be cleared at each operator other than blend. */ + /* Blended values are stored as 5-byte fixed-point values. */ + + FT_Byte* blend_stack; /* base of stack allocation */ + FT_Byte* blend_top; /* first empty slot */ + FT_UInt blend_used; /* number of bytes in use */ + FT_UInt blend_alloc; /* number of bytes allocated */ + + CFF_IndexRec local_subrs_index; + FT_Byte** local_subrs; /* array of pointers */ + /* into Local Subrs INDEX data */ + + FT_UInt32 random; + + } CFF_SubFontRec; + + +#define CFF_MAX_CID_FONTS 256 + + + typedef struct CFF_FontRec_ + { + FT_Library library; + FT_Stream stream; + FT_Memory memory; /* TODO: take this from stream->memory? 
*/ + FT_ULong base_offset; /* offset to start of CFF */ + FT_UInt num_faces; + FT_UInt num_glyphs; + + FT_Byte version_major; + FT_Byte version_minor; + FT_Byte header_size; + + FT_UInt top_dict_length; /* cff2 only */ + + FT_Bool cff2; + + CFF_IndexRec name_index; + CFF_IndexRec top_dict_index; + CFF_IndexRec global_subrs_index; + + CFF_EncodingRec encoding; + CFF_CharsetRec charset; + + CFF_IndexRec charstrings_index; + CFF_IndexRec font_dict_index; + CFF_IndexRec private_index; + CFF_IndexRec local_subrs_index; + + FT_String* font_name; + + /* array of pointers into Global Subrs INDEX data */ + FT_Byte** global_subrs; + + /* array of pointers into String INDEX data stored at string_pool */ + FT_UInt num_strings; + FT_Byte** strings; + FT_Byte* string_pool; + FT_ULong string_pool_size; + + CFF_SubFontRec top_font; + FT_UInt num_subfonts; + CFF_SubFont subfonts[CFF_MAX_CID_FONTS]; + + CFF_FDSelectRec fd_select; + + /* interface to PostScript hinter */ + PSHinter_Service pshinter; + + /* interface to Postscript Names service */ + FT_Service_PsCMaps psnames; + + /* interface to CFFLoad service */ + const void* cffload; + + /* since version 2.3.0 */ + PS_FontInfoRec* font_info; /* font info dictionary */ + + /* since version 2.3.6 */ + FT_String* registry; + FT_String* ordering; + + /* since version 2.4.12 */ + FT_Generic cf2_instance; + + /* since version 2.7.1 */ + CFF_VStoreRec vstore; /* parsed vstore structure */ + + /* since version 2.9 */ + PS_FontExtraRec* font_extra; + + } CFF_FontRec; + + +FT_END_HEADER + +#endif /* CFFTYPES_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/compiler-macros.h b/Example/include/freetype/internal/compiler-macros.h new file mode 100644 index 0000000..6f67650 --- /dev/null +++ b/Example/include/freetype/internal/compiler-macros.h @@ -0,0 +1,343 @@ +/**************************************************************************** + * + * internal/compiler-macros.h + * + * Compiler-specific macro definitions used internally by FreeType. + * + * Copyright (C) 2020-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + +#ifndef INTERNAL_COMPILER_MACROS_H_ +#define INTERNAL_COMPILER_MACROS_H_ + +#include + +FT_BEGIN_HEADER + + /* Fix compiler warning with sgi compiler. */ +#if defined( __sgi ) && !defined( __GNUC__ ) +# if defined( _COMPILER_VERSION ) && ( _COMPILER_VERSION >= 730 ) +# pragma set woff 3505 +# endif +#endif + + /* Fix compiler warning with sgi compiler. */ +#if defined( __sgi ) && !defined( __GNUC__ ) +# if defined( _COMPILER_VERSION ) && ( _COMPILER_VERSION >= 730 ) +# pragma set woff 3505 +# endif +#endif + + /* Newer compilers warn for fall-through case statements. */ +#ifndef FALL_THROUGH +# if ( defined( __STDC_VERSION__ ) && __STDC_VERSION__ > 201710L ) || \ + ( defined( __cplusplus ) && __cplusplus > 201402L ) +# define FALL_THROUGH [[__fallthrough__]] +# elif ( defined( __GNUC__ ) && __GNUC__ >= 7 ) || \ + ( defined( __clang__ ) && \ + ( defined( __apple_build_version__ ) \ + ? 
__apple_build_version__ >= 12000000 \ + : __clang_major__ >= 10 ) ) +# define FALL_THROUGH __attribute__(( __fallthrough__ )) +# else +# define FALL_THROUGH ( (void)0 ) +# endif +#endif + + /* + * When defining a macro that expands to a non-trivial C statement, use + * FT_BEGIN_STMNT and FT_END_STMNT to enclose the macro's body. This + * ensures there are no surprises when the macro is invoked in conditional + * branches. + * + * Example: + * + * #define LOG( ... ) \ + * FT_BEGIN_STMNT \ + * if ( logging_enabled ) \ + * log( __VA_ARGS__ ); \ + * FT_END_STMNT + */ +#define FT_BEGIN_STMNT do { +#define FT_END_STMNT } while ( 0 ) + + /* + * FT_DUMMY_STMNT expands to an empty C statement. Useful for + * conditionally defined statement macros. + * + * Example: + * + * #ifdef BUILD_CONFIG_LOGGING + * #define LOG( ... ) \ + * FT_BEGIN_STMNT \ + * if ( logging_enabled ) \ + * log( __VA_ARGS__ ); \ + * FT_END_STMNT + * #else + * # define LOG( ... ) FT_DUMMY_STMNT + * #endif + */ +#define FT_DUMMY_STMNT FT_BEGIN_STMNT FT_END_STMNT + +#ifdef __UINTPTR_TYPE__ + /* + * GCC and Clang both provide a `__UINTPTR_TYPE__` that can be used to + * avoid a dependency on `stdint.h`. + */ +# define FT_UINT_TO_POINTER( x ) (void *)(__UINTPTR_TYPE__)(x) +#elif defined( _WIN64 ) + /* only 64bit Windows uses the LLP64 data model, i.e., */ + /* 32-bit integers, 64-bit pointers. */ +# define FT_UINT_TO_POINTER( x ) (void *)(unsigned __int64)(x) +#else +# define FT_UINT_TO_POINTER( x ) (void *)(unsigned long)(x) +#endif + + /* + * Use `FT_TYPEOF( type )` to cast a value to `type`. This is useful to + * suppress signedness compilation warnings in macros. + * + * Example: + * + * #define PAD_( x, n ) ( (x) & ~FT_TYPEOF( x )( (n) - 1 ) ) + * + * (The `typeof` condition is taken from gnulib's `intprops.h` header + * file.) + */ +#if ( ( defined( __GNUC__ ) && __GNUC__ >= 2 ) || \ + ( defined( __IBMC__ ) && __IBMC__ >= 1210 && \ + defined( __IBM__TYPEOF__ ) ) || \ + ( defined( __SUNPRO_C ) && __SUNPRO_C >= 0x5110 && !__STDC__ ) ) +#define FT_TYPEOF( type ) ( __typeof__ ( type ) ) +#else +#define FT_TYPEOF( type ) /* empty */ +#endif + + /* + * Mark a function declaration as internal to the library. This ensures + * that it will not be exposed by default to client code, and helps + * generate smaller and faster code on ELF-based platforms. Place this + * before a function declaration. + */ + + /* Visual C, mingw */ +#if defined( _WIN32 ) +#define FT_INTERNAL_FUNCTION_ATTRIBUTE /* empty */ + + /* gcc, clang */ +#elif ( defined( __GNUC__ ) && __GNUC__ >= 4 ) || defined( __clang__ ) +#define FT_INTERNAL_FUNCTION_ATTRIBUTE \ + __attribute__(( visibility( "hidden" ) )) + + /* Sun */ +#elif defined( __SUNPRO_C ) && __SUNPRO_C >= 0x550 +#define FT_INTERNAL_FUNCTION_ATTRIBUTE __hidden + +#else +#define FT_INTERNAL_FUNCTION_ATTRIBUTE /* empty */ +#endif + + /* + * FreeType supports compilation of its C sources with a C++ compiler (in + * C++ mode); this introduces a number of subtle issues. + * + * The main one is that a C++ function declaration and its definition must + * have the same 'linkage'. Because all FreeType headers declare their + * functions with C linkage (i.e., within an `extern "C" { ... }` block + * due to the magic of FT_BEGIN_HEADER and FT_END_HEADER), their + * definition in FreeType sources should also be prefixed with `extern + * "C"` when compiled in C++ mode. 
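  /*
   * A sketch of the intended use of `FALL_THROUGH' above: it marks an
   * intentional fall-through between `switch' cases so that newer
   * compilers do not emit a warning.
   */

  static int
  classify( int  n )
  {
    int  result = 0;


    switch ( n )
    {
    case 0:
      result += 1;        /* deliberately continue with `case 1' */
      FALL_THROUGH;
    case 1:
      result += 1;
      break;
    default:
      break;
    }

    return result;
  }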
+ * + * The `FT_FUNCTION_DECLARATION` and `FT_FUNCTION_DEFINITION` macros are + * provided to deal with this case, as well as `FT_CALLBACK_DEF` and its + * siblings below. + */ + + /* + * `FT_FUNCTION_DECLARATION( type )` can be used to write a C function + * declaration to ensure it will have C linkage when the library is built + * with a C++ compiler. The parameter is the function's return type, so a + * declaration would look like + * + * FT_FUNCTION_DECLARATION( int ) + * foo( int x ); + * + * NOTE: This requires that all uses are inside of `FT_BEGIN_HEADER ... + * FT_END_HEADER` blocks, which guarantees that the declarations have C + * linkage when the headers are included by C++ sources. + * + * NOTE: Do not use directly. Use `FT_LOCAL`, `FT_BASE`, and `FT_EXPORT` + * instead. + */ +#define FT_FUNCTION_DECLARATION( x ) extern x + + /* + * Same as `FT_FUNCTION_DECLARATION`, but for function definitions instead. + * + * NOTE: Do not use directly. Use `FT_LOCAL_DEF`, `FT_BASE_DEF`, and + * `FT_EXPORT_DEF` instead. + */ +#ifdef __cplusplus +#define FT_FUNCTION_DEFINITION( x ) extern "C" x +#else +#define FT_FUNCTION_DEFINITION( x ) x +#endif + + /* + * Use `FT_LOCAL` and `FT_LOCAL_DEF` to declare and define, respectively, + * an internal FreeType function that is only used by the sources of a + * single `src/module/` directory. This ensures that the functions are + * turned into static ones at build time, resulting in smaller and faster + * code. + */ +#ifdef FT_MAKE_OPTION_SINGLE_OBJECT + +#define FT_LOCAL( x ) static x +#define FT_LOCAL_DEF( x ) static x + +#else + +#define FT_LOCAL( x ) FT_INTERNAL_FUNCTION_ATTRIBUTE \ + FT_FUNCTION_DECLARATION( x ) +#define FT_LOCAL_DEF( x ) FT_FUNCTION_DEFINITION( x ) + +#endif /* FT_MAKE_OPTION_SINGLE_OBJECT */ + + /* + * Use `FT_LOCAL_ARRAY` and `FT_LOCAL_ARRAY_DEF` to declare and define, + * respectively, a constant array that must be accessed from several + * sources in the same `src/module/` sub-directory, and which are internal + * to the library. + */ +#define FT_LOCAL_ARRAY( x ) FT_INTERNAL_FUNCTION_ATTRIBUTE \ + extern const x +#define FT_LOCAL_ARRAY_DEF( x ) FT_FUNCTION_DEFINITION( const x ) + + /* + * `Use FT_BASE` and `FT_BASE_DEF` to declare and define, respectively, an + * internal library function that is used by more than a single module. + */ +#define FT_BASE( x ) FT_INTERNAL_FUNCTION_ATTRIBUTE \ + FT_FUNCTION_DECLARATION( x ) +#define FT_BASE_DEF( x ) FT_FUNCTION_DEFINITION( x ) + + + /* + * NOTE: Conditionally define `FT_EXPORT_VAR` due to its definition in + * `src/smooth/ftgrays.h` to make the header more portable. + */ +#ifndef FT_EXPORT_VAR +#define FT_EXPORT_VAR( x ) FT_FUNCTION_DECLARATION( x ) +#endif + + /* + * When compiling FreeType as a DLL or DSO with hidden visibility, + * some systems/compilers need a special attribute in front OR after + * the return type of function declarations. + * + * Two macros are used within the FreeType source code to define + * exported library functions: `FT_EXPORT` and `FT_EXPORT_DEF`. + * + * - `FT_EXPORT( return_type )` + * + * is used in a function declaration, as in + * + * ``` + * FT_EXPORT( FT_Error ) + * FT_Init_FreeType( FT_Library* alibrary ); + * ``` + * + * - `FT_EXPORT_DEF( return_type )` + * + * is used in a function definition, as in + * + * ``` + * FT_EXPORT_DEF( FT_Error ) + * FT_Init_FreeType( FT_Library* alibrary ) + * { + * ... some code ... 
+ * return FT_Err_Ok; + * } + * ``` + * + * You can provide your own implementation of `FT_EXPORT` and + * `FT_EXPORT_DEF` here if you want. + * + * To export a variable, use `FT_EXPORT_VAR`. + */ + + /* See `freetype/config/public-macros.h` for the `FT_EXPORT` definition */ +#define FT_EXPORT_DEF( x ) FT_FUNCTION_DEFINITION( x ) + + /* + * The following macros are needed to compile the library with a + * C++ compiler and with 16bit compilers. + */ + + /* + * This is special. Within C++, you must specify `extern "C"` for + * functions which are used via function pointers, and you also + * must do that for structures which contain function pointers to + * assure C linkage -- it's not possible to have (local) anonymous + * functions which are accessed by (global) function pointers. + * + * + * FT_CALLBACK_DEF is used to _define_ a callback function, + * located in the same source code file as the structure that uses + * it. FT_COMPARE_DEF, in addition, ensures the `cdecl` calling + * convention on x86, required by the C library function `qsort`. + * + * FT_BASE_CALLBACK and FT_BASE_CALLBACK_DEF are used to declare + * and define a callback function, respectively, in a similar way + * as FT_BASE and FT_BASE_DEF work. + * + * FT_CALLBACK_TABLE is used to _declare_ a constant variable that + * contains pointers to callback functions. + * + * FT_CALLBACK_TABLE_DEF is used to _define_ a constant variable + * that contains pointers to callback functions. + * + * + * Some 16bit compilers have to redefine these macros to insert + * the infamous `_cdecl` or `__fastcall` declarations. + */ +#ifdef __cplusplus +#define FT_CALLBACK_DEF( x ) extern "C" x +#else +#define FT_CALLBACK_DEF( x ) static x +#endif + +#if defined( __GNUC__ ) && defined( __i386__ ) +#define FT_COMPARE_DEF( x ) FT_CALLBACK_DEF( x ) __attribute__(( cdecl )) +#elif defined( _MSC_VER ) && defined( _M_IX86 ) +#define FT_COMPARE_DEF( x ) FT_CALLBACK_DEF( x ) __cdecl +#elif defined( __WATCOMC__ ) && __WATCOMC__ >= 1240 +#define FT_COMPARE_DEF( x ) FT_CALLBACK_DEF( x ) __watcall +#else +#define FT_COMPARE_DEF( x ) FT_CALLBACK_DEF( x ) +#endif + +#define FT_BASE_CALLBACK( x ) FT_FUNCTION_DECLARATION( x ) +#define FT_BASE_CALLBACK_DEF( x ) FT_FUNCTION_DEFINITION( x ) + +#ifndef FT_CALLBACK_TABLE +#ifdef __cplusplus +#define FT_CALLBACK_TABLE extern "C" +#define FT_CALLBACK_TABLE_DEF extern "C" +#else +#define FT_CALLBACK_TABLE extern +#define FT_CALLBACK_TABLE_DEF /* nothing */ +#endif +#endif /* FT_CALLBACK_TABLE */ + +FT_END_HEADER + +#endif /* INTERNAL_COMPILER_MACROS_H_ */ diff --git a/Example/include/freetype/internal/ftcalc.h b/Example/include/freetype/internal/ftcalc.h new file mode 100644 index 0000000..d1baa39 --- /dev/null +++ b/Example/include/freetype/internal/ftcalc.h @@ -0,0 +1,581 @@ +/**************************************************************************** + * + * ftcalc.h + * + * Arithmetic computations (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. 
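  /*
   * Returning to the callback macros from `compiler-macros.h' above: a
   * sketch of a `qsort' comparator written with `FT_COMPARE_DEF', which
   * ensures the calling convention `qsort' expects on x86.  The
   * glyph-index array is hypothetical.
   */

#include <stdlib.h>

  FT_COMPARE_DEF( int )
  compare_gindex( const void*  a,
                  const void*  b )
  {
    FT_UInt  ga = *(const FT_UInt*)a;
    FT_UInt  gb = *(const FT_UInt*)b;


    return ga < gb ? -1 : ga > gb ? 1 : 0;
  }


  static void
  sort_gindices( FT_UInt*  gindices,
                 size_t    count )
  {
    qsort( gindices, count, sizeof ( FT_UInt ), compare_gindex );
  }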
+ * + */ + + +#ifndef FTCALC_H_ +#define FTCALC_H_ + + +#include + +#include "compiler-macros.h" + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * FT_MulDiv() and FT_MulFix() are declared in freetype.h. + * + */ + +#ifndef FT_CONFIG_OPTION_NO_ASSEMBLER + /* Provide assembler fragments for performance-critical functions. */ + /* These must be defined `static __inline__' with GCC. */ + +#if defined( __CC_ARM ) || defined( __ARMCC__ ) /* RVCT */ + +#define FT_MULFIX_ASSEMBLER FT_MulFix_arm + + /* documentation is in freetype.h */ + + static __inline FT_Int32 + FT_MulFix_arm( FT_Int32 a, + FT_Int32 b ) + { + FT_Int32 t, t2; + + + __asm + { + smull t2, t, b, a /* (lo=t2,hi=t) = a*b */ + mov a, t, asr #31 /* a = (hi >> 31) */ + add a, a, #0x8000 /* a += 0x8000 */ + adds t2, t2, a /* t2 += a */ + adc t, t, #0 /* t += carry */ + mov a, t2, lsr #16 /* a = t2 >> 16 */ + orr a, a, t, lsl #16 /* a |= t << 16 */ + } + return a; + } + +#endif /* __CC_ARM || __ARMCC__ */ + + +#ifdef __GNUC__ + +#if defined( __arm__ ) && \ + ( !defined( __thumb__ ) || defined( __thumb2__ ) ) && \ + !( defined( __CC_ARM ) || defined( __ARMCC__ ) ) + +#define FT_MULFIX_ASSEMBLER FT_MulFix_arm + + /* documentation is in freetype.h */ + + static __inline__ FT_Int32 + FT_MulFix_arm( FT_Int32 a, + FT_Int32 b ) + { + FT_Int32 t, t2; + + + __asm__ __volatile__ ( + "smull %1, %2, %4, %3\n\t" /* (lo=%1,hi=%2) = a*b */ + "mov %0, %2, asr #31\n\t" /* %0 = (hi >> 31) */ +#if defined( __clang__ ) && defined( __thumb2__ ) + "add.w %0, %0, #0x8000\n\t" /* %0 += 0x8000 */ +#else + "add %0, %0, #0x8000\n\t" /* %0 += 0x8000 */ +#endif + "adds %1, %1, %0\n\t" /* %1 += %0 */ + "adc %2, %2, #0\n\t" /* %2 += carry */ + "mov %0, %1, lsr #16\n\t" /* %0 = %1 >> 16 */ + "orr %0, %0, %2, lsl #16\n\t" /* %0 |= %2 << 16 */ + : "=r"(a), "=&r"(t2), "=&r"(t) + : "r"(a), "r"(b) + : "cc" ); + return a; + } + +#endif /* __arm__ && */ + /* ( __thumb2__ || !__thumb__ ) && */ + /* !( __CC_ARM || __ARMCC__ ) */ + + +#if defined( __i386__ ) + +#define FT_MULFIX_ASSEMBLER FT_MulFix_i386 + + /* documentation is in freetype.h */ + + static __inline__ FT_Int32 + FT_MulFix_i386( FT_Int32 a, + FT_Int32 b ) + { + FT_Int32 result; + + + __asm__ __volatile__ ( + "imul %%edx\n" + "movl %%edx, %%ecx\n" + "sarl $31, %%ecx\n" + "addl $0x8000, %%ecx\n" + "addl %%ecx, %%eax\n" + "adcl $0, %%edx\n" + "shrl $16, %%eax\n" + "shll $16, %%edx\n" + "addl %%edx, %%eax\n" + : "=a"(result), "=d"(b) + : "a"(a), "d"(b) + : "%ecx", "cc" ); + return result; + } + +#endif /* i386 */ + +#endif /* __GNUC__ */ + + +#ifdef _MSC_VER /* Visual C++ */ + +#ifdef _M_IX86 + +#define FT_MULFIX_ASSEMBLER FT_MulFix_i386 + + /* documentation is in freetype.h */ + + static __inline FT_Int32 + FT_MulFix_i386( FT_Int32 a, + FT_Int32 b ) + { + FT_Int32 result; + + __asm + { + mov eax, a + mov edx, b + imul edx + mov ecx, edx + sar ecx, 31 + add ecx, 8000h + add eax, ecx + adc edx, 0 + shr eax, 16 + shl edx, 16 + add eax, edx + mov result, eax + } + return result; + } + +#endif /* _M_IX86 */ + +#endif /* _MSC_VER */ + + +#if defined( __GNUC__ ) && defined( __x86_64__ ) + +#define FT_MULFIX_ASSEMBLER FT_MulFix_x86_64 + + static __inline__ FT_Int32 + FT_MulFix_x86_64( FT_Int32 a, + FT_Int32 b ) + { + /* Temporarily disable the warning that C90 doesn't support */ + /* `long long'. 
*/ +#if __GNUC__ > 4 || ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 ) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wlong-long" +#endif + +#if 1 + /* Technically not an assembly fragment, but GCC does a really good */ + /* job at inlining it and generating good machine code for it. */ + long long ret, tmp; + + + ret = (long long)a * b; + tmp = ret >> 63; + ret += 0x8000 + tmp; + + return (FT_Int32)( ret >> 16 ); +#else + + /* For some reason, GCC 4.6 on Ubuntu 12.04 generates invalid machine */ + /* code from the lines below. The main issue is that `wide_a' is not */ + /* properly initialized by sign-extending `a'. Instead, the generated */ + /* machine code assumes that the register that contains `a' on input */ + /* can be used directly as a 64-bit value, which is wrong most of the */ + /* time. */ + long long wide_a = (long long)a; + long long wide_b = (long long)b; + long long result; + + + __asm__ __volatile__ ( + "imul %2, %1\n" + "mov %1, %0\n" + "sar $63, %0\n" + "lea 0x8000(%1, %0), %0\n" + "sar $16, %0\n" + : "=&r"(result), "=&r"(wide_a) + : "r"(wide_b) + : "cc" ); + + return (FT_Int32)result; +#endif + +#if __GNUC__ > 4 || ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 ) +#pragma GCC diagnostic pop +#endif + } + +#endif /* __GNUC__ && __x86_64__ */ + +#endif /* !FT_CONFIG_OPTION_NO_ASSEMBLER */ + + +#ifdef FT_CONFIG_OPTION_INLINE_MULFIX +#ifdef FT_MULFIX_ASSEMBLER +#define FT_MulFix( a, b ) FT_MULFIX_ASSEMBLER( (FT_Int32)(a), (FT_Int32)(b) ) +#endif +#endif + + + /************************************************************************** + * + * @function: + * FT_MulDiv_No_Round + * + * @description: + * A very simple function used to perform the computation '(a*b)/c' + * (without rounding) with maximum accuracy (it uses a 64-bit + * intermediate integer whenever necessary). + * + * This function isn't necessarily as fast as some processor-specific + * operations, but is at least completely portable. + * + * @input: + * a :: + * The first multiplier. + * b :: + * The second multiplier. + * c :: + * The divisor. + * + * @return: + * The result of '(a*b)/c'. This function never traps when trying to + * divide by zero; it simply returns 'MaxInt' or 'MinInt' depending on + * the signs of 'a' and 'b'. + */ + FT_BASE( FT_Long ) + FT_MulDiv_No_Round( FT_Long a, + FT_Long b, + FT_Long c ); + + + /************************************************************************** + * + * @function: + * FT_MulAddFix + * + * @description: + * Compute `(s[0] * f[0] + s[1] * f[1] + ...) / 0x10000`, where `s[n]` is + * usually a 16.16 scalar. + * + * @input: + * s :: + * The array of scalars. + * f :: + * The array of factors. + * count :: + * The number of entries in the array. + * + * @return: + * The result of `(s[0] * f[0] + s[1] * f[1] + ...) / 0x10000`. + * + * @note: + * This function is currently used for the scaled delta computation of + * variation stores. It internally uses 64-bit data types when + * available, otherwise it emulates 64-bit math by using 32-bit + * operations, which produce a correct result but most likely at a slower + * performance in comparison to the implementation base on `int64_t`. + * + */ + FT_BASE( FT_Int32 ) + FT_MulAddFix( FT_Fixed* s, + FT_Int32* f, + FT_UInt count ); + + + /* + * A variant of FT_Matrix_Multiply which scales its result afterwards. The + * idea is that both `a' and `b' are scaled by factors of 10 so that the + * values are as precise as possible to get a correct result during the + * 64bit multiplication. 
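  /*
   * A concrete (hypothetical) call of `FT_MulAddFix' declared above:
   * two 16.16 scalars applied to two integer deltas.
   */

  static FT_Int32
  scaled_delta_example( void )
  {
    FT_Fixed  scalars[2] = { 0x8000L, 0x10000L };   /* 0.5 and 1.0 */
    FT_Int32  deltas[2]  = { 100, 200 };


    /* ( 0x8000 * 100 + 0x10000 * 200 ) / 0x10000            */
    /*   = 0.5 * 100 + 1.0 * 200                              */
    /*   = 250                                                */
    return FT_MulAddFix( scalars, deltas, 2 );
  }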
Let `sa' and `sb' be the scaling factors of `a' + * and `b', respectively, then the scaling factor of the result is `sa*sb'. + */ + FT_BASE( void ) + FT_Matrix_Multiply_Scaled( const FT_Matrix* a, + FT_Matrix *b, + FT_Long scaling ); + + + /* + * Check a matrix. If the transformation would lead to extreme shear or + * extreme scaling, for example, return 0. If everything is OK, return 1. + * + * Based on geometric considerations we use the following inequality to + * identify a degenerate matrix. + * + * 50 * abs(xx*yy - xy*yx) < xx^2 + xy^2 + yx^2 + yy^2 + * + * Value 50 is heuristic. + */ + FT_BASE( FT_Bool ) + FT_Matrix_Check( const FT_Matrix* matrix ); + + + /* + * A variant of FT_Vector_Transform. See comments for + * FT_Matrix_Multiply_Scaled. + */ + FT_BASE( void ) + FT_Vector_Transform_Scaled( FT_Vector* vector, + const FT_Matrix* matrix, + FT_Long scaling ); + + + /* + * This function normalizes a vector and returns its original length. The + * normalized vector is a 16.16 fixed-point unit vector with length close + * to 0x10000. The accuracy of the returned length is limited to 16 bits + * also. The function utilizes quick inverse square root approximation + * without divisions and square roots relying on Newton's iterations + * instead. + */ + FT_BASE( FT_UInt32 ) + FT_Vector_NormLen( FT_Vector* vector ); + + + /* + * Return -1, 0, or +1, depending on the orientation of a given corner. We + * use the Cartesian coordinate system, with positive vertical values going + * upwards. The function returns +1 if the corner turns to the left, -1 to + * the right, and 0 for undecidable cases. + */ + FT_BASE( FT_Int ) + ft_corner_orientation( FT_Pos in_x, + FT_Pos in_y, + FT_Pos out_x, + FT_Pos out_y ); + + + /* + * Return TRUE if a corner is flat or nearly flat. This is equivalent to + * saying that the corner point is close to its neighbors, or inside an + * ellipse defined by the neighbor focal points to be more precise. + */ + FT_BASE( FT_Int ) + ft_corner_is_flat( FT_Pos in_x, + FT_Pos in_y, + FT_Pos out_x, + FT_Pos out_y ); + + + /* + * Return the most significant bit index. 
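  /*
   * Conceptually, `ft_corner_orientation' is a 2D cross-product sign
   * test; the naive sketch below ignores the overflow concerns that a
   * real implementation has to address for large coordinates.
   */

  static FT_Int
  corner_orientation_sketch( FT_Pos  in_x,
                             FT_Pos  in_y,
                             FT_Pos  out_x,
                             FT_Pos  out_y )
  {
    FT_Pos  cross = in_x * out_y - in_y * out_x;


    /* positive cross product: counter-clockwise, i.e. a left turn */
    return cross > 0 ? 1 : cross < 0 ? -1 : 0;
  }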
+ */ + +#ifndef FT_CONFIG_OPTION_NO_ASSEMBLER + +#if defined( __clang__ ) || ( defined( __GNUC__ ) && \ + ( __GNUC__ > 3 || ( __GNUC__ == 3 && __GNUC_MINOR__ >= 4 ) ) ) + +#if FT_SIZEOF_INT == 4 + +#define FT_MSB( x ) ( 31 - __builtin_clz( x ) ) + +#elif FT_SIZEOF_LONG == 4 + +#define FT_MSB( x ) ( 31 - __builtin_clzl( x ) ) + +#endif + +#elif defined( _MSC_VER ) && _MSC_VER >= 1400 + +#if defined( _WIN32_WCE ) + +#include +#pragma intrinsic( _CountLeadingZeros ) + +#define FT_MSB( x ) ( 31 - _CountLeadingZeros( x ) ) + +#elif defined( _M_ARM64 ) || defined( _M_ARM ) + +#include +#pragma intrinsic( _CountLeadingZeros ) + +#define FT_MSB( x ) ( 31 - _CountLeadingZeros( x ) ) + +#elif defined( _M_IX86 ) || defined( _M_AMD64 ) || defined( _M_IA64 ) + +#include +#pragma intrinsic( _BitScanReverse ) + + static __inline FT_Int32 + FT_MSB_i386( FT_UInt32 x ) + { + unsigned long where; + + + _BitScanReverse( &where, x ); + + return (FT_Int32)where; + } + +#define FT_MSB( x ) FT_MSB_i386( x ) + +#endif + +#elif defined( __WATCOMC__ ) && defined( __386__ ) + + extern __inline FT_Int32 + FT_MSB_i386( FT_UInt32 x ); + +#pragma aux FT_MSB_i386 = \ + "bsr eax, eax" \ + __parm [__eax] __nomemory \ + __value [__eax] \ + __modify __exact [__eax] __nomemory; + +#define FT_MSB( x ) FT_MSB_i386( x ) + +#elif defined( __DECC ) || defined( __DECCXX ) + +#include + +#define FT_MSB( x ) (FT_Int)( 63 - _leadz( x ) ) + +#elif defined( _CRAYC ) + +#include + +#define FT_MSB( x ) (FT_Int)( 31 - _leadz32( x ) ) + +#endif /* FT_MSB macro definitions */ + +#endif /* !FT_CONFIG_OPTION_NO_ASSEMBLER */ + + +#ifndef FT_MSB + + FT_BASE( FT_Int ) + FT_MSB( FT_UInt32 z ); + +#endif + + + /* + * Return sqrt(x*x+y*y), which is the same as `FT_Vector_Length' but uses + * two fixed-point arguments instead. + */ + FT_BASE( FT_Fixed ) + FT_Hypot( FT_Fixed x, + FT_Fixed y ); + + +#if 0 + + /************************************************************************** + * + * @function: + * FT_SqrtFixed + * + * @description: + * Computes the square root of a 16.16 fixed-point value. + * + * @input: + * x :: + * The value to compute the root for. + * + * @return: + * The result of 'sqrt(x)'. + * + * @note: + * This function is not very fast. + */ + FT_BASE( FT_Int32 ) + FT_SqrtFixed( FT_Int32 x ); + +#endif /* 0 */ + + +#define INT_TO_F26DOT6( x ) ( (FT_Long)(x) * 64 ) /* << 6 */ +#define INT_TO_F2DOT14( x ) ( (FT_Long)(x) * 16384 ) /* << 14 */ +#define INT_TO_FIXED( x ) ( (FT_Long)(x) * 65536 ) /* << 16 */ +#define F2DOT14_TO_FIXED( x ) ( (FT_Long)(x) * 4 ) /* << 2 */ +#define FIXED_TO_INT( x ) ( FT_RoundFix( x ) >> 16 ) + +#define ROUND_F26DOT6( x ) ( ( (x) + 32 - ( x < 0 ) ) & -64 ) + + /* + * The following macros have two purposes. + * + * - Tag places where overflow is expected and harmless. + * + * - Avoid run-time sanitizer errors. + * + * Use with care! 
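  /*
   * A few hand-computed results of the conversion and rounding macros
   * above (illustrative only).
   */

  static void
  conversion_examples( void )
  {
    FT_Pos   a = INT_TO_F26DOT6( 12 );      /* 12 << 6  == 768         */
    FT_Long  b = INT_TO_FIXED( 3 );         /*  3 << 16 == 0x30000     */
    FT_Long  c = FIXED_TO_INT( 0x18000L );  /* 1.5 rounds to 2         */
    FT_Pos   d = ROUND_F26DOT6( 95 );       /* ~1.48px rounds to 64    */


    FT_UNUSED( a );
    FT_UNUSED( b );
    FT_UNUSED( c );
    FT_UNUSED( d );
  }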
+ */ +#define ADD_INT( a, b ) \ + (FT_Int)( (FT_UInt)(a) + (FT_UInt)(b) ) +#define SUB_INT( a, b ) \ + (FT_Int)( (FT_UInt)(a) - (FT_UInt)(b) ) +#define MUL_INT( a, b ) \ + (FT_Int)( (FT_UInt)(a) * (FT_UInt)(b) ) +#define NEG_INT( a ) \ + (FT_Int)( (FT_UInt)0 - (FT_UInt)(a) ) + +#define ADD_LONG( a, b ) \ + (FT_Long)( (FT_ULong)(a) + (FT_ULong)(b) ) +#define SUB_LONG( a, b ) \ + (FT_Long)( (FT_ULong)(a) - (FT_ULong)(b) ) +#define MUL_LONG( a, b ) \ + (FT_Long)( (FT_ULong)(a) * (FT_ULong)(b) ) +#define NEG_LONG( a ) \ + (FT_Long)( (FT_ULong)0 - (FT_ULong)(a) ) + +#define ADD_INT32( a, b ) \ + (FT_Int32)( (FT_UInt32)(a) + (FT_UInt32)(b) ) +#define SUB_INT32( a, b ) \ + (FT_Int32)( (FT_UInt32)(a) - (FT_UInt32)(b) ) +#define MUL_INT32( a, b ) \ + (FT_Int32)( (FT_UInt32)(a) * (FT_UInt32)(b) ) +#define NEG_INT32( a ) \ + (FT_Int32)( (FT_UInt32)0 - (FT_UInt32)(a) ) + +#ifdef FT_INT64 + +#define ADD_INT64( a, b ) \ + (FT_Int64)( (FT_UInt64)(a) + (FT_UInt64)(b) ) +#define SUB_INT64( a, b ) \ + (FT_Int64)( (FT_UInt64)(a) - (FT_UInt64)(b) ) +#define MUL_INT64( a, b ) \ + (FT_Int64)( (FT_UInt64)(a) * (FT_UInt64)(b) ) +#define NEG_INT64( a ) \ + (FT_Int64)( (FT_UInt64)0 - (FT_UInt64)(a) ) + +#endif /* FT_INT64 */ + + +FT_END_HEADER + +#endif /* FTCALC_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/ftdebug.h b/Example/include/freetype/internal/ftdebug.h new file mode 100644 index 0000000..4e013ba --- /dev/null +++ b/Example/include/freetype/internal/ftdebug.h @@ -0,0 +1,442 @@ +/**************************************************************************** + * + * ftdebug.h + * + * Debugging and logging component (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + * + * IMPORTANT: A description of FreeType's debugging support can be + * found in 'docs/DEBUG.TXT'. Read it if you need to use or + * understand this code. + * + */ + + +#ifndef FTDEBUG_H_ +#define FTDEBUG_H_ + + +#include +#include FT_CONFIG_CONFIG_H +#include + +#include "compiler-macros.h" + +#ifdef FT_DEBUG_LOGGING +#define DLG_STATIC +#include +#include + +#include +#endif /* FT_DEBUG_LOGGING */ + + +FT_BEGIN_HEADER + + /* force the definition of FT_DEBUG_LEVEL_TRACE if FT_DEBUG_LOGGING is */ + /* already defined. */ + /* */ +#ifdef FT_DEBUG_LOGGING +#undef FT_DEBUG_LEVEL_TRACE +#define FT_DEBUG_LEVEL_TRACE +#endif + + /* force the definition of FT_DEBUG_LEVEL_ERROR if FT_DEBUG_LEVEL_TRACE */ + /* is already defined; this simplifies the following #ifdefs */ + /* */ +#ifdef FT_DEBUG_LEVEL_TRACE +#undef FT_DEBUG_LEVEL_ERROR +#define FT_DEBUG_LEVEL_ERROR +#endif + + + /************************************************************************** + * + * Define the trace enums as well as the trace levels array when they are + * needed. 
+ * + */ + +#ifdef FT_DEBUG_LEVEL_TRACE + +#define FT_TRACE_DEF( x ) trace_ ## x , + + /* defining the enumeration */ + typedef enum FT_Trace_ + { +#include + trace_count + + } FT_Trace; + + + /* a pointer to the array of trace levels, */ + /* provided by `src/base/ftdebug.c' */ + extern int* ft_trace_levels; + +#undef FT_TRACE_DEF + +#endif /* FT_DEBUG_LEVEL_TRACE */ + + + /************************************************************************** + * + * Define the FT_TRACE macro + * + * IMPORTANT! + * + * Each component must define the macro FT_COMPONENT to a valid FT_Trace + * value before using any TRACE macro. + * + * To get consistent logging output, there should be no newline character + * (i.e., '\n') or a single trailing one in the message string of + * `FT_TRACEx` and `FT_ERROR`. + */ + + + /************************************************************************* + * + * If FT_DEBUG_LOGGING is enabled, tracing messages are sent to dlg's API. + * If FT_DEBUG_LOGGING is disabled, tracing messages are sent to + * `FT_Message` (defined in ftdebug.c). + */ +#ifdef FT_DEBUG_LOGGING + + /* we need two macros to convert the names of `FT_COMPONENT` to a string */ +#define FT_LOGGING_TAG( x ) FT_LOGGING_TAG_( x ) +#define FT_LOGGING_TAG_( x ) #x + + /* we need two macros to convert the component and the trace level */ + /* to a string that combines them */ +#define FT_LOGGING_TAGX( x, y ) FT_LOGGING_TAGX_( x, y ) +#define FT_LOGGING_TAGX_( x, y ) #x ":" #y + + +#define FT_LOG( level, varformat ) \ + do \ + { \ + const char* dlg_tag = FT_LOGGING_TAGX( FT_COMPONENT, level ); \ + \ + \ + ft_add_tag( dlg_tag ); \ + if ( ft_trace_levels[FT_TRACE_COMP( FT_COMPONENT )] >= level ) \ + { \ + if ( custom_output_handler != NULL ) \ + FT_Logging_Callback varformat; \ + else \ + dlg_trace varformat; \ + } \ + ft_remove_tag( dlg_tag ); \ + } while( 0 ) + +#else /* !FT_DEBUG_LOGGING */ + +#define FT_LOG( level, varformat ) \ + do \ + { \ + if ( ft_trace_levels[FT_TRACE_COMP( FT_COMPONENT )] >= level ) \ + FT_Message varformat; \ + } while ( 0 ) + +#endif /* !FT_DEBUG_LOGGING */ + + +#ifdef FT_DEBUG_LEVEL_TRACE + + /* we need two macros here to make cpp expand `FT_COMPONENT' */ +#define FT_TRACE_COMP( x ) FT_TRACE_COMP_( x ) +#define FT_TRACE_COMP_( x ) trace_ ## x + +#define FT_TRACE( level, varformat ) FT_LOG( level, varformat ) + +#else /* !FT_DEBUG_LEVEL_TRACE */ + +#define FT_TRACE( level, varformat ) do { } while ( 0 ) /* nothing */ + +#endif /* !FT_DEBUG_LEVEL_TRACE */ + + + /************************************************************************** + * + * @function: + * FT_Trace_Get_Count + * + * @description: + * Return the number of available trace components. + * + * @return: + * The number of trace components. 0 if FreeType 2 is not built with + * FT_DEBUG_LEVEL_TRACE definition. + * + * @note: + * This function may be useful if you want to access elements of the + * internal trace levels array by an index. + */ + FT_BASE( FT_Int ) + FT_Trace_Get_Count( void ); + + + /************************************************************************** + * + * @function: + * FT_Trace_Get_Name + * + * @description: + * Return the name of a trace component. + * + * @input: + * The index of the trace component. + * + * @return: + * The name of the trace component. This is a statically allocated + * C~string, so do not free it after use. `NULL` if FreeType is not + * built with FT_DEBUG_LEVEL_TRACE definition. + * + * @note: + * Use @FT_Trace_Get_Count to get the number of available trace + * components. 
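The two lookups documented above are meant to be used together: `FT_Trace_Get_Count` gives the size of the internal trace-levels array, and `FT_Trace_Get_Name` (declared right below) maps an index back to a component name. A minimal sketch, assuming a build with FT_DEBUG_LEVEL_TRACE defined and <stdio.h> included; with tracing compiled out the count is 0 and the loop does nothing:

    FT_Int  count = FT_Trace_Get_Count();
    FT_Int  idx;


    for ( idx = 0; idx < count; idx++ )
      printf( "trace component %d: %s\n", idx, FT_Trace_Get_Name( idx ) );
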
+ */ + FT_BASE( const char* ) + FT_Trace_Get_Name( FT_Int idx ); + + + /************************************************************************** + * + * @function: + * FT_Trace_Disable + * + * @description: + * Switch off tracing temporarily. It can be activated again with + * @FT_Trace_Enable. + */ + FT_BASE( void ) + FT_Trace_Disable( void ); + + + /************************************************************************** + * + * @function: + * FT_Trace_Enable + * + * @description: + * Activate tracing. Use it after tracing has been switched off with + * @FT_Trace_Disable. + */ + FT_BASE( void ) + FT_Trace_Enable( void ); + + + /************************************************************************** + * + * You need two opening and closing parentheses! + * + * Example: FT_TRACE0(( "Value is %i", foo )) + * + * Output of the FT_TRACEX macros is sent to stderr. + * + */ + +#define FT_TRACE0( varformat ) FT_TRACE( 0, varformat ) +#define FT_TRACE1( varformat ) FT_TRACE( 1, varformat ) +#define FT_TRACE2( varformat ) FT_TRACE( 2, varformat ) +#define FT_TRACE3( varformat ) FT_TRACE( 3, varformat ) +#define FT_TRACE4( varformat ) FT_TRACE( 4, varformat ) +#define FT_TRACE5( varformat ) FT_TRACE( 5, varformat ) +#define FT_TRACE6( varformat ) FT_TRACE( 6, varformat ) +#define FT_TRACE7( varformat ) FT_TRACE( 7, varformat ) + + + /************************************************************************** + * + * Define the FT_ERROR macro. + * + * Output of this macro is sent to stderr. + * + */ + +#ifdef FT_DEBUG_LEVEL_ERROR + + /************************************************************************** + * + * If FT_DEBUG_LOGGING is enabled, error messages are sent to dlg's API. + * If FT_DEBUG_LOGGING is disabled, error messages are sent to `FT_Message` + * (defined in ftdebug.c). + * + */ +#ifdef FT_DEBUG_LOGGING + +#define FT_ERROR( varformat ) \ + do \ + { \ + const char* dlg_tag = FT_LOGGING_TAG( FT_COMPONENT ); \ + \ + \ + ft_add_tag( dlg_tag ); \ + dlg_trace varformat; \ + ft_remove_tag( dlg_tag ); \ + } while ( 0 ) + +#else /* !FT_DEBUG_LOGGING */ + +#define FT_ERROR( varformat ) FT_Message varformat + +#endif /* !FT_DEBUG_LOGGING */ + + +#else /* !FT_DEBUG_LEVEL_ERROR */ + +#define FT_ERROR( varformat ) do { } while ( 0 ) /* nothing */ + +#endif /* !FT_DEBUG_LEVEL_ERROR */ + + + /************************************************************************** + * + * Define the FT_ASSERT and FT_THROW macros. The call to `FT_Throw` makes + * it possible to easily set a breakpoint at this function. + * + */ + +#ifdef FT_DEBUG_LEVEL_ERROR + +#define FT_ASSERT( condition ) \ + do \ + { \ + if ( !( condition ) ) \ + FT_Panic( "assertion failed on line %d of file %s\n", \ + __LINE__, __FILE__ ); \ + } while ( 0 ) + +#define FT_THROW( e ) \ + ( FT_Throw( FT_ERR_CAT( FT_ERR_PREFIX, e ), \ + __LINE__, \ + __FILE__ ) | \ + FT_ERR_CAT( FT_ERR_PREFIX, e ) ) + +#else /* !FT_DEBUG_LEVEL_ERROR */ + +#define FT_ASSERT( condition ) do { } while ( 0 ) + +#define FT_THROW( e ) FT_ERR_CAT( FT_ERR_PREFIX, e ) + +#endif /* !FT_DEBUG_LEVEL_ERROR */ + + + /************************************************************************** + * + * Define `FT_Message` and `FT_Panic` when needed. + * + */ + +#ifdef FT_DEBUG_LEVEL_ERROR + +#include "stdio.h" /* for vfprintf() */ + + /* print a message */ + FT_BASE( void ) + FT_Message( const char* fmt, + ... ); + + /* print a message and exit */ + FT_BASE( void ) + FT_Panic( const char* fmt, + ... 
); + + /* report file name and line number of an error */ + FT_BASE( int ) + FT_Throw( FT_Error error, + int line, + const char* file ); + +#endif /* FT_DEBUG_LEVEL_ERROR */ + + + FT_BASE( void ) + ft_debug_init( void ); + + +#ifdef FT_DEBUG_LOGGING + + /************************************************************************** + * + * 'dlg' uses output handlers to control how and where log messages are + * printed. Therefore we need to define a default output handler for + * FreeType. + */ + FT_BASE( void ) + ft_log_handler( const struct dlg_origin* origin, + const char* string, + void* data ); + + + /************************************************************************** + * + * 1. `ft_default_log_handler` stores the function pointer that is used + * internally by FreeType to print logs to a file. + * + * 2. `custom_output_handler` stores the function pointer to the callback + * function provided by the user. + * + * It is defined in `ftdebug.c`. + */ + extern dlg_handler ft_default_log_handler; + extern FT_Custom_Log_Handler custom_output_handler; + + + /************************************************************************** + * + * If FT_DEBUG_LOGGING macro is enabled, FreeType needs to initialize and + * un-initialize `FILE*`. + * + * These functions are defined in `ftdebug.c`. + */ + FT_BASE( void ) + ft_logging_init( void ); + + FT_BASE( void ) + ft_logging_deinit( void ); + + + /************************************************************************** + * + * For printing the name of `FT_COMPONENT` along with the actual log we + * need to add a tag with the name of `FT_COMPONENT`. + * + * These functions are defined in `ftdebug.c`. + */ + FT_BASE( void ) + ft_add_tag( const char* tag ); + + FT_BASE( void ) + ft_remove_tag( const char* tag ); + + + /************************************************************************** + * + * A function to print log data using a custom callback logging function + * (which is set using `FT_Set_Log_Handler`). + * + * This function is defined in `ftdebug.c`. + */ + FT_BASE( void ) + FT_Logging_Callback( const char* fmt, + ... ); + +#endif /* FT_DEBUG_LOGGING */ + + +FT_END_HEADER + +#endif /* FTDEBUG_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/ftdrv.h b/Example/include/freetype/internal/ftdrv.h new file mode 100644 index 0000000..9001c07 --- /dev/null +++ b/Example/include/freetype/internal/ftdrv.h @@ -0,0 +1,289 @@ +/**************************************************************************** + * + * ftdrv.h + * + * FreeType internal font driver interface (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. 
+ * + */ + + +#ifndef FTDRV_H_ +#define FTDRV_H_ + + +#include + +#include "compiler-macros.h" + +FT_BEGIN_HEADER + + + typedef FT_Error + (*FT_Face_InitFunc)( FT_Stream stream, + FT_Face face, + FT_Int typeface_index, + FT_Int num_params, + FT_Parameter* parameters ); + + typedef void + (*FT_Face_DoneFunc)( FT_Face face ); + + + typedef FT_Error + (*FT_Size_InitFunc)( FT_Size size ); + + typedef void + (*FT_Size_DoneFunc)( FT_Size size ); + + + typedef FT_Error + (*FT_Slot_InitFunc)( FT_GlyphSlot slot ); + + typedef void + (*FT_Slot_DoneFunc)( FT_GlyphSlot slot ); + + + typedef FT_Error + (*FT_Size_RequestFunc)( FT_Size size, + FT_Size_Request req ); + + typedef FT_Error + (*FT_Size_SelectFunc)( FT_Size size, + FT_ULong size_index ); + + typedef FT_Error + (*FT_Slot_LoadFunc)( FT_GlyphSlot slot, + FT_Size size, + FT_UInt glyph_index, + FT_Int32 load_flags ); + + + typedef FT_Error + (*FT_Face_GetKerningFunc)( FT_Face face, + FT_UInt left_glyph, + FT_UInt right_glyph, + FT_Vector* kerning ); + + + typedef FT_Error + (*FT_Face_AttachFunc)( FT_Face face, + FT_Stream stream ); + + + typedef FT_Error + (*FT_Face_GetAdvancesFunc)( FT_Face face, + FT_UInt first, + FT_UInt count, + FT_Int32 flags, + FT_Fixed* advances ); + + + /************************************************************************** + * + * @struct: + * FT_Driver_ClassRec + * + * @description: + * The font driver class. This structure mostly contains pointers to + * driver methods. + * + * @fields: + * root :: + * The parent module. + * + * face_object_size :: + * The size of a face object in bytes. + * + * size_object_size :: + * The size of a size object in bytes. + * + * slot_object_size :: + * The size of a glyph object in bytes. + * + * init_face :: + * The format-specific face constructor. + * + * done_face :: + * The format-specific face destructor. + * + * init_size :: + * The format-specific size constructor. + * + * done_size :: + * The format-specific size destructor. + * + * init_slot :: + * The format-specific slot constructor. + * + * done_slot :: + * The format-specific slot destructor. + * + * + * load_glyph :: + * A function handle to load a glyph to a slot. This field is + * mandatory! + * + * get_kerning :: + * A function handle to return the unscaled kerning for a given pair of + * glyphs. Can be set to 0 if the format doesn't support kerning. + * + * attach_file :: + * This function handle is used to read additional data for a face from + * another file/stream. For example, this can be used to add data from + * AFM or PFM files on a Type 1 face, or a CIDMap on a CID-keyed face. + * + * get_advances :: + * A function handle used to return advance widths of 'count' glyphs + * (in font units), starting at 'first'. The 'vertical' flag must be + * set to get vertical advance heights. The 'advances' buffer is + * caller-allocated. The idea of this function is to be able to + * perform device-independent text layout without loading a single + * glyph image. + * + * request_size :: + * A handle to a function used to request the new character size. Can + * be set to 0 if the scaling done in the base layer suffices. + * + * select_size :: + * A handle to a function used to select a new fixed size. It is used + * only if @FT_FACE_FLAG_FIXED_SIZES is set. Can be set to 0 if the + * scaling done in the base layer suffices. + * + * @note: + * Most function pointers, with the exception of `load_glyph`, can be set + * to 0 to indicate a default behaviour. 
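The typedefs above fix the shape of every driver hook; the `FT_Driver_ClassRec` structure gathering them follows below. As an illustration only (no `foo_` driver exists in FreeType), a format-specific `load_glyph` callback matching `FT_Slot_LoadFunc` looks roughly like this, with the optional hooks simply left as 0 in the class record:

    static FT_Error
    foo_load_glyph( FT_GlyphSlot  slot,
                    FT_Size       size,
                    FT_UInt       glyph_index,
                    FT_Int32      load_flags )
    {
      FT_Face  face = slot->face;


      if ( glyph_index >= (FT_UInt)face->num_glyphs )
        return FT_THROW( Invalid_Glyph_Index );

      /* decode glyph `glyph_index' here, honouring `load_flags',  */
      /* and fill `slot->outline', `slot->metrics', etc.; `size'   */
      /* carries the currently selected scaling                    */

      return FT_Err_Ok;
    }
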
+ */ + typedef struct FT_Driver_ClassRec_ + { + FT_Module_Class root; + + FT_Long face_object_size; + FT_Long size_object_size; + FT_Long slot_object_size; + + FT_Face_InitFunc init_face; + FT_Face_DoneFunc done_face; + + FT_Size_InitFunc init_size; + FT_Size_DoneFunc done_size; + + FT_Slot_InitFunc init_slot; + FT_Slot_DoneFunc done_slot; + + FT_Slot_LoadFunc load_glyph; + + FT_Face_GetKerningFunc get_kerning; + FT_Face_AttachFunc attach_file; + FT_Face_GetAdvancesFunc get_advances; + + /* since version 2.2 */ + FT_Size_RequestFunc request_size; + FT_Size_SelectFunc select_size; + + } FT_Driver_ClassRec, *FT_Driver_Class; + + + /************************************************************************** + * + * @macro: + * FT_DECLARE_DRIVER + * + * @description: + * Used to create a forward declaration of an FT_Driver_ClassRec struct + * instance. + * + * @macro: + * FT_DEFINE_DRIVER + * + * @description: + * Used to initialize an instance of FT_Driver_ClassRec struct. + * + * `ftinit.c` (ft_create_default_module_classes) already contains a + * mechanism to call these functions for the default modules described in + * `ftmodule.h`. + * + * The struct will be allocated in the global scope (or the scope where + * the macro is used). + */ +#define FT_DECLARE_DRIVER( class_ ) \ + FT_CALLBACK_TABLE \ + const FT_Driver_ClassRec class_; + +#define FT_DEFINE_DRIVER( \ + class_, \ + flags_, \ + size_, \ + name_, \ + version_, \ + requires_, \ + interface_, \ + init_, \ + done_, \ + get_interface_, \ + face_object_size_, \ + size_object_size_, \ + slot_object_size_, \ + init_face_, \ + done_face_, \ + init_size_, \ + done_size_, \ + init_slot_, \ + done_slot_, \ + load_glyph_, \ + get_kerning_, \ + attach_file_, \ + get_advances_, \ + request_size_, \ + select_size_ ) \ + FT_CALLBACK_TABLE_DEF \ + const FT_Driver_ClassRec class_ = \ + { \ + FT_DEFINE_ROOT_MODULE( flags_, \ + size_, \ + name_, \ + version_, \ + requires_, \ + interface_, \ + init_, \ + done_, \ + get_interface_ ) \ + \ + face_object_size_, \ + size_object_size_, \ + slot_object_size_, \ + \ + init_face_, \ + done_face_, \ + \ + init_size_, \ + done_size_, \ + \ + init_slot_, \ + done_slot_, \ + \ + load_glyph_, \ + \ + get_kerning_, \ + attach_file_, \ + get_advances_, \ + \ + request_size_, \ + select_size_ \ + }; + + +FT_END_HEADER + +#endif /* FTDRV_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/ftgloadr.h b/Example/include/freetype/internal/ftgloadr.h new file mode 100644 index 0000000..36e5509 --- /dev/null +++ b/Example/include/freetype/internal/ftgloadr.h @@ -0,0 +1,147 @@ +/**************************************************************************** + * + * ftgloadr.h + * + * The FreeType glyph loader (specification). + * + * Copyright (C) 2002-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTGLOADR_H_ +#define FTGLOADR_H_ + + +#include + +#include "compiler-macros.h" + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @struct: + * FT_GlyphLoader + * + * @description: + * The glyph loader is an internal object used to load several glyphs + * together (for example, in the case of composites). 
+ */ + typedef struct FT_SubGlyphRec_ + { + FT_Int index; + FT_UShort flags; + FT_Int arg1; + FT_Int arg2; + FT_Matrix transform; + + } FT_SubGlyphRec; + + + typedef struct FT_GlyphLoadRec_ + { + FT_Outline outline; /* outline */ + FT_Vector* extra_points; /* extra points table */ + FT_Vector* extra_points2; /* second extra points table */ + FT_UInt num_subglyphs; /* number of subglyphs */ + FT_SubGlyph subglyphs; /* subglyphs */ + + } FT_GlyphLoadRec, *FT_GlyphLoad; + + + typedef struct FT_GlyphLoaderRec_ + { + FT_Memory memory; + FT_UInt max_points; + FT_UInt max_contours; + FT_UInt max_subglyphs; + FT_Bool use_extra; + + FT_GlyphLoadRec base; + FT_GlyphLoadRec current; + + void* other; /* for possible future extension? */ + + } FT_GlyphLoaderRec, *FT_GlyphLoader; + + + /* create new empty glyph loader */ + FT_BASE( FT_Error ) + FT_GlyphLoader_New( FT_Memory memory, + FT_GlyphLoader *aloader ); + + /* add an extra points table to a glyph loader */ + FT_BASE( FT_Error ) + FT_GlyphLoader_CreateExtra( FT_GlyphLoader loader ); + + /* destroy a glyph loader */ + FT_BASE( void ) + FT_GlyphLoader_Done( FT_GlyphLoader loader ); + + /* reset a glyph loader (frees everything int it) */ + FT_BASE( void ) + FT_GlyphLoader_Reset( FT_GlyphLoader loader ); + + /* rewind a glyph loader */ + FT_BASE( void ) + FT_GlyphLoader_Rewind( FT_GlyphLoader loader ); + + /* check that there is enough space to add `n_points' and `n_contours' */ + /* to the glyph loader */ + FT_BASE( FT_Error ) + FT_GlyphLoader_CheckPoints( FT_GlyphLoader loader, + FT_UInt n_points, + FT_UInt n_contours ); + + +#define FT_GLYPHLOADER_CHECK_P( _loader, _count ) \ + ( (_count) == 0 || \ + ( (FT_UInt)(_loader)->base.outline.n_points + \ + (FT_UInt)(_loader)->current.outline.n_points + \ + (FT_UInt)(_count) ) <= (_loader)->max_points ) + +#define FT_GLYPHLOADER_CHECK_C( _loader, _count ) \ + ( (_count) == 0 || \ + ( (FT_UInt)(_loader)->base.outline.n_contours + \ + (FT_UInt)(_loader)->current.outline.n_contours + \ + (FT_UInt)(_count) ) <= (_loader)->max_contours ) + +#define FT_GLYPHLOADER_CHECK_POINTS( _loader, _points, _contours ) \ + ( ( FT_GLYPHLOADER_CHECK_P( _loader, _points ) && \ + FT_GLYPHLOADER_CHECK_C( _loader, _contours ) ) \ + ? 0 \ + : FT_GlyphLoader_CheckPoints( (_loader), \ + (FT_UInt)(_points), \ + (FT_UInt)(_contours) ) ) + + + /* check that there is enough space to add `n_subs' sub-glyphs to */ + /* a glyph loader */ + FT_BASE( FT_Error ) + FT_GlyphLoader_CheckSubGlyphs( FT_GlyphLoader loader, + FT_UInt n_subs ); + + /* prepare a glyph loader, i.e. empty the current glyph */ + FT_BASE( void ) + FT_GlyphLoader_Prepare( FT_GlyphLoader loader ); + + /* add the current glyph to the base glyph */ + FT_BASE( void ) + FT_GlyphLoader_Add( FT_GlyphLoader loader ); + + +FT_END_HEADER + +#endif /* FTGLOADR_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/fthash.h b/Example/include/freetype/internal/fthash.h new file mode 100644 index 0000000..622ec76 --- /dev/null +++ b/Example/include/freetype/internal/fthash.h @@ -0,0 +1,135 @@ +/**************************************************************************** + * + * fthash.h + * + * Hashing functions (specification). 
+ * + */ + +/* + * Copyright 2000 Computing Research Labs, New Mexico State University + * Copyright 2001-2015 + * Francesco Zappa Nardelli + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COMPUTING RESEARCH LAB OR NEW MEXICO STATE UNIVERSITY BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT + * OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR + * THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + + /************************************************************************** + * + * This file is based on code from bdf.c,v 1.22 2000/03/16 20:08:50 + * + * taken from Mark Leisher's xmbdfed package + * + */ + + +#ifndef FTHASH_H_ +#define FTHASH_H_ + + +#include + + +FT_BEGIN_HEADER + + + typedef union FT_Hashkey_ + { + FT_Int num; + const char* str; + + } FT_Hashkey; + + + typedef struct FT_HashnodeRec_ + { + FT_Hashkey key; + size_t data; + + } FT_HashnodeRec; + + typedef struct FT_HashnodeRec_ *FT_Hashnode; + + + typedef FT_ULong + (*FT_Hash_LookupFunc)( FT_Hashkey* key ); + + typedef FT_Bool + (*FT_Hash_CompareFunc)( FT_Hashkey* a, + FT_Hashkey* b ); + + + typedef struct FT_HashRec_ + { + FT_UInt limit; + FT_UInt size; + FT_UInt used; + + FT_Hash_LookupFunc lookup; + FT_Hash_CompareFunc compare; + + FT_Hashnode* table; + + } FT_HashRec; + + typedef struct FT_HashRec_ *FT_Hash; + + + FT_Error + ft_hash_str_init( FT_Hash hash, + FT_Memory memory ); + + FT_Error + ft_hash_num_init( FT_Hash hash, + FT_Memory memory ); + + void + ft_hash_str_free( FT_Hash hash, + FT_Memory memory ); + +#define ft_hash_num_free ft_hash_str_free + + FT_Error + ft_hash_str_insert( const char* key, + size_t data, + FT_Hash hash, + FT_Memory memory ); + + FT_Error + ft_hash_num_insert( FT_Int num, + size_t data, + FT_Hash hash, + FT_Memory memory ); + + size_t* + ft_hash_str_lookup( const char* key, + FT_Hash hash ); + + size_t* + ft_hash_num_lookup( FT_Int num, + FT_Hash hash ); + + +FT_END_HEADER + + +#endif /* FTHASH_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/ftmemory.h b/Example/include/freetype/internal/ftmemory.h new file mode 100644 index 0000000..5eb1d21 --- /dev/null +++ b/Example/include/freetype/internal/ftmemory.h @@ -0,0 +1,398 @@ +/**************************************************************************** + * + * ftmemory.h + * + * The FreeType memory management macros (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. 
By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTMEMORY_H_ +#define FTMEMORY_H_ + + +#include +#include FT_CONFIG_CONFIG_H +#include + +#include "compiler-macros.h" + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @macro: + * FT_SET_ERROR + * + * @description: + * This macro is used to set an implicit 'error' variable to a given + * expression's value (usually a function call), and convert it to a + * boolean which is set whenever the value is != 0. + */ +#undef FT_SET_ERROR +#define FT_SET_ERROR( expression ) \ + ( ( error = (expression) ) != 0 ) + + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /**** ****/ + /**** ****/ + /**** M E M O R Y ****/ + /**** ****/ + /**** ****/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + + /* The calculation `NULL + n' is undefined in C. Even if the resulting */ + /* pointer doesn't get dereferenced, this causes warnings with */ + /* sanitizers. */ + /* */ + /* We thus provide a macro that should be used if `base' can be NULL. */ +#define FT_OFFSET( base, count ) ( (base) ? (base) + (count) : NULL ) + + + /* + * C++ refuses to handle statements like p = (void*)anything, with `p' a + * typed pointer. Since we don't have a `typeof' operator in standard C++, + * we have to use a template to emulate it. + */ + +#ifdef __cplusplus + +extern "C++" +{ + template inline T* + cplusplus_typeof( T*, + void *v ) + { + return static_cast ( v ); + } +} + +#define FT_ASSIGNP( p, val ) (p) = cplusplus_typeof( (p), (val) ) + +#else + +#define FT_ASSIGNP( p, val ) (p) = (val) + +#endif + + + +#ifdef FT_DEBUG_MEMORY + + FT_BASE( const char* ) ft_debug_file_; + FT_BASE( long ) ft_debug_lineno_; + +#define FT_DEBUG_INNER( exp ) ( ft_debug_file_ = __FILE__, \ + ft_debug_lineno_ = __LINE__, \ + (exp) ) + +#define FT_ASSIGNP_INNER( p, exp ) ( ft_debug_file_ = __FILE__, \ + ft_debug_lineno_ = __LINE__, \ + FT_ASSIGNP( p, exp ) ) + +#else /* !FT_DEBUG_MEMORY */ + +#define FT_DEBUG_INNER( exp ) (exp) +#define FT_ASSIGNP_INNER( p, exp ) FT_ASSIGNP( p, exp ) + +#endif /* !FT_DEBUG_MEMORY */ + + + /* + * The allocation functions return a pointer, and the error code is written + * to through the `p_error' parameter. + */ + + /* The `q' variants of the functions below (`q' for `quick') don't fill */ + /* the allocated or reallocated memory with zero bytes. 
*/ + + FT_BASE( FT_Pointer ) + ft_mem_alloc( FT_Memory memory, + FT_Long size, + FT_Error *p_error ); + + FT_BASE( FT_Pointer ) + ft_mem_qalloc( FT_Memory memory, + FT_Long size, + FT_Error *p_error ); + + FT_BASE( FT_Pointer ) + ft_mem_realloc( FT_Memory memory, + FT_Long item_size, + FT_Long cur_count, + FT_Long new_count, + void* block, + FT_Error *p_error ); + + FT_BASE( FT_Pointer ) + ft_mem_qrealloc( FT_Memory memory, + FT_Long item_size, + FT_Long cur_count, + FT_Long new_count, + void* block, + FT_Error *p_error ); + + FT_BASE( void ) + ft_mem_free( FT_Memory memory, + const void* P ); + + + /* The `Q' variants of the macros below (`Q' for `quick') don't fill */ + /* the allocated or reallocated memory with zero bytes. */ + +#define FT_MEM_ALLOC( ptr, size ) \ + FT_ASSIGNP_INNER( ptr, ft_mem_alloc( memory, \ + (FT_Long)(size), \ + &error ) ) + +#define FT_MEM_FREE( ptr ) \ + FT_BEGIN_STMNT \ + FT_DEBUG_INNER( ft_mem_free( memory, (ptr) ) ); \ + (ptr) = NULL; \ + FT_END_STMNT + +#define FT_MEM_NEW( ptr ) \ + FT_MEM_ALLOC( ptr, sizeof ( *(ptr) ) ) + +#define FT_MEM_REALLOC( ptr, cursz, newsz ) \ + FT_ASSIGNP_INNER( ptr, ft_mem_realloc( memory, \ + 1, \ + (FT_Long)(cursz), \ + (FT_Long)(newsz), \ + (ptr), \ + &error ) ) + +#define FT_MEM_QALLOC( ptr, size ) \ + FT_ASSIGNP_INNER( ptr, ft_mem_qalloc( memory, \ + (FT_Long)(size), \ + &error ) ) + +#define FT_MEM_QNEW( ptr ) \ + FT_MEM_QALLOC( ptr, sizeof ( *(ptr) ) ) + +#define FT_MEM_QREALLOC( ptr, cursz, newsz ) \ + FT_ASSIGNP_INNER( ptr, ft_mem_qrealloc( memory, \ + 1, \ + (FT_Long)(cursz), \ + (FT_Long)(newsz), \ + (ptr), \ + &error ) ) + +#define FT_MEM_ALLOC_MULT( ptr, count, item_size ) \ + FT_ASSIGNP_INNER( ptr, ft_mem_realloc( memory, \ + (FT_Long)(item_size), \ + 0, \ + (FT_Long)(count), \ + NULL, \ + &error ) ) + +#define FT_MEM_REALLOC_MULT( ptr, oldcnt, newcnt, itmsz ) \ + FT_ASSIGNP_INNER( ptr, ft_mem_realloc( memory, \ + (FT_Long)(itmsz), \ + (FT_Long)(oldcnt), \ + (FT_Long)(newcnt), \ + (ptr), \ + &error ) ) + +#define FT_MEM_QALLOC_MULT( ptr, count, item_size ) \ + FT_ASSIGNP_INNER( ptr, ft_mem_qrealloc( memory, \ + (FT_Long)(item_size), \ + 0, \ + (FT_Long)(count), \ + NULL, \ + &error ) ) + +#define FT_MEM_QREALLOC_MULT( ptr, oldcnt, newcnt, itmsz ) \ + FT_ASSIGNP_INNER( ptr, ft_mem_qrealloc( memory, \ + (FT_Long)(itmsz), \ + (FT_Long)(oldcnt), \ + (FT_Long)(newcnt), \ + (ptr), \ + &error ) ) + + +#define FT_MEM_SET_ERROR( cond ) ( (cond), error != 0 ) + + +#define FT_MEM_SET( dest, byte, count ) \ + ft_memset( dest, byte, (FT_Offset)(count) ) + +#define FT_MEM_COPY( dest, source, count ) \ + ft_memcpy( dest, source, (FT_Offset)(count) ) + +#define FT_MEM_MOVE( dest, source, count ) \ + ft_memmove( dest, source, (FT_Offset)(count) ) + + +#define FT_MEM_ZERO( dest, count ) FT_MEM_SET( dest, 0, count ) + +#define FT_ZERO( p ) FT_MEM_ZERO( p, sizeof ( *(p) ) ) + + +#define FT_ARRAY_ZERO( dest, count ) \ + FT_MEM_ZERO( dest, \ + (FT_Offset)(count) * sizeof ( *(dest) ) ) + +#define FT_ARRAY_COPY( dest, source, count ) \ + FT_MEM_COPY( dest, \ + source, \ + (FT_Offset)(count) * sizeof ( *(dest) ) ) + +#define FT_ARRAY_MOVE( dest, source, count ) \ + FT_MEM_MOVE( dest, \ + source, \ + (FT_Offset)(count) * sizeof ( *(dest) ) ) + + + /* + * Return the maximum number of addressable elements in an array. We limit + * ourselves to INT_MAX, rather than UINT_MAX, to avoid any problems. 
+ */ +#define FT_ARRAY_MAX( ptr ) ( FT_INT_MAX / sizeof ( *(ptr) ) ) + +#define FT_ARRAY_CHECK( ptr, count ) ( (count) <= FT_ARRAY_MAX( ptr ) ) + + + /************************************************************************** + * + * The following functions macros expect that their pointer argument is + * _typed_ in order to automatically compute array element sizes. + */ + +#define FT_MEM_NEW_ARRAY( ptr, count ) \ + FT_ASSIGNP_INNER( ptr, ft_mem_realloc( memory, \ + sizeof ( *(ptr) ), \ + 0, \ + (FT_Long)(count), \ + NULL, \ + &error ) ) + +#define FT_MEM_RENEW_ARRAY( ptr, cursz, newsz ) \ + FT_ASSIGNP_INNER( ptr, ft_mem_realloc( memory, \ + sizeof ( *(ptr) ), \ + (FT_Long)(cursz), \ + (FT_Long)(newsz), \ + (ptr), \ + &error ) ) + +#define FT_MEM_QNEW_ARRAY( ptr, count ) \ + FT_ASSIGNP_INNER( ptr, ft_mem_qrealloc( memory, \ + sizeof ( *(ptr) ), \ + 0, \ + (FT_Long)(count), \ + NULL, \ + &error ) ) + +#define FT_MEM_QRENEW_ARRAY( ptr, cursz, newsz ) \ + FT_ASSIGNP_INNER( ptr, ft_mem_qrealloc( memory, \ + sizeof ( *(ptr) ), \ + (FT_Long)(cursz), \ + (FT_Long)(newsz), \ + (ptr), \ + &error ) ) + +#define FT_ALLOC( ptr, size ) \ + FT_MEM_SET_ERROR( FT_MEM_ALLOC( ptr, size ) ) + +#define FT_REALLOC( ptr, cursz, newsz ) \ + FT_MEM_SET_ERROR( FT_MEM_REALLOC( ptr, cursz, newsz ) ) + +#define FT_ALLOC_MULT( ptr, count, item_size ) \ + FT_MEM_SET_ERROR( FT_MEM_ALLOC_MULT( ptr, count, item_size ) ) + +#define FT_REALLOC_MULT( ptr, oldcnt, newcnt, itmsz ) \ + FT_MEM_SET_ERROR( FT_MEM_REALLOC_MULT( ptr, oldcnt, \ + newcnt, itmsz ) ) + +#define FT_QALLOC( ptr, size ) \ + FT_MEM_SET_ERROR( FT_MEM_QALLOC( ptr, size ) ) + +#define FT_QREALLOC( ptr, cursz, newsz ) \ + FT_MEM_SET_ERROR( FT_MEM_QREALLOC( ptr, cursz, newsz ) ) + +#define FT_QALLOC_MULT( ptr, count, item_size ) \ + FT_MEM_SET_ERROR( FT_MEM_QALLOC_MULT( ptr, count, item_size ) ) + +#define FT_QREALLOC_MULT( ptr, oldcnt, newcnt, itmsz ) \ + FT_MEM_SET_ERROR( FT_MEM_QREALLOC_MULT( ptr, oldcnt, \ + newcnt, itmsz ) ) + +#define FT_FREE( ptr ) FT_MEM_FREE( ptr ) + +#define FT_NEW( ptr ) FT_MEM_SET_ERROR( FT_MEM_NEW( ptr ) ) + +#define FT_NEW_ARRAY( ptr, count ) \ + FT_MEM_SET_ERROR( FT_MEM_NEW_ARRAY( ptr, count ) ) + +#define FT_RENEW_ARRAY( ptr, curcnt, newcnt ) \ + FT_MEM_SET_ERROR( FT_MEM_RENEW_ARRAY( ptr, curcnt, newcnt ) ) + +#define FT_QNEW( ptr ) FT_MEM_SET_ERROR( FT_MEM_QNEW( ptr ) ) + +#define FT_QNEW_ARRAY( ptr, count ) \ + FT_MEM_SET_ERROR( FT_MEM_QNEW_ARRAY( ptr, count ) ) + +#define FT_QRENEW_ARRAY( ptr, curcnt, newcnt ) \ + FT_MEM_SET_ERROR( FT_MEM_QRENEW_ARRAY( ptr, curcnt, newcnt ) ) + + + FT_BASE( FT_Pointer ) + ft_mem_strdup( FT_Memory memory, + const char* str, + FT_Error *p_error ); + + FT_BASE( FT_Pointer ) + ft_mem_dup( FT_Memory memory, + const void* address, + FT_ULong size, + FT_Error *p_error ); + + +#define FT_MEM_STRDUP( dst, str ) \ + (dst) = (char*)ft_mem_strdup( memory, (const char*)(str), &error ) + +#define FT_STRDUP( dst, str ) \ + FT_MEM_SET_ERROR( FT_MEM_STRDUP( dst, str ) ) + +#define FT_MEM_DUP( dst, address, size ) \ + (dst) = ft_mem_dup( memory, (address), (FT_ULong)(size), &error ) + +#define FT_DUP( dst, address, size ) \ + FT_MEM_SET_ERROR( FT_MEM_DUP( dst, address, size ) ) + + + /* Return >= 1 if a truncation occurs. */ + /* Return 0 if the source string fits the buffer. */ + /* This is *not* the same as strlcpy(). 
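Every wrapper in this family relies on two variables being in scope at the call site, `memory` (an FT_Memory) and `error` (an FT_Error), as the expansions above show. A hypothetical helper, for illustration only, therefore follows this pattern:

    static FT_Error
    foo_build_table( FT_Memory  memory,
                     FT_UInt    count,
                     FT_Byte**  atable )
    {
      FT_Error  error;           /* written by FT_NEW_ARRAY below */
      FT_Byte*  table = NULL;


      if ( FT_NEW_ARRAY( table, count ) )   /* zero-filled allocation */
        return error;

      /* ... fill `table' ... */

      *atable = table;
      return error;              /* FT_Err_Ok at this point */
    }

The matching release goes through `FT_FREE( table )`, which frees the block with the same `memory` object and resets the pointer to NULL.
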
*/ + FT_BASE( FT_Int ) + ft_mem_strcpyn( char* dst, + const char* src, + FT_ULong size ); + +#define FT_STRCPYN( dst, src, size ) \ + ft_mem_strcpyn( (char*)dst, (const char*)(src), (FT_ULong)(size) ) + + +FT_END_HEADER + +#endif /* FTMEMORY_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/ftmmtypes.h b/Example/include/freetype/internal/ftmmtypes.h new file mode 100644 index 0000000..c4b21d6 --- /dev/null +++ b/Example/include/freetype/internal/ftmmtypes.h @@ -0,0 +1,91 @@ +/**************************************************************************** + * + * ftmmtypes.h + * + * OpenType Variations type definitions for internal use + * with the multi-masters service (specification). + * + * Copyright (C) 2022-2023 by + * David Turner, Robert Wilhelm, Werner Lemberg, George Williams, and + * Dominik Röttsches. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTMMTYPES_H_ +#define FTMMTYPES_H_ + +FT_BEGIN_HEADER + + + typedef FT_Int32 FT_ItemVarDelta; + + typedef struct GX_ItemVarDataRec_ + { + FT_UInt itemCount; /* Number of delta sets per item. */ + FT_UInt regionIdxCount; /* Number of region indices. */ + FT_UInt* regionIndices; /* Array of `regionCount` indices; */ + /* these index `varRegionList`. */ + FT_Byte* deltaSet; /* Array of `itemCount` deltas; */ + /* use `innerIndex` for this array. */ + FT_UShort wordDeltaCount; /* Number of the first 32-bit ints */ + /* or 16-bit ints of `deltaSet` */ + /* depending on `longWords`. */ + FT_Bool longWords; /* If true, `deltaSet` is a 32-bit */ + /* array followed by a 16-bit */ + /* array, otherwise a 16-bit array */ + /* followed by an 8-bit array. */ + } GX_ItemVarDataRec, *GX_ItemVarData; + + + /* contribution of one axis to a region */ + typedef struct GX_AxisCoordsRec_ + { + FT_Fixed startCoord; + FT_Fixed peakCoord; /* zero means no effect (factor = 1) */ + FT_Fixed endCoord; + + } GX_AxisCoordsRec, *GX_AxisCoords; + + + typedef struct GX_VarRegionRec_ + { + GX_AxisCoords axisList; /* array of axisCount records */ + + } GX_VarRegionRec, *GX_VarRegion; + + + /* item variation store */ + typedef struct GX_ItemVarStoreRec_ + { + FT_UInt dataCount; + GX_ItemVarData varData; /* array of dataCount records; */ + /* use `outerIndex' for this array */ + FT_UShort axisCount; + FT_UInt regionCount; /* total number of regions defined */ + GX_VarRegion varRegionList; + + } GX_ItemVarStoreRec, *GX_ItemVarStore; + + + typedef struct GX_DeltaSetIdxMapRec_ + { + FT_ULong mapCount; + FT_UInt* outerIndex; /* indices to item var data */ + FT_UInt* innerIndex; /* indices to delta set */ + + } GX_DeltaSetIdxMapRec, *GX_DeltaSetIdxMap; + + +FT_END_HEADER + +#endif /* FTMMTYPES_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/ftobjs.h b/Example/include/freetype/internal/ftobjs.h new file mode 100644 index 0000000..28bc9b6 --- /dev/null +++ b/Example/include/freetype/internal/ftobjs.h @@ -0,0 +1,1238 @@ +/**************************************************************************** + * + * ftobjs.h + * + * The FreeType private base classes (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. 
+ * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + + /************************************************************************** + * + * This file contains the definition of all internal FreeType classes. + * + */ + + +#ifndef FTOBJS_H_ +#define FTOBJS_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef FT_CONFIG_OPTION_INCREMENTAL +#include +#endif + +#include "compiler-macros.h" + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * Some generic definitions. + */ +#ifndef TRUE +#define TRUE 1 +#endif + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef NULL +#define NULL (void*)0 +#endif + + + /************************************************************************** + * + * The min and max functions missing in C. As usual, be careful not to + * write things like FT_MIN( a++, b++ ) to avoid side effects. + */ +#define FT_MIN( a, b ) ( (a) < (b) ? (a) : (b) ) +#define FT_MAX( a, b ) ( (a) > (b) ? (a) : (b) ) + +#define FT_ABS( a ) ( (a) < 0 ? -(a) : (a) ) + + /* + * Approximate sqrt(x*x+y*y) using the `alpha max plus beta min' algorithm. + * We use alpha = 1, beta = 3/8, giving us results with a largest error + * less than 7% compared to the exact value. + */ +#define FT_HYPOT( x, y ) \ + ( x = FT_ABS( x ), \ + y = FT_ABS( y ), \ + x > y ? x + ( 3 * y >> 3 ) \ + : y + ( 3 * x >> 3 ) ) + + /* we use FT_TYPEOF to suppress signedness compilation warnings */ +#define FT_PAD_FLOOR( x, n ) ( (x) & ~FT_TYPEOF( x )( (n) - 1 ) ) +#define FT_PAD_ROUND( x, n ) FT_PAD_FLOOR( (x) + (n) / 2, n ) +#define FT_PAD_CEIL( x, n ) FT_PAD_FLOOR( (x) + (n) - 1, n ) + +#define FT_PIX_FLOOR( x ) ( (x) & ~FT_TYPEOF( x )63 ) +#define FT_PIX_ROUND( x ) FT_PIX_FLOOR( (x) + 32 ) +#define FT_PIX_CEIL( x ) FT_PIX_FLOOR( (x) + 63 ) + + /* specialized versions (for signed values) */ + /* that don't produce run-time errors due to integer overflow */ +#define FT_PAD_ROUND_LONG( x, n ) FT_PAD_FLOOR( ADD_LONG( (x), (n) / 2 ), \ + n ) +#define FT_PAD_CEIL_LONG( x, n ) FT_PAD_FLOOR( ADD_LONG( (x), (n) - 1 ), \ + n ) +#define FT_PIX_ROUND_LONG( x ) FT_PIX_FLOOR( ADD_LONG( (x), 32 ) ) +#define FT_PIX_CEIL_LONG( x ) FT_PIX_FLOOR( ADD_LONG( (x), 63 ) ) + +#define FT_PAD_ROUND_INT32( x, n ) FT_PAD_FLOOR( ADD_INT32( (x), (n) / 2 ), \ + n ) +#define FT_PAD_CEIL_INT32( x, n ) FT_PAD_FLOOR( ADD_INT32( (x), (n) - 1 ), \ + n ) +#define FT_PIX_ROUND_INT32( x ) FT_PIX_FLOOR( ADD_INT32( (x), 32 ) ) +#define FT_PIX_CEIL_INT32( x ) FT_PIX_FLOOR( ADD_INT32( (x), 63 ) ) + + + /* + * character classification functions -- since these are used to parse font + * files, we must not use those in which are locale-dependent + */ +#define ft_isdigit( x ) ( ( (unsigned)(x) - '0' ) < 10U ) + +#define ft_isxdigit( x ) ( ( (unsigned)(x) - '0' ) < 10U || \ + ( (unsigned)(x) - 'a' ) < 6U || \ + ( (unsigned)(x) - 'A' ) < 6U ) + + /* the next two macros assume ASCII representation */ +#define ft_isupper( x ) ( ( (unsigned)(x) - 'A' ) < 26U ) +#define ft_islower( x ) ( ( (unsigned)(x) - 'a' ) < 26U ) + +#define ft_isalpha( x ) ( ft_isupper( x ) || ft_islower( x ) ) +#define ft_isalnum( x ) ( ft_isdigit( x ) || ft_isalpha( x ) ) + + + 
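To make the quoted error bound concrete: for a 3-4-5 vector the `alpha max plus beta min' estimate stays well inside 7%, and the 26.6 pixel helpers behave as their names suggest. A small sketch, assuming the macros above are visible (note that FT_HYPOT overwrites its arguments):

    FT_Pos  x = 300;
    FT_Pos  y = 400;
    FT_Pos  approx;


    approx = FT_HYPOT( x, y );
    /* approx == 400 + ( 3 * 300 >> 3 ) == 512, exact length is 500, */
    /* i.e. an error of 2.4%                                         */

    /* 26.6 rounding: one pixel == 64 units          */
    /* FT_PIX_FLOOR( 96 ) ==  64   ( 1.5px -> 1px )  */
    /* FT_PIX_ROUND( 96 ) == 128   ( 1.5px -> 2px )  */
    /* FT_PIX_CEIL( 65 )  == 128   ( just over 1px ) */
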
/*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /**** ****/ + /**** ****/ + /**** C H A R M A P S ****/ + /**** ****/ + /**** ****/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + /* handle to internal charmap object */ + typedef struct FT_CMapRec_* FT_CMap; + + /* handle to charmap class structure */ + typedef const struct FT_CMap_ClassRec_* FT_CMap_Class; + + /* internal charmap object structure */ + typedef struct FT_CMapRec_ + { + FT_CharMapRec charmap; + FT_CMap_Class clazz; + + } FT_CMapRec; + + /* typecast any pointer to a charmap handle */ +#define FT_CMAP( x ) ( (FT_CMap)( x ) ) + + /* obvious macros */ +#define FT_CMAP_PLATFORM_ID( x ) FT_CMAP( x )->charmap.platform_id +#define FT_CMAP_ENCODING_ID( x ) FT_CMAP( x )->charmap.encoding_id +#define FT_CMAP_ENCODING( x ) FT_CMAP( x )->charmap.encoding +#define FT_CMAP_FACE( x ) FT_CMAP( x )->charmap.face + + + /* class method definitions */ + typedef FT_Error + (*FT_CMap_InitFunc)( FT_CMap cmap, + FT_Pointer init_data ); + + typedef void + (*FT_CMap_DoneFunc)( FT_CMap cmap ); + + typedef FT_UInt + (*FT_CMap_CharIndexFunc)( FT_CMap cmap, + FT_UInt32 char_code ); + + typedef FT_UInt + (*FT_CMap_CharNextFunc)( FT_CMap cmap, + FT_UInt32 *achar_code ); + + typedef FT_UInt + (*FT_CMap_CharVarIndexFunc)( FT_CMap cmap, + FT_CMap unicode_cmap, + FT_UInt32 char_code, + FT_UInt32 variant_selector ); + + typedef FT_Int + (*FT_CMap_CharVarIsDefaultFunc)( FT_CMap cmap, + FT_UInt32 char_code, + FT_UInt32 variant_selector ); + + typedef FT_UInt32 * + (*FT_CMap_VariantListFunc)( FT_CMap cmap, + FT_Memory mem ); + + typedef FT_UInt32 * + (*FT_CMap_CharVariantListFunc)( FT_CMap cmap, + FT_Memory mem, + FT_UInt32 char_code ); + + typedef FT_UInt32 * + (*FT_CMap_VariantCharListFunc)( FT_CMap cmap, + FT_Memory mem, + FT_UInt32 variant_selector ); + + + typedef struct FT_CMap_ClassRec_ + { + FT_ULong size; + + FT_CMap_InitFunc init; + FT_CMap_DoneFunc done; + FT_CMap_CharIndexFunc char_index; + FT_CMap_CharNextFunc char_next; + + /* Subsequent entries are special ones for format 14 -- the variant */ + /* selector subtable which behaves like no other */ + + FT_CMap_CharVarIndexFunc char_var_index; + FT_CMap_CharVarIsDefaultFunc char_var_default; + FT_CMap_VariantListFunc variant_list; + FT_CMap_CharVariantListFunc charvariant_list; + FT_CMap_VariantCharListFunc variantchar_list; + + } FT_CMap_ClassRec; + + +#define FT_DECLARE_CMAP_CLASS( class_ ) \ + FT_CALLBACK_TABLE const FT_CMap_ClassRec class_; + +#define FT_DEFINE_CMAP_CLASS( \ + class_, \ + size_, \ + init_, \ + done_, \ + char_index_, \ + char_next_, \ + char_var_index_, \ + char_var_default_, \ + variant_list_, \ + charvariant_list_, \ + variantchar_list_ ) \ + FT_CALLBACK_TABLE_DEF \ + const FT_CMap_ClassRec class_ = \ + { \ + size_, \ + init_, \ + done_, \ + char_index_, \ + char_next_, \ + char_var_index_, \ + char_var_default_, \ + variant_list_, \ + charvariant_list_, \ + variantchar_list_ \ + }; + + + /* create a new charmap and add it to charmap->face */ + FT_BASE( FT_Error ) + FT_CMap_New( FT_CMap_Class clazz, + FT_Pointer init_data, + FT_CharMap charmap, + FT_CMap *acmap ); + + /* destroy a charmap and remove it from 
face's list */ + FT_BASE( void ) + FT_CMap_Done( FT_CMap cmap ); + + + /* add LCD padding to CBox */ + FT_BASE( void ) + ft_lcd_padding( FT_BBox* cbox, + FT_GlyphSlot slot, + FT_Render_Mode mode ); + +#ifdef FT_CONFIG_OPTION_SUBPIXEL_RENDERING + + typedef void (*FT_Bitmap_LcdFilterFunc)( FT_Bitmap* bitmap, + FT_Byte* weights ); + + + /* This is the default LCD filter, an in-place, 5-tap FIR filter. */ + FT_BASE( void ) + ft_lcd_filter_fir( FT_Bitmap* bitmap, + FT_LcdFiveTapFilter weights ); + +#endif /* FT_CONFIG_OPTION_SUBPIXEL_RENDERING */ + + /************************************************************************** + * + * @struct: + * FT_Face_InternalRec + * + * @description: + * This structure contains the internal fields of each FT_Face object. + * These fields may change between different releases of FreeType. + * + * @fields: + * max_points :: + * The maximum number of points used to store the vectorial outline of + * any glyph in this face. If this value cannot be known in advance, + * or if the face isn't scalable, this should be set to 0. Only + * relevant for scalable formats. + * + * max_contours :: + * The maximum number of contours used to store the vectorial outline + * of any glyph in this face. If this value cannot be known in + * advance, or if the face isn't scalable, this should be set to 0. + * Only relevant for scalable formats. + * + * transform_matrix :: + * A 2x2 matrix of 16.16 coefficients used to transform glyph outlines + * after they are loaded from the font. Only used by the convenience + * functions. + * + * transform_delta :: + * A translation vector used to transform glyph outlines after they are + * loaded from the font. Only used by the convenience functions. + * + * transform_flags :: + * Some flags used to classify the transform. Only used by the + * convenience functions. + * + * services :: + * A cache for frequently used services. It should be only accessed + * with the macro `FT_FACE_LOOKUP_SERVICE`. + * + * incremental_interface :: + * If non-null, the interface through which glyph data and metrics are + * loaded incrementally for faces that do not provide all of this data + * when first opened. This field exists only if + * @FT_CONFIG_OPTION_INCREMENTAL is defined. + * + * no_stem_darkening :: + * Overrides the module-level default, see @stem-darkening[cff], for + * example. FALSE and TRUE toggle stem darkening on and off, + * respectively, value~-1 means to use the module/driver default. + * + * random_seed :: + * If positive, override the seed value for the CFF 'random' operator. + * Value~0 means to use the font's value. Value~-1 means to use the + * CFF driver's default. + * + * lcd_weights :: + * lcd_filter_func :: + * These fields specify the LCD filtering weights and callback function + * for ClearType-style subpixel rendering. + * + * refcount :: + * A counter initialized to~1 at the time an @FT_Face structure is + * created. @FT_Reference_Face increments this counter, and + * @FT_Done_Face only destroys a face if the counter is~1, otherwise it + * simply decrements it. 
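The `refcount` field backs the shared-ownership scheme exposed through the public API. A sketch of two independent owners of one face (the file name and the `library`, `face`, `error` variables are illustrative only):

    /* owner A opens the face; the counter starts at 1        */
    error = FT_New_Face( library, "foo.ttf", 0, &face );

    /* owner B keeps its own reference; the counter becomes 2 */
    error = FT_Reference_Face( face );

    FT_Done_Face( face );   /* owner B done: counter drops to 1  */
    FT_Done_Face( face );   /* owner A done: the face is freed   */
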
+ */ + typedef struct FT_Face_InternalRec_ + { + FT_Matrix transform_matrix; + FT_Vector transform_delta; + FT_Int transform_flags; + + FT_ServiceCacheRec services; + +#ifdef FT_CONFIG_OPTION_INCREMENTAL + FT_Incremental_InterfaceRec* incremental_interface; +#endif + + FT_Char no_stem_darkening; + FT_Int32 random_seed; + +#ifdef FT_CONFIG_OPTION_SUBPIXEL_RENDERING + FT_LcdFiveTapFilter lcd_weights; /* filter weights, if any */ + FT_Bitmap_LcdFilterFunc lcd_filter_func; /* filtering callback */ +#endif + + FT_Int refcount; + + } FT_Face_InternalRec; + + + /************************************************************************** + * + * @struct: + * FT_Slot_InternalRec + * + * @description: + * This structure contains the internal fields of each FT_GlyphSlot + * object. These fields may change between different releases of + * FreeType. + * + * @fields: + * loader :: + * The glyph loader object used to load outlines into the glyph slot. + * + * flags :: + * Possible values are zero or FT_GLYPH_OWN_BITMAP. The latter + * indicates that the FT_GlyphSlot structure owns the bitmap buffer. + * + * glyph_transformed :: + * Boolean. Set to TRUE when the loaded glyph must be transformed + * through a specific font transformation. This is _not_ the same as + * the face transform set through FT_Set_Transform(). + * + * glyph_matrix :: + * The 2x2 matrix corresponding to the glyph transformation, if + * necessary. + * + * glyph_delta :: + * The 2d translation vector corresponding to the glyph transformation, + * if necessary. + * + * glyph_hints :: + * Format-specific glyph hints management. + * + * load_flags :: + * The load flags passed as an argument to @FT_Load_Glyph while + * initializing the glyph slot. + */ + +#define FT_GLYPH_OWN_BITMAP 0x1U +#define FT_GLYPH_OWN_GZIP_SVG 0x2U + + typedef struct FT_Slot_InternalRec_ + { + FT_GlyphLoader loader; + FT_UInt flags; + FT_Bool glyph_transformed; + FT_Matrix glyph_matrix; + FT_Vector glyph_delta; + void* glyph_hints; + + FT_Int32 load_flags; + + } FT_GlyphSlot_InternalRec; + + + /************************************************************************** + * + * @struct: + * FT_Size_InternalRec + * + * @description: + * This structure contains the internal fields of each FT_Size object. + * + * @fields: + * module_data :: + * Data specific to a driver module. + * + * autohint_mode :: + * The used auto-hinting mode. + * + * autohint_metrics :: + * Metrics used by the auto-hinter. + * + */ + + typedef struct FT_Size_InternalRec_ + { + void* module_data; + + FT_Render_Mode autohint_mode; + FT_Size_Metrics autohint_metrics; + + } FT_Size_InternalRec; + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /**** ****/ + /**** ****/ + /**** M O D U L E S ****/ + /**** ****/ + /**** ****/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * @struct: + * FT_ModuleRec + * + * @description: + * A module object instance. + * + * @fields: + * clazz :: + * A pointer to the module's class. + * + * library :: + * A handle to the parent library object. + * + * memory :: + * A handle to the memory manager. 
+ */ + typedef struct FT_ModuleRec_ + { + FT_Module_Class* clazz; + FT_Library library; + FT_Memory memory; + + } FT_ModuleRec; + + + /* typecast an object to an FT_Module */ +#define FT_MODULE( x ) ( (FT_Module)(x) ) + +#define FT_MODULE_CLASS( x ) FT_MODULE( x )->clazz +#define FT_MODULE_LIBRARY( x ) FT_MODULE( x )->library +#define FT_MODULE_MEMORY( x ) FT_MODULE( x )->memory + + +#define FT_MODULE_IS_DRIVER( x ) ( FT_MODULE_CLASS( x )->module_flags & \ + FT_MODULE_FONT_DRIVER ) + +#define FT_MODULE_IS_RENDERER( x ) ( FT_MODULE_CLASS( x )->module_flags & \ + FT_MODULE_RENDERER ) + +#define FT_MODULE_IS_HINTER( x ) ( FT_MODULE_CLASS( x )->module_flags & \ + FT_MODULE_HINTER ) + +#define FT_MODULE_IS_STYLER( x ) ( FT_MODULE_CLASS( x )->module_flags & \ + FT_MODULE_STYLER ) + +#define FT_DRIVER_IS_SCALABLE( x ) ( FT_MODULE_CLASS( x )->module_flags & \ + FT_MODULE_DRIVER_SCALABLE ) + +#define FT_DRIVER_USES_OUTLINES( x ) !( FT_MODULE_CLASS( x )->module_flags & \ + FT_MODULE_DRIVER_NO_OUTLINES ) + +#define FT_DRIVER_HAS_HINTER( x ) ( FT_MODULE_CLASS( x )->module_flags & \ + FT_MODULE_DRIVER_HAS_HINTER ) + +#define FT_DRIVER_HINTS_LIGHTLY( x ) ( FT_MODULE_CLASS( x )->module_flags & \ + FT_MODULE_DRIVER_HINTS_LIGHTLY ) + + + /************************************************************************** + * + * @function: + * FT_Get_Module_Interface + * + * @description: + * Finds a module and returns its specific interface as a typeless + * pointer. + * + * @input: + * library :: + * A handle to the library object. + * + * module_name :: + * The module's name (as an ASCII string). + * + * @return: + * A module-specific interface if available, 0 otherwise. + * + * @note: + * You should better be familiar with FreeType internals to know which + * module to look for, and what its interface is :-) + */ + FT_BASE( const void* ) + FT_Get_Module_Interface( FT_Library library, + const char* mod_name ); + + FT_BASE( FT_Pointer ) + ft_module_get_service( FT_Module module, + const char* service_id, + FT_Bool global ); + +#ifdef FT_CONFIG_OPTION_ENVIRONMENT_PROPERTIES + FT_BASE( FT_Error ) + ft_property_string_set( FT_Library library, + const FT_String* module_name, + const FT_String* property_name, + FT_String* value ); +#endif + + /* */ + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /**** ****/ + /**** ****/ + /**** F A C E, S I Z E & G L Y P H S L O T O B J E C T S ****/ + /**** ****/ + /**** ****/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + /* a few macros used to perform easy typecasts with minimal brain damage */ + +#define FT_FACE( x ) ( (FT_Face)(x) ) +#define FT_SIZE( x ) ( (FT_Size)(x) ) +#define FT_SLOT( x ) ( (FT_GlyphSlot)(x) ) + +#define FT_FACE_DRIVER( x ) FT_FACE( x )->driver +#define FT_FACE_LIBRARY( x ) FT_FACE_DRIVER( x )->root.library +#define FT_FACE_MEMORY( x ) FT_FACE( x )->memory +#define FT_FACE_STREAM( x ) FT_FACE( x )->stream + +#define FT_SIZE_FACE( x ) FT_SIZE( x )->face +#define FT_SLOT_FACE( x ) FT_SLOT( x )->face + +#define FT_FACE_SLOT( x ) FT_FACE( x )->glyph +#define FT_FACE_SIZE( x ) FT_FACE( x )->size + + + /************************************************************************** + * + * 
@function: + * FT_New_GlyphSlot + * + * @description: + * It is sometimes useful to have more than one glyph slot for a given + * face object. This function is used to create additional slots. All + * of them are automatically discarded when the face is destroyed. + * + * @input: + * face :: + * A handle to a parent face object. + * + * @output: + * aslot :: + * A handle to a new glyph slot object. + * + * @return: + * FreeType error code. 0 means success. + */ + FT_BASE( FT_Error ) + FT_New_GlyphSlot( FT_Face face, + FT_GlyphSlot *aslot ); + + + /************************************************************************** + * + * @function: + * FT_Done_GlyphSlot + * + * @description: + * Destroys a given glyph slot. Remember however that all slots are + * automatically destroyed with its parent. Using this function is not + * always mandatory. + * + * @input: + * slot :: + * A handle to a target glyph slot. + */ + FT_BASE( void ) + FT_Done_GlyphSlot( FT_GlyphSlot slot ); + + /* */ + +#define FT_REQUEST_WIDTH( req ) \ + ( (req)->horiResolution \ + ? ( (req)->width * (FT_Pos)(req)->horiResolution + 36 ) / 72 \ + : (req)->width ) + +#define FT_REQUEST_HEIGHT( req ) \ + ( (req)->vertResolution \ + ? ( (req)->height * (FT_Pos)(req)->vertResolution + 36 ) / 72 \ + : (req)->height ) + + + /* Set the metrics according to a bitmap strike. */ + FT_BASE( void ) + FT_Select_Metrics( FT_Face face, + FT_ULong strike_index ); + + + /* Set the metrics according to a size request. */ + FT_BASE( FT_Error ) + FT_Request_Metrics( FT_Face face, + FT_Size_Request req ); + + + /* Match a size request against `available_sizes'. */ + FT_BASE( FT_Error ) + FT_Match_Size( FT_Face face, + FT_Size_Request req, + FT_Bool ignore_width, + FT_ULong* size_index ); + + + /* Use the horizontal metrics to synthesize the vertical metrics. */ + /* If `advance' is zero, it is also synthesized. */ + FT_BASE( void ) + ft_synthesize_vertical_metrics( FT_Glyph_Metrics* metrics, + FT_Pos advance ); + + + /* Free the bitmap of a given glyphslot when needed (i.e., only when it */ + /* was allocated with ft_glyphslot_alloc_bitmap). */ + FT_BASE( void ) + ft_glyphslot_free_bitmap( FT_GlyphSlot slot ); + + + /* Preset bitmap metrics of an outline glyphslot prior to rendering */ + /* and check whether the truncated bbox is too large for rendering. */ + FT_BASE( FT_Bool ) + ft_glyphslot_preset_bitmap( FT_GlyphSlot slot, + FT_Render_Mode mode, + const FT_Vector* origin ); + + /* Allocate a new bitmap buffer in a glyph slot. */ + FT_BASE( FT_Error ) + ft_glyphslot_alloc_bitmap( FT_GlyphSlot slot, + FT_ULong size ); + + + /* Set the bitmap buffer in a glyph slot to a given pointer. The buffer */ + /* will not be freed by a later call to ft_glyphslot_free_bitmap. 
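+
+     As an illustration of the ownership rule (variable names here are
+     placeholders): a buffer obtained with
+     `ft_glyphslot_alloc_bitmap( slot, size )' is owned by the slot
+     (cf. FT_GLYPH_OWN_BITMAP above), whereas a buffer installed with
+     `ft_glyphslot_set_bitmap( slot, my_buffer )' remains owned by the
+     caller, which must release it itself.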
*/ + FT_BASE( void ) + ft_glyphslot_set_bitmap( FT_GlyphSlot slot, + FT_Byte* buffer ); + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /**** ****/ + /**** ****/ + /**** R E N D E R E R S ****/ + /**** ****/ + /**** ****/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + +#define FT_RENDERER( x ) ( (FT_Renderer)(x) ) +#define FT_GLYPH( x ) ( (FT_Glyph)(x) ) +#define FT_BITMAP_GLYPH( x ) ( (FT_BitmapGlyph)(x) ) +#define FT_OUTLINE_GLYPH( x ) ( (FT_OutlineGlyph)(x) ) + + + typedef struct FT_RendererRec_ + { + FT_ModuleRec root; + FT_Renderer_Class* clazz; + FT_Glyph_Format glyph_format; + FT_Glyph_Class glyph_class; + + FT_Raster raster; + FT_Raster_Render_Func raster_render; + FT_Renderer_RenderFunc render; + + } FT_RendererRec; + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /**** ****/ + /**** ****/ + /**** F O N T D R I V E R S ****/ + /**** ****/ + /**** ****/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + + /* typecast a module into a driver easily */ +#define FT_DRIVER( x ) ( (FT_Driver)(x) ) + + /* typecast a module as a driver, and get its driver class */ +#define FT_DRIVER_CLASS( x ) FT_DRIVER( x )->clazz + + + /************************************************************************** + * + * @struct: + * FT_DriverRec + * + * @description: + * The root font driver class. A font driver is responsible for managing + * and loading font files of a given format. + * + * @fields: + * root :: + * Contains the fields of the root module class. + * + * clazz :: + * A pointer to the font driver's class. Note that this is NOT + * root.clazz. 'class' wasn't used as it is a reserved word in C++. + * + * faces_list :: + * The list of faces currently opened by this driver. + * + * glyph_loader :: + * Unused. Used to be glyph loader for all faces managed by this + * driver. + */ + typedef struct FT_DriverRec_ + { + FT_ModuleRec root; + FT_Driver_Class clazz; + FT_ListRec faces_list; + FT_GlyphLoader glyph_loader; + + } FT_DriverRec; + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /**** ****/ + /**** ****/ + /**** L I B R A R I E S ****/ + /**** ****/ + /**** ****/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * @struct: + * FT_LibraryRec + * + * @description: + * The FreeType library class. This is the root of all FreeType data. 
+   * Use FT_New_Library() to create a library object, and FT_Done_Library()
+   * to discard it and all child objects.
+   *
+   * @fields:
+   *   memory ::
+   *     The library's memory object.  Manages memory allocation.
+   *
+   *   version_major ::
+   *     The major version number of the library.
+   *
+   *   version_minor ::
+   *     The minor version number of the library.
+   *
+   *   version_patch ::
+   *     The current patch level of the library.
+   *
+   *   num_modules ::
+   *     The number of modules currently registered within this library.
+   *     This is set to 0 for new libraries.  New modules are added through
+   *     the FT_Add_Module() API function.
+   *
+   *   modules ::
+   *     A table used to store handles to the currently registered
+   *     modules.  Note that each font driver contains a list of its opened
+   *     faces.
+   *
+   *   renderers ::
+   *     The list of renderers currently registered within the library.
+   *
+   *   cur_renderer ::
+   *     The current outline renderer.  This is a shortcut used to avoid
+   *     parsing the list on each call to FT_Outline_Render().  It is a
+   *     handle to the current renderer for the FT_GLYPH_FORMAT_OUTLINE
+   *     format.
+   *
+   *   auto_hinter ::
+   *     The auto-hinter module interface.
+   *
+   *   debug_hooks ::
+   *     An array of four function pointers that allow debuggers to hook
+   *     into a font format's interpreter.  Currently, only the TrueType
+   *     bytecode debugger uses this.
+   *
+   *   lcd_weights ::
+   *     The LCD filter weights for ClearType-style subpixel rendering.
+   *
+   *   lcd_filter_func ::
+   *     The LCD filtering callback function for ClearType-style subpixel
+   *     rendering.
+   *
+   *   lcd_geometry ::
+   *     This array specifies LCD subpixel geometry and controls the Harmony
+   *     LCD rendering technique, an alternative to ClearType.
+   *
+   *   pic_container ::
+   *     Contains global structs and tables, instead of defining them
+   *     globally.
+   *
+   *   refcount ::
+   *     A counter initialized to~1 at the time an @FT_Library structure is
+   *     created.  @FT_Reference_Library increments this counter, and
+   *     @FT_Done_Library only destroys a library if the counter is~1,
+   *     otherwise it simply decrements it.
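+   *
+   *     A minimal usage sketch of this counter, relying on the public
+   *     @FT_Reference_Library API:
+   *
+   *       FT_Init_FreeType( &library );      counter == 1
+   *       FT_Reference_Library( library );   counter == 2
+   *       FT_Done_Library( library );        counter == 1, still alive
+   *       FT_Done_Library( library );        counter == 0, destroyed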
+ */ + typedef struct FT_LibraryRec_ + { + FT_Memory memory; /* library's memory manager */ + + FT_Int version_major; + FT_Int version_minor; + FT_Int version_patch; + + FT_UInt num_modules; + FT_Module modules[FT_MAX_MODULES]; /* module objects */ + + FT_ListRec renderers; /* list of renderers */ + FT_Renderer cur_renderer; /* current outline renderer */ + FT_Module auto_hinter; + + FT_DebugHook_Func debug_hooks[4]; + +#ifdef FT_CONFIG_OPTION_SUBPIXEL_RENDERING + FT_LcdFiveTapFilter lcd_weights; /* filter weights, if any */ + FT_Bitmap_LcdFilterFunc lcd_filter_func; /* filtering callback */ +#else + FT_Vector lcd_geometry[3]; /* RGB subpixel positions */ +#endif + + FT_Int refcount; + + } FT_LibraryRec; + + + FT_BASE( FT_Renderer ) + FT_Lookup_Renderer( FT_Library library, + FT_Glyph_Format format, + FT_ListNode* node ); + + FT_BASE( FT_Error ) + FT_Render_Glyph_Internal( FT_Library library, + FT_GlyphSlot slot, + FT_Render_Mode render_mode ); + + typedef const char* + (*FT_Face_GetPostscriptNameFunc)( FT_Face face ); + + typedef FT_Error + (*FT_Face_GetGlyphNameFunc)( FT_Face face, + FT_UInt glyph_index, + FT_Pointer buffer, + FT_UInt buffer_max ); + + typedef FT_UInt + (*FT_Face_GetGlyphNameIndexFunc)( FT_Face face, + const FT_String* glyph_name ); + + +#ifndef FT_CONFIG_OPTION_NO_DEFAULT_SYSTEM + + /************************************************************************** + * + * @function: + * FT_New_Memory + * + * @description: + * Creates a new memory object. + * + * @return: + * A pointer to the new memory object. 0 in case of error. + */ + FT_BASE( FT_Memory ) + FT_New_Memory( void ); + + + /************************************************************************** + * + * @function: + * FT_Done_Memory + * + * @description: + * Discards memory manager. + * + * @input: + * memory :: + * A handle to the memory manager. + */ + FT_BASE( void ) + FT_Done_Memory( FT_Memory memory ); + +#endif /* !FT_CONFIG_OPTION_NO_DEFAULT_SYSTEM */ + + + /* Define default raster's interface. The default raster is located in */ + /* `src/base/ftraster.c'. */ + /* */ + /* Client applications can register new rasters through the */ + /* FT_Set_Raster() API. */ + +#ifndef FT_NO_DEFAULT_RASTER + FT_EXPORT_VAR( FT_Raster_Funcs ) ft_default_raster; +#endif + + + /************************************************************************** + * + * @macro: + * FT_DEFINE_OUTLINE_FUNCS + * + * @description: + * Used to initialize an instance of FT_Outline_Funcs struct. The struct + * will be allocated in the global scope (or the scope where the macro is + * used). + */ +#define FT_DEFINE_OUTLINE_FUNCS( \ + class_, \ + move_to_, \ + line_to_, \ + conic_to_, \ + cubic_to_, \ + shift_, \ + delta_ ) \ + static const FT_Outline_Funcs class_ = \ + { \ + move_to_, \ + line_to_, \ + conic_to_, \ + cubic_to_, \ + shift_, \ + delta_ \ + }; + + + /************************************************************************** + * + * @macro: + * FT_DEFINE_RASTER_FUNCS + * + * @description: + * Used to initialize an instance of FT_Raster_Funcs struct. The struct + * will be allocated in the global scope (or the scope where the macro is + * used). 
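+   *
+   *   A typical use looks like this (the `my_*' names are placeholders,
+   *   not actual FreeType symbols):
+   *
+   *     FT_DEFINE_RASTER_FUNCS(
+   *       my_raster_funcs,
+   *
+   *       FT_GLYPH_FORMAT_OUTLINE,
+   *
+   *       my_raster_new,
+   *       my_raster_reset,
+   *       my_raster_set_mode,
+   *       my_raster_render,
+   *       my_raster_done )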
+ */ +#define FT_DEFINE_RASTER_FUNCS( \ + class_, \ + glyph_format_, \ + raster_new_, \ + raster_reset_, \ + raster_set_mode_, \ + raster_render_, \ + raster_done_ ) \ + const FT_Raster_Funcs class_ = \ + { \ + glyph_format_, \ + raster_new_, \ + raster_reset_, \ + raster_set_mode_, \ + raster_render_, \ + raster_done_ \ + }; + + + + /************************************************************************** + * + * @macro: + * FT_DEFINE_GLYPH + * + * @description: + * The struct will be allocated in the global scope (or the scope where + * the macro is used). + */ +#define FT_DECLARE_GLYPH( class_ ) \ + FT_CALLBACK_TABLE const FT_Glyph_Class class_; + +#define FT_DEFINE_GLYPH( \ + class_, \ + size_, \ + format_, \ + init_, \ + done_, \ + copy_, \ + transform_, \ + bbox_, \ + prepare_ ) \ + FT_CALLBACK_TABLE_DEF \ + const FT_Glyph_Class class_ = \ + { \ + size_, \ + format_, \ + init_, \ + done_, \ + copy_, \ + transform_, \ + bbox_, \ + prepare_ \ + }; + + + /************************************************************************** + * + * @macro: + * FT_DECLARE_RENDERER + * + * @description: + * Used to create a forward declaration of a FT_Renderer_Class struct + * instance. + * + * @macro: + * FT_DEFINE_RENDERER + * + * @description: + * Used to initialize an instance of FT_Renderer_Class struct. + * + * The struct will be allocated in the global scope (or the scope where + * the macro is used). + */ +#define FT_DECLARE_RENDERER( class_ ) \ + FT_EXPORT_VAR( const FT_Renderer_Class ) class_; + +#define FT_DEFINE_RENDERER( \ + class_, \ + flags_, \ + size_, \ + name_, \ + version_, \ + requires_, \ + interface_, \ + init_, \ + done_, \ + get_interface_, \ + glyph_format_, \ + render_glyph_, \ + transform_glyph_, \ + get_glyph_cbox_, \ + set_mode_, \ + raster_class_ ) \ + FT_CALLBACK_TABLE_DEF \ + const FT_Renderer_Class class_ = \ + { \ + FT_DEFINE_ROOT_MODULE( flags_, \ + size_, \ + name_, \ + version_, \ + requires_, \ + interface_, \ + init_, \ + done_, \ + get_interface_ ) \ + glyph_format_, \ + \ + render_glyph_, \ + transform_glyph_, \ + get_glyph_cbox_, \ + set_mode_, \ + \ + raster_class_ \ + }; + + + /************************************************************************** + * + * @macro: + * FT_DECLARE_MODULE + * + * @description: + * Used to create a forward declaration of a FT_Module_Class struct + * instance. + * + * @macro: + * FT_DEFINE_MODULE + * + * @description: + * Used to initialize an instance of an FT_Module_Class struct. + * + * The struct will be allocated in the global scope (or the scope where + * the macro is used). + * + * @macro: + * FT_DEFINE_ROOT_MODULE + * + * @description: + * Used to initialize an instance of an FT_Module_Class struct inside + * another struct that contains it or in a function that initializes that + * containing struct. 
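+   *
+   * For illustration, a plain FT_DEFINE_MODULE call might read as follows;
+   * all `my_*' identifiers are placeholders, the zero means `no special
+   * module flags', and the two numbers are the module version and the
+   * required FreeType version (here 1.0 and 2.0):
+   *
+   *   FT_DEFINE_MODULE(
+   *     my_module_class,
+   *
+   *     0,
+   *     sizeof ( FT_ModuleRec ),
+   *     "my-module",
+   *     0x10000L,
+   *     0x20000L,
+   *
+   *     NULL,
+   *     my_module_init,
+   *     my_module_done,
+   *     my_module_get_interface )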
+ */ +#define FT_DECLARE_MODULE( class_ ) \ + FT_CALLBACK_TABLE \ + const FT_Module_Class class_; + +#define FT_DEFINE_ROOT_MODULE( \ + flags_, \ + size_, \ + name_, \ + version_, \ + requires_, \ + interface_, \ + init_, \ + done_, \ + get_interface_ ) \ + { \ + flags_, \ + size_, \ + \ + name_, \ + version_, \ + requires_, \ + \ + interface_, \ + \ + init_, \ + done_, \ + get_interface_, \ + }, + +#define FT_DEFINE_MODULE( \ + class_, \ + flags_, \ + size_, \ + name_, \ + version_, \ + requires_, \ + interface_, \ + init_, \ + done_, \ + get_interface_ ) \ + FT_CALLBACK_TABLE_DEF \ + const FT_Module_Class class_ = \ + { \ + flags_, \ + size_, \ + \ + name_, \ + version_, \ + requires_, \ + \ + interface_, \ + \ + init_, \ + done_, \ + get_interface_, \ + }; + + +FT_END_HEADER + +#endif /* FTOBJS_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/ftpsprop.h b/Example/include/freetype/internal/ftpsprop.h new file mode 100644 index 0000000..1d5b287 --- /dev/null +++ b/Example/include/freetype/internal/ftpsprop.h @@ -0,0 +1,47 @@ +/**************************************************************************** + * + * ftpsprop.h + * + * Get and set properties of PostScript drivers (specification). + * + * Copyright (C) 2017-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTPSPROP_H_ +#define FTPSPROP_H_ + + +#include + + +FT_BEGIN_HEADER + + + FT_BASE_CALLBACK( FT_Error ) + ps_property_set( FT_Module module, /* PS_Driver */ + const char* property_name, + const void* value, + FT_Bool value_is_string ); + + FT_BASE_CALLBACK( FT_Error ) + ps_property_get( FT_Module module, /* PS_Driver */ + const char* property_name, + void* value ); + + +FT_END_HEADER + + +#endif /* FTPSPROP_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/ftrfork.h b/Example/include/freetype/internal/ftrfork.h new file mode 100644 index 0000000..e964599 --- /dev/null +++ b/Example/include/freetype/internal/ftrfork.h @@ -0,0 +1,245 @@ +/**************************************************************************** + * + * ftrfork.h + * + * Embedded resource forks accessor (specification). + * + * Copyright (C) 2004-2023 by + * Masatake YAMATO and Redhat K.K. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + +/**************************************************************************** + * Development of the code in this file is support of + * Information-technology Promotion Agency, Japan. + */ + + +#ifndef FTRFORK_H_ +#define FTRFORK_H_ + + +#include + + +FT_BEGIN_HEADER + + + /* Number of guessing rules supported in `FT_Raccess_Guess'. */ + /* Don't forget to increment the number if you add a new guessing rule. */ +#define FT_RACCESS_N_RULES 9 + + + /* A structure to describe a reference in a resource by its resource ID */ + /* and internal offset. The `POST' resource expects to be concatenated */ + /* by the order of resource IDs instead of its appearance in the file. 
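+
+     For example, if a font's `POST' resources appear in the file in the
+     resource ID order 503, 501, 502, their data must nevertheless be
+     concatenated in ascending ID order (501, 502, 503) to restore the
+     Type1 font properly.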
*/ + + typedef struct FT_RFork_Ref_ + { + FT_Short res_id; + FT_Long offset; + + } FT_RFork_Ref; + + +#ifdef FT_CONFIG_OPTION_GUESSING_EMBEDDED_RFORK + typedef FT_Error + (*ft_raccess_guess_func)( FT_Library library, + FT_Stream stream, + char *base_file_name, + char **result_file_name, + FT_Long *result_offset ); + + typedef enum FT_RFork_Rule_ { + FT_RFork_Rule_invalid = -2, + FT_RFork_Rule_uknown, /* -1 */ + FT_RFork_Rule_apple_double, + FT_RFork_Rule_apple_single, + FT_RFork_Rule_darwin_ufs_export, + FT_RFork_Rule_darwin_newvfs, + FT_RFork_Rule_darwin_hfsplus, + FT_RFork_Rule_vfat, + FT_RFork_Rule_linux_cap, + FT_RFork_Rule_linux_double, + FT_RFork_Rule_linux_netatalk + } FT_RFork_Rule; + + /* For fast translation between rule index and rule type, + * the macros FT_RFORK_xxx should be kept consistent with the + * raccess_guess_funcs table + */ + typedef struct ft_raccess_guess_rec_ { + ft_raccess_guess_func func; + FT_RFork_Rule type; + } ft_raccess_guess_rec; + + +#define CONST_FT_RFORK_RULE_ARRAY_BEGIN( name, type ) \ + static const type name[] = { +#define CONST_FT_RFORK_RULE_ARRAY_ENTRY( func_suffix, type_suffix ) \ + { raccess_guess_ ## func_suffix, \ + FT_RFork_Rule_ ## type_suffix }, + /* this array is a storage, thus a final `;' is needed */ +#define CONST_FT_RFORK_RULE_ARRAY_END }; + +#endif /* FT_CONFIG_OPTION_GUESSING_EMBEDDED_RFORK */ + + + /************************************************************************** + * + * @function: + * FT_Raccess_Guess + * + * @description: + * Guess a file name and offset where the actual resource fork is stored. + * The macro FT_RACCESS_N_RULES holds the number of guessing rules; the + * guessed result for the Nth rule is represented as a triplet: a new + * file name (new_names[N]), a file offset (offsets[N]), and an error + * code (errors[N]). + * + * @input: + * library :: + * A FreeType library instance. + * + * stream :: + * A file stream containing the resource fork. + * + * base_name :: + * The (base) file name of the resource fork used for some guessing + * rules. + * + * @output: + * new_names :: + * An array of guessed file names in which the resource forks may + * exist. If 'new_names[N]' is `NULL`, the guessed file name is equal + * to `base_name`. + * + * offsets :: + * An array of guessed file offsets. 'offsets[N]' holds the file + * offset of the possible start of the resource fork in file + * 'new_names[N]'. + * + * errors :: + * An array of FreeType error codes. 'errors[N]' is the error code of + * Nth guessing rule function. If 'errors[N]' is not FT_Err_Ok, + * 'new_names[N]' and 'offsets[N]' are meaningless. + */ + FT_BASE( void ) + FT_Raccess_Guess( FT_Library library, + FT_Stream stream, + char* base_name, + char** new_names, + FT_Long* offsets, + FT_Error* errors ); + + + /************************************************************************** + * + * @function: + * FT_Raccess_Get_HeaderInfo + * + * @description: + * Get the information from the header of resource fork. The information + * includes the file offset where the resource map starts, and the file + * offset where the resource data starts. `FT_Raccess_Get_DataOffsets` + * requires these two data. + * + * @input: + * library :: + * A FreeType library instance. + * + * stream :: + * A file stream containing the resource fork. + * + * rfork_offset :: + * The file offset where the resource fork starts. + * + * @output: + * map_offset :: + * The file offset where the resource map starts. 
+ * + * rdata_pos :: + * The file offset where the resource data starts. + * + * @return: + * FreeType error code. FT_Err_Ok means success. + */ + FT_BASE( FT_Error ) + FT_Raccess_Get_HeaderInfo( FT_Library library, + FT_Stream stream, + FT_Long rfork_offset, + FT_Long *map_offset, + FT_Long *rdata_pos ); + + + /************************************************************************** + * + * @function: + * FT_Raccess_Get_DataOffsets + * + * @description: + * Get the data offsets for a tag in a resource fork. Offsets are stored + * in an array because, in some cases, resources in a resource fork have + * the same tag. + * + * @input: + * library :: + * A FreeType library instance. + * + * stream :: + * A file stream containing the resource fork. + * + * map_offset :: + * The file offset where the resource map starts. + * + * rdata_pos :: + * The file offset where the resource data starts. + * + * tag :: + * The resource tag. + * + * sort_by_res_id :: + * A Boolean to sort the fragmented resource by their ids. The + * fragmented resources for 'POST' resource should be sorted to restore + * Type1 font properly. For 'sfnt' resources, sorting may induce a + * different order of the faces in comparison to that by QuickDraw API. + * + * @output: + * offsets :: + * The stream offsets for the resource data specified by 'tag'. This + * array is allocated by the function, so you have to call @ft_mem_free + * after use. + * + * count :: + * The length of offsets array. + * + * @return: + * FreeType error code. FT_Err_Ok means success. + * + * @note: + * Normally you should use `FT_Raccess_Get_HeaderInfo` to get the value + * for `map_offset` and `rdata_pos`. + */ + FT_BASE( FT_Error ) + FT_Raccess_Get_DataOffsets( FT_Library library, + FT_Stream stream, + FT_Long map_offset, + FT_Long rdata_pos, + FT_Long tag, + FT_Bool sort_by_res_id, + FT_Long **offsets, + FT_Long *count ); + + +FT_END_HEADER + +#endif /* FTRFORK_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/ftserv.h b/Example/include/freetype/internal/ftserv.h new file mode 100644 index 0000000..1e85d6d --- /dev/null +++ b/Example/include/freetype/internal/ftserv.h @@ -0,0 +1,495 @@ +/**************************************************************************** + * + * ftserv.h + * + * The FreeType services (specification only). + * + * Copyright (C) 2003-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + /************************************************************************** + * + * Each module can export one or more 'services'. Each service is + * identified by a constant string and modeled by a pointer; the latter + * generally corresponds to a structure containing function pointers. + * + * Note that a service's data cannot be a mere function pointer because in + * C it is possible that function pointers might be implemented differently + * than data pointers (e.g. 48 bits instead of 32). 
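+   *
+   * As an illustrative sketch, a lookup through the FT_FACE_FIND_SERVICE
+   * macro defined below might look like this (the structure and its
+   * `get_name' field come from the glyph dictionary service; the local
+   * variables are placeholders):
+   *
+   *   FT_Service_GlyphDict  service;
+   *
+   *
+   *   FT_FACE_FIND_SERVICE( face, service, GLYPH_DICT );
+   *   if ( service )
+   *     error = service->get_name( face, glyph_index, buffer, buffer_max );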
+ * + */ + + +#ifndef FTSERV_H_ +#define FTSERV_H_ + +#include "compiler-macros.h" + +FT_BEGIN_HEADER + + /************************************************************************** + * + * @macro: + * FT_FACE_FIND_SERVICE + * + * @description: + * This macro is used to look up a service from a face's driver module. + * + * @input: + * face :: + * The source face handle. + * + * id :: + * A string describing the service as defined in the service's header + * files (e.g. FT_SERVICE_ID_MULTI_MASTERS which expands to + * 'multi-masters'). It is automatically prefixed with + * `FT_SERVICE_ID_`. + * + * @output: + * ptr :: + * A variable that receives the service pointer. Will be `NULL` if not + * found. + */ +#ifdef __cplusplus + +#define FT_FACE_FIND_SERVICE( face, ptr, id ) \ + FT_BEGIN_STMNT \ + FT_Module module = FT_MODULE( FT_FACE( face )->driver ); \ + FT_Pointer _tmp_ = NULL; \ + FT_Pointer* _pptr_ = (FT_Pointer*)&(ptr); \ + \ + \ + if ( module->clazz->get_interface ) \ + _tmp_ = module->clazz->get_interface( module, FT_SERVICE_ID_ ## id ); \ + *_pptr_ = _tmp_; \ + FT_END_STMNT + +#else /* !C++ */ + +#define FT_FACE_FIND_SERVICE( face, ptr, id ) \ + FT_BEGIN_STMNT \ + FT_Module module = FT_MODULE( FT_FACE( face )->driver ); \ + FT_Pointer _tmp_ = NULL; \ + \ + if ( module->clazz->get_interface ) \ + _tmp_ = module->clazz->get_interface( module, FT_SERVICE_ID_ ## id ); \ + ptr = _tmp_; \ + FT_END_STMNT + +#endif /* !C++ */ + + + /************************************************************************** + * + * @macro: + * FT_FACE_FIND_GLOBAL_SERVICE + * + * @description: + * This macro is used to look up a service from all modules. + * + * @input: + * face :: + * The source face handle. + * + * id :: + * A string describing the service as defined in the service's header + * files (e.g. FT_SERVICE_ID_MULTI_MASTERS which expands to + * 'multi-masters'). It is automatically prefixed with + * `FT_SERVICE_ID_`. + * + * @output: + * ptr :: + * A variable that receives the service pointer. Will be `NULL` if not + * found. + */ +#ifdef __cplusplus + +#define FT_FACE_FIND_GLOBAL_SERVICE( face, ptr, id ) \ + FT_BEGIN_STMNT \ + FT_Module module = FT_MODULE( FT_FACE( face )->driver ); \ + FT_Pointer _tmp_; \ + FT_Pointer* _pptr_ = (FT_Pointer*)&(ptr); \ + \ + \ + _tmp_ = ft_module_get_service( module, FT_SERVICE_ID_ ## id, 1 ); \ + *_pptr_ = _tmp_; \ + FT_END_STMNT + +#else /* !C++ */ + +#define FT_FACE_FIND_GLOBAL_SERVICE( face, ptr, id ) \ + FT_BEGIN_STMNT \ + FT_Module module = FT_MODULE( FT_FACE( face )->driver ); \ + FT_Pointer _tmp_; \ + \ + \ + _tmp_ = ft_module_get_service( module, FT_SERVICE_ID_ ## id, 1 ); \ + ptr = _tmp_; \ + FT_END_STMNT + +#endif /* !C++ */ + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** S E R V I C E D E S C R I P T O R S *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + + /* + * The following structure is used to _describe_ a given service to the + * library. This is useful to build simple static service lists. 
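+   *
+   * For example, using the FT_DEFINE_SERVICEDESCREC2 macro below, a driver
+   * exposing two services could write (the `my_*' names are placeholders):
+   *
+   *   FT_DEFINE_SERVICEDESCREC2(
+   *     my_services,
+   *
+   *     FT_SERVICE_ID_GLYPH_DICT,            &my_glyph_dict_service,
+   *     FT_SERVICE_ID_POSTSCRIPT_FONT_NAME,  &my_ps_name_service )
+   *
+   * and hand `my_services' to `ft_service_list_lookup' (declared further
+   * below) from its `get_interface' method.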
+ */ + typedef struct FT_ServiceDescRec_ + { + const char* serv_id; /* service name */ + const void* serv_data; /* service pointer/data */ + + } FT_ServiceDescRec; + + typedef const FT_ServiceDescRec* FT_ServiceDesc; + + + /************************************************************************** + * + * @macro: + * FT_DEFINE_SERVICEDESCREC1 + * FT_DEFINE_SERVICEDESCREC2 + * FT_DEFINE_SERVICEDESCREC3 + * FT_DEFINE_SERVICEDESCREC4 + * FT_DEFINE_SERVICEDESCREC5 + * FT_DEFINE_SERVICEDESCREC6 + * FT_DEFINE_SERVICEDESCREC7 + * FT_DEFINE_SERVICEDESCREC8 + * FT_DEFINE_SERVICEDESCREC9 + * FT_DEFINE_SERVICEDESCREC10 + * + * @description: + * Used to initialize an array of FT_ServiceDescRec structures. + * + * The array will be allocated in the global scope (or the scope where + * the macro is used). + */ +#define FT_DEFINE_SERVICEDESCREC1( class_, \ + serv_id_1, serv_data_1 ) \ + static const FT_ServiceDescRec class_[] = \ + { \ + { serv_id_1, serv_data_1 }, \ + { NULL, NULL } \ + }; + +#define FT_DEFINE_SERVICEDESCREC2( class_, \ + serv_id_1, serv_data_1, \ + serv_id_2, serv_data_2 ) \ + static const FT_ServiceDescRec class_[] = \ + { \ + { serv_id_1, serv_data_1 }, \ + { serv_id_2, serv_data_2 }, \ + { NULL, NULL } \ + }; + +#define FT_DEFINE_SERVICEDESCREC3( class_, \ + serv_id_1, serv_data_1, \ + serv_id_2, serv_data_2, \ + serv_id_3, serv_data_3 ) \ + static const FT_ServiceDescRec class_[] = \ + { \ + { serv_id_1, serv_data_1 }, \ + { serv_id_2, serv_data_2 }, \ + { serv_id_3, serv_data_3 }, \ + { NULL, NULL } \ + }; + +#define FT_DEFINE_SERVICEDESCREC4( class_, \ + serv_id_1, serv_data_1, \ + serv_id_2, serv_data_2, \ + serv_id_3, serv_data_3, \ + serv_id_4, serv_data_4 ) \ + static const FT_ServiceDescRec class_[] = \ + { \ + { serv_id_1, serv_data_1 }, \ + { serv_id_2, serv_data_2 }, \ + { serv_id_3, serv_data_3 }, \ + { serv_id_4, serv_data_4 }, \ + { NULL, NULL } \ + }; + +#define FT_DEFINE_SERVICEDESCREC5( class_, \ + serv_id_1, serv_data_1, \ + serv_id_2, serv_data_2, \ + serv_id_3, serv_data_3, \ + serv_id_4, serv_data_4, \ + serv_id_5, serv_data_5 ) \ + static const FT_ServiceDescRec class_[] = \ + { \ + { serv_id_1, serv_data_1 }, \ + { serv_id_2, serv_data_2 }, \ + { serv_id_3, serv_data_3 }, \ + { serv_id_4, serv_data_4 }, \ + { serv_id_5, serv_data_5 }, \ + { NULL, NULL } \ + }; + +#define FT_DEFINE_SERVICEDESCREC6( class_, \ + serv_id_1, serv_data_1, \ + serv_id_2, serv_data_2, \ + serv_id_3, serv_data_3, \ + serv_id_4, serv_data_4, \ + serv_id_5, serv_data_5, \ + serv_id_6, serv_data_6 ) \ + static const FT_ServiceDescRec class_[] = \ + { \ + { serv_id_1, serv_data_1 }, \ + { serv_id_2, serv_data_2 }, \ + { serv_id_3, serv_data_3 }, \ + { serv_id_4, serv_data_4 }, \ + { serv_id_5, serv_data_5 }, \ + { serv_id_6, serv_data_6 }, \ + { NULL, NULL } \ + }; + +#define FT_DEFINE_SERVICEDESCREC7( class_, \ + serv_id_1, serv_data_1, \ + serv_id_2, serv_data_2, \ + serv_id_3, serv_data_3, \ + serv_id_4, serv_data_4, \ + serv_id_5, serv_data_5, \ + serv_id_6, serv_data_6, \ + serv_id_7, serv_data_7 ) \ + static const FT_ServiceDescRec class_[] = \ + { \ + { serv_id_1, serv_data_1 }, \ + { serv_id_2, serv_data_2 }, \ + { serv_id_3, serv_data_3 }, \ + { serv_id_4, serv_data_4 }, \ + { serv_id_5, serv_data_5 }, \ + { serv_id_6, serv_data_6 }, \ + { serv_id_7, serv_data_7 }, \ + { NULL, NULL } \ + }; + +#define FT_DEFINE_SERVICEDESCREC8( class_, \ + serv_id_1, serv_data_1, \ + serv_id_2, serv_data_2, \ + serv_id_3, serv_data_3, \ + serv_id_4, serv_data_4, \ + serv_id_5, serv_data_5, \ + 
serv_id_6, serv_data_6, \ + serv_id_7, serv_data_7, \ + serv_id_8, serv_data_8 ) \ + static const FT_ServiceDescRec class_[] = \ + { \ + { serv_id_1, serv_data_1 }, \ + { serv_id_2, serv_data_2 }, \ + { serv_id_3, serv_data_3 }, \ + { serv_id_4, serv_data_4 }, \ + { serv_id_5, serv_data_5 }, \ + { serv_id_6, serv_data_6 }, \ + { serv_id_7, serv_data_7 }, \ + { serv_id_8, serv_data_8 }, \ + { NULL, NULL } \ + }; + +#define FT_DEFINE_SERVICEDESCREC9( class_, \ + serv_id_1, serv_data_1, \ + serv_id_2, serv_data_2, \ + serv_id_3, serv_data_3, \ + serv_id_4, serv_data_4, \ + serv_id_5, serv_data_5, \ + serv_id_6, serv_data_6, \ + serv_id_7, serv_data_7, \ + serv_id_8, serv_data_8, \ + serv_id_9, serv_data_9 ) \ + static const FT_ServiceDescRec class_[] = \ + { \ + { serv_id_1, serv_data_1 }, \ + { serv_id_2, serv_data_2 }, \ + { serv_id_3, serv_data_3 }, \ + { serv_id_4, serv_data_4 }, \ + { serv_id_5, serv_data_5 }, \ + { serv_id_6, serv_data_6 }, \ + { serv_id_7, serv_data_7 }, \ + { serv_id_8, serv_data_8 }, \ + { serv_id_9, serv_data_9 }, \ + { NULL, NULL } \ + }; + +#define FT_DEFINE_SERVICEDESCREC10( class_, \ + serv_id_1, serv_data_1, \ + serv_id_2, serv_data_2, \ + serv_id_3, serv_data_3, \ + serv_id_4, serv_data_4, \ + serv_id_5, serv_data_5, \ + serv_id_6, serv_data_6, \ + serv_id_7, serv_data_7, \ + serv_id_8, serv_data_8, \ + serv_id_9, serv_data_9, \ + serv_id_10, serv_data_10 ) \ + static const FT_ServiceDescRec class_[] = \ + { \ + { serv_id_1, serv_data_1 }, \ + { serv_id_2, serv_data_2 }, \ + { serv_id_3, serv_data_3 }, \ + { serv_id_4, serv_data_4 }, \ + { serv_id_5, serv_data_5 }, \ + { serv_id_6, serv_data_6 }, \ + { serv_id_7, serv_data_7 }, \ + { serv_id_8, serv_data_8 }, \ + { serv_id_9, serv_data_9 }, \ + { serv_id_10, serv_data_10 }, \ + { NULL, NULL } \ + }; + + + /* + * Parse a list of FT_ServiceDescRec descriptors and look for a specific + * service by ID. Note that the last element in the array must be { NULL, + * NULL }, and that the function should return NULL if the service isn't + * available. + * + * This function can be used by modules to implement their `get_service' + * method. + */ + FT_BASE( FT_Pointer ) + ft_service_list_lookup( FT_ServiceDesc service_descriptors, + const char* service_id ); + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** S E R V I C E S C A C H E *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + + /* + * This structure is used to store a cache for several frequently used + * services. It is the type of `face->internal->services'. You should + * only use FT_FACE_LOOKUP_SERVICE to access it. + * + * All fields should have the type FT_Pointer to relax compilation + * dependencies. We assume the developer isn't completely stupid. + * + * Each field must be named `service_XXXX' where `XXX' corresponds to the + * correct FT_SERVICE_ID_XXXX macro. See the definition of + * FT_FACE_LOOKUP_SERVICE below how this is implemented. 
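+   *
+   * A typical cached lookup, matching the `service_MULTI_MASTERS' field
+   * below, reads (the error code chosen here is illustrative):
+   *
+   *   FT_Service_MultiMasters  mm;
+   *
+   *
+   *   FT_FACE_LOOKUP_SERVICE( face, mm, MULTI_MASTERS );
+   *   if ( !mm )
+   *     return FT_THROW( Unimplemented_Feature );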
+ * + */ + typedef struct FT_ServiceCacheRec_ + { + FT_Pointer service_POSTSCRIPT_FONT_NAME; + FT_Pointer service_MULTI_MASTERS; + FT_Pointer service_METRICS_VARIATIONS; + FT_Pointer service_GLYPH_DICT; + FT_Pointer service_PFR_METRICS; + FT_Pointer service_WINFNT; + + } FT_ServiceCacheRec, *FT_ServiceCache; + + + /* + * A magic number used within the services cache. + */ + + /* ensure that value `1' has the same width as a pointer */ +#define FT_SERVICE_UNAVAILABLE ((FT_Pointer)~(FT_PtrDist)1) + + + /************************************************************************** + * + * @macro: + * FT_FACE_LOOKUP_SERVICE + * + * @description: + * This macro is used to look up a service from a face's driver module + * using its cache. + * + * @input: + * face :: + * The source face handle containing the cache. + * + * field :: + * The field name in the cache. + * + * id :: + * The service ID. + * + * @output: + * ptr :: + * A variable receiving the service data. `NULL` if not available. + */ +#ifdef __cplusplus + +#define FT_FACE_LOOKUP_SERVICE( face, ptr, id ) \ + FT_BEGIN_STMNT \ + FT_Pointer svc; \ + FT_Pointer* Pptr = (FT_Pointer*)&(ptr); \ + \ + \ + svc = FT_FACE( face )->internal->services. service_ ## id; \ + if ( svc == FT_SERVICE_UNAVAILABLE ) \ + svc = NULL; \ + else if ( svc == NULL ) \ + { \ + FT_FACE_FIND_SERVICE( face, svc, id ); \ + \ + FT_FACE( face )->internal->services. service_ ## id = \ + (FT_Pointer)( svc != NULL ? svc \ + : FT_SERVICE_UNAVAILABLE ); \ + } \ + *Pptr = svc; \ + FT_END_STMNT + +#else /* !C++ */ + +#define FT_FACE_LOOKUP_SERVICE( face, ptr, id ) \ + FT_BEGIN_STMNT \ + FT_Pointer svc; \ + \ + \ + svc = FT_FACE( face )->internal->services. service_ ## id; \ + if ( svc == FT_SERVICE_UNAVAILABLE ) \ + svc = NULL; \ + else if ( svc == NULL ) \ + { \ + FT_FACE_FIND_SERVICE( face, svc, id ); \ + \ + FT_FACE( face )->internal->services. service_ ## id = \ + (FT_Pointer)( svc != NULL ? svc \ + : FT_SERVICE_UNAVAILABLE ); \ + } \ + ptr = svc; \ + FT_END_STMNT + +#endif /* !C++ */ + + /* + * A macro used to define new service structure types. + */ + +#define FT_DEFINE_SERVICE( name ) \ + typedef struct FT_Service_ ## name ## Rec_ \ + FT_Service_ ## name ## Rec ; \ + typedef struct FT_Service_ ## name ## Rec_ \ + const * FT_Service_ ## name ; \ + struct FT_Service_ ## name ## Rec_ + + /* */ + +FT_END_HEADER + +#endif /* FTSERV_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/ftstream.h b/Example/include/freetype/internal/ftstream.h new file mode 100644 index 0000000..88e1928 --- /dev/null +++ b/Example/include/freetype/internal/ftstream.h @@ -0,0 +1,570 @@ +/**************************************************************************** + * + * ftstream.h + * + * Stream handling (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTSTREAM_H_ +#define FTSTREAM_H_ + + +#include +#include +#include + + +FT_BEGIN_HEADER + + + /* format of an 8-bit frame_op value: */ + /* */ + /* bit 76543210 */ + /* xxxxxxes */ + /* */ + /* s is set to 1 if the value is signed. */ + /* e is set to 1 if the value is little-endian. */ + /* xxx is a command. 
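+
+     For example, `ft_frame_ushort_le' below is
+     FT_MAKE_FRAME_OP( FT_FRAME_OP_SHORT, 1, 0 ), i.e.,
+     ( 3 << 2 ) | ( 1 << 1 ) | 0 == 0x0E, and
+     FT_FRAME_OP_COMMAND( 0x0E ) == 3 recovers FT_FRAME_OP_SHORT.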
*/ + +#define FT_FRAME_OP_SHIFT 2 +#define FT_FRAME_OP_SIGNED 1 +#define FT_FRAME_OP_LITTLE 2 +#define FT_FRAME_OP_COMMAND( x ) ( x >> FT_FRAME_OP_SHIFT ) + +#define FT_MAKE_FRAME_OP( command, little, sign ) \ + ( ( command << FT_FRAME_OP_SHIFT ) | ( little << 1 ) | sign ) + +#define FT_FRAME_OP_END 0 +#define FT_FRAME_OP_START 1 /* start a new frame */ +#define FT_FRAME_OP_BYTE 2 /* read 1-byte value */ +#define FT_FRAME_OP_SHORT 3 /* read 2-byte value */ +#define FT_FRAME_OP_LONG 4 /* read 4-byte value */ +#define FT_FRAME_OP_OFF3 5 /* read 3-byte value */ +#define FT_FRAME_OP_BYTES 6 /* read a bytes sequence */ + + + typedef enum FT_Frame_Op_ + { + ft_frame_end = 0, + ft_frame_start = FT_MAKE_FRAME_OP( FT_FRAME_OP_START, 0, 0 ), + + ft_frame_byte = FT_MAKE_FRAME_OP( FT_FRAME_OP_BYTE, 0, 0 ), + ft_frame_schar = FT_MAKE_FRAME_OP( FT_FRAME_OP_BYTE, 0, 1 ), + + ft_frame_ushort_be = FT_MAKE_FRAME_OP( FT_FRAME_OP_SHORT, 0, 0 ), + ft_frame_short_be = FT_MAKE_FRAME_OP( FT_FRAME_OP_SHORT, 0, 1 ), + ft_frame_ushort_le = FT_MAKE_FRAME_OP( FT_FRAME_OP_SHORT, 1, 0 ), + ft_frame_short_le = FT_MAKE_FRAME_OP( FT_FRAME_OP_SHORT, 1, 1 ), + + ft_frame_ulong_be = FT_MAKE_FRAME_OP( FT_FRAME_OP_LONG, 0, 0 ), + ft_frame_long_be = FT_MAKE_FRAME_OP( FT_FRAME_OP_LONG, 0, 1 ), + ft_frame_ulong_le = FT_MAKE_FRAME_OP( FT_FRAME_OP_LONG, 1, 0 ), + ft_frame_long_le = FT_MAKE_FRAME_OP( FT_FRAME_OP_LONG, 1, 1 ), + + ft_frame_uoff3_be = FT_MAKE_FRAME_OP( FT_FRAME_OP_OFF3, 0, 0 ), + ft_frame_off3_be = FT_MAKE_FRAME_OP( FT_FRAME_OP_OFF3, 0, 1 ), + ft_frame_uoff3_le = FT_MAKE_FRAME_OP( FT_FRAME_OP_OFF3, 1, 0 ), + ft_frame_off3_le = FT_MAKE_FRAME_OP( FT_FRAME_OP_OFF3, 1, 1 ), + + ft_frame_bytes = FT_MAKE_FRAME_OP( FT_FRAME_OP_BYTES, 0, 0 ), + ft_frame_skip = FT_MAKE_FRAME_OP( FT_FRAME_OP_BYTES, 0, 1 ) + + } FT_Frame_Op; + + + typedef struct FT_Frame_Field_ + { + FT_Byte value; + FT_Byte size; + FT_UShort offset; + + } FT_Frame_Field; + + + /* Construct an FT_Frame_Field out of a structure type and a field name. */ + /* The structure type must be set in the FT_STRUCTURE macro before */ + /* calling the FT_FRAME_START() macro. 
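+
+     A minimal sketch (the structure and field names are placeholders):
+
+       typedef struct  TT_Example_
+       {
+         FT_UShort  version;
+         FT_ULong   length;
+
+       } TT_Example;
+
+
+       #undef  FT_STRUCTURE
+       #define FT_STRUCTURE  TT_Example
+
+       static const FT_Frame_Field  example_fields[] =
+       {
+         FT_FRAME_START( 6 ),
+           FT_FRAME_USHORT( version ),
+           FT_FRAME_ULONG( length ),
+         FT_FRAME_END
+       };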
*/ + /* */ +#define FT_FIELD_SIZE( f ) \ + (FT_Byte)sizeof ( ((FT_STRUCTURE*)0)->f ) + +#define FT_FIELD_SIZE_DELTA( f ) \ + (FT_Byte)sizeof ( ((FT_STRUCTURE*)0)->f[0] ) + +#define FT_FIELD_OFFSET( f ) \ + (FT_UShort)( offsetof( FT_STRUCTURE, f ) ) + +#define FT_FRAME_FIELD( frame_op, field ) \ + { \ + frame_op, \ + FT_FIELD_SIZE( field ), \ + FT_FIELD_OFFSET( field ) \ + } + +#define FT_MAKE_EMPTY_FIELD( frame_op ) { frame_op, 0, 0 } + +#define FT_FRAME_START( size ) { ft_frame_start, 0, size } +#define FT_FRAME_END { ft_frame_end, 0, 0 } + +#define FT_FRAME_LONG( f ) FT_FRAME_FIELD( ft_frame_long_be, f ) +#define FT_FRAME_ULONG( f ) FT_FRAME_FIELD( ft_frame_ulong_be, f ) +#define FT_FRAME_SHORT( f ) FT_FRAME_FIELD( ft_frame_short_be, f ) +#define FT_FRAME_USHORT( f ) FT_FRAME_FIELD( ft_frame_ushort_be, f ) +#define FT_FRAME_OFF3( f ) FT_FRAME_FIELD( ft_frame_off3_be, f ) +#define FT_FRAME_UOFF3( f ) FT_FRAME_FIELD( ft_frame_uoff3_be, f ) +#define FT_FRAME_BYTE( f ) FT_FRAME_FIELD( ft_frame_byte, f ) +#define FT_FRAME_CHAR( f ) FT_FRAME_FIELD( ft_frame_schar, f ) + +#define FT_FRAME_LONG_LE( f ) FT_FRAME_FIELD( ft_frame_long_le, f ) +#define FT_FRAME_ULONG_LE( f ) FT_FRAME_FIELD( ft_frame_ulong_le, f ) +#define FT_FRAME_SHORT_LE( f ) FT_FRAME_FIELD( ft_frame_short_le, f ) +#define FT_FRAME_USHORT_LE( f ) FT_FRAME_FIELD( ft_frame_ushort_le, f ) +#define FT_FRAME_OFF3_LE( f ) FT_FRAME_FIELD( ft_frame_off3_le, f ) +#define FT_FRAME_UOFF3_LE( f ) FT_FRAME_FIELD( ft_frame_uoff3_le, f ) + +#define FT_FRAME_SKIP_LONG { ft_frame_long_be, 0, 0 } +#define FT_FRAME_SKIP_SHORT { ft_frame_short_be, 0, 0 } +#define FT_FRAME_SKIP_BYTE { ft_frame_byte, 0, 0 } + +#define FT_FRAME_BYTES( field, count ) \ + { \ + ft_frame_bytes, \ + count, \ + FT_FIELD_OFFSET( field ) \ + } + +#define FT_FRAME_SKIP_BYTES( count ) { ft_frame_skip, count, 0 } + + + /************************************************************************** + * + * Integer extraction macros -- the 'buffer' parameter must ALWAYS be of + * type 'char*' or equivalent (1-byte elements). + */ + +#define FT_BYTE_( p, i ) ( ((const FT_Byte*)(p))[(i)] ) + +#define FT_INT16( x ) ( (FT_Int16)(x) ) +#define FT_UINT16( x ) ( (FT_UInt16)(x) ) +#define FT_INT32( x ) ( (FT_Int32)(x) ) +#define FT_UINT32( x ) ( (FT_UInt32)(x) ) + + +#define FT_BYTE_U16( p, i, s ) ( FT_UINT16( FT_BYTE_( p, i ) ) << (s) ) +#define FT_BYTE_U32( p, i, s ) ( FT_UINT32( FT_BYTE_( p, i ) ) << (s) ) + + + /* + * function acts on increases does range for emits + * pointer checking frames error + * ------------------------------------------------------------------- + * FT_PEEK_XXX buffer pointer no no no no + * FT_NEXT_XXX buffer pointer yes no no no + * FT_GET_XXX stream->cursor yes yes yes no + * FT_READ_XXX stream->pos yes yes no yes + */ + + + /* + * `FT_PEEK_XXX' are generic macros to get data from a buffer position. No + * safety checks are performed. 
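+   *
+   * For instance, if `p' points to the two bytes 0x12 0x34, then
+   * FT_PEEK_USHORT( p ) yields 0x1234 (big-endian interpretation) while
+   * FT_PEEK_USHORT_LE( p ) yields 0x3412.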
+ */ +#define FT_PEEK_SHORT( p ) FT_INT16( FT_BYTE_U16( p, 0, 8 ) | \ + FT_BYTE_U16( p, 1, 0 ) ) + +#define FT_PEEK_USHORT( p ) FT_UINT16( FT_BYTE_U16( p, 0, 8 ) | \ + FT_BYTE_U16( p, 1, 0 ) ) + +#define FT_PEEK_LONG( p ) FT_INT32( FT_BYTE_U32( p, 0, 24 ) | \ + FT_BYTE_U32( p, 1, 16 ) | \ + FT_BYTE_U32( p, 2, 8 ) | \ + FT_BYTE_U32( p, 3, 0 ) ) + +#define FT_PEEK_ULONG( p ) FT_UINT32( FT_BYTE_U32( p, 0, 24 ) | \ + FT_BYTE_U32( p, 1, 16 ) | \ + FT_BYTE_U32( p, 2, 8 ) | \ + FT_BYTE_U32( p, 3, 0 ) ) + +#define FT_PEEK_OFF3( p ) ( FT_INT32( FT_BYTE_U32( p, 0, 24 ) | \ + FT_BYTE_U32( p, 1, 16 ) | \ + FT_BYTE_U32( p, 2, 8 ) ) >> 8 ) + +#define FT_PEEK_UOFF3( p ) FT_UINT32( FT_BYTE_U32( p, 0, 16 ) | \ + FT_BYTE_U32( p, 1, 8 ) | \ + FT_BYTE_U32( p, 2, 0 ) ) + +#define FT_PEEK_SHORT_LE( p ) FT_INT16( FT_BYTE_U16( p, 1, 8 ) | \ + FT_BYTE_U16( p, 0, 0 ) ) + +#define FT_PEEK_USHORT_LE( p ) FT_UINT16( FT_BYTE_U16( p, 1, 8 ) | \ + FT_BYTE_U16( p, 0, 0 ) ) + +#define FT_PEEK_LONG_LE( p ) FT_INT32( FT_BYTE_U32( p, 3, 24 ) | \ + FT_BYTE_U32( p, 2, 16 ) | \ + FT_BYTE_U32( p, 1, 8 ) | \ + FT_BYTE_U32( p, 0, 0 ) ) + +#define FT_PEEK_ULONG_LE( p ) FT_UINT32( FT_BYTE_U32( p, 3, 24 ) | \ + FT_BYTE_U32( p, 2, 16 ) | \ + FT_BYTE_U32( p, 1, 8 ) | \ + FT_BYTE_U32( p, 0, 0 ) ) + +#define FT_PEEK_OFF3_LE( p ) ( FT_INT32( FT_BYTE_U32( p, 2, 24 ) | \ + FT_BYTE_U32( p, 1, 16 ) | \ + FT_BYTE_U32( p, 0, 8 ) ) >> 8 ) + +#define FT_PEEK_UOFF3_LE( p ) FT_UINT32( FT_BYTE_U32( p, 2, 16 ) | \ + FT_BYTE_U32( p, 1, 8 ) | \ + FT_BYTE_U32( p, 0, 0 ) ) + + /* + * `FT_NEXT_XXX' are generic macros to get data from a buffer position + * which is then increased appropriately. No safety checks are performed. + */ +#define FT_NEXT_CHAR( buffer ) \ + ( (signed char)*buffer++ ) + +#define FT_NEXT_BYTE( buffer ) \ + ( (unsigned char)*buffer++ ) + +#define FT_NEXT_SHORT( buffer ) \ + ( buffer += 2, FT_PEEK_SHORT( buffer - 2 ) ) + +#define FT_NEXT_USHORT( buffer ) \ + ( buffer += 2, FT_PEEK_USHORT( buffer - 2 ) ) + +#define FT_NEXT_OFF3( buffer ) \ + ( buffer += 3, FT_PEEK_OFF3( buffer - 3 ) ) + +#define FT_NEXT_UOFF3( buffer ) \ + ( buffer += 3, FT_PEEK_UOFF3( buffer - 3 ) ) + +#define FT_NEXT_LONG( buffer ) \ + ( buffer += 4, FT_PEEK_LONG( buffer - 4 ) ) + +#define FT_NEXT_ULONG( buffer ) \ + ( buffer += 4, FT_PEEK_ULONG( buffer - 4 ) ) + + +#define FT_NEXT_SHORT_LE( buffer ) \ + ( buffer += 2, FT_PEEK_SHORT_LE( buffer - 2 ) ) + +#define FT_NEXT_USHORT_LE( buffer ) \ + ( buffer += 2, FT_PEEK_USHORT_LE( buffer - 2 ) ) + +#define FT_NEXT_OFF3_LE( buffer ) \ + ( buffer += 3, FT_PEEK_OFF3_LE( buffer - 3 ) ) + +#define FT_NEXT_UOFF3_LE( buffer ) \ + ( buffer += 3, FT_PEEK_UOFF3_LE( buffer - 3 ) ) + +#define FT_NEXT_LONG_LE( buffer ) \ + ( buffer += 4, FT_PEEK_LONG_LE( buffer - 4 ) ) + +#define FT_NEXT_ULONG_LE( buffer ) \ + ( buffer += 4, FT_PEEK_ULONG_LE( buffer - 4 ) ) + + + /************************************************************************** + * + * The `FT_GET_XXX` macros use an implicit 'stream' variable. + * + * Note that a call to `FT_STREAM_SEEK` or `FT_STREAM_POS` has **no** + * effect on `FT_GET_XXX`! They operate on `stream->pos`, while + * `FT_GET_XXX` use `stream->cursor`. 
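+   *
+   * A typical pattern is therefore as follows; the variable names are
+   * placeholders, FT_FRAME_ENTER and FT_FRAME_EXIT are defined further
+   * below, and the implicit `stream' and `error' variables must be in
+   * scope:
+   *
+   *   if ( FT_FRAME_ENTER( 8 ) )
+   *     goto Exit;
+   *
+   *   version = FT_GET_ULONG();
+   *   count   = FT_GET_ULONG();
+   *
+   *   FT_FRAME_EXIT();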
+ */ +#if 0 +#define FT_GET_MACRO( type ) FT_NEXT_ ## type ( stream->cursor ) + +#define FT_GET_CHAR() FT_GET_MACRO( CHAR ) +#define FT_GET_BYTE() FT_GET_MACRO( BYTE ) +#define FT_GET_SHORT() FT_GET_MACRO( SHORT ) +#define FT_GET_USHORT() FT_GET_MACRO( USHORT ) +#define FT_GET_OFF3() FT_GET_MACRO( OFF3 ) +#define FT_GET_UOFF3() FT_GET_MACRO( UOFF3 ) +#define FT_GET_LONG() FT_GET_MACRO( LONG ) +#define FT_GET_ULONG() FT_GET_MACRO( ULONG ) +#define FT_GET_TAG4() FT_GET_MACRO( ULONG ) + +#define FT_GET_SHORT_LE() FT_GET_MACRO( SHORT_LE ) +#define FT_GET_USHORT_LE() FT_GET_MACRO( USHORT_LE ) +#define FT_GET_LONG_LE() FT_GET_MACRO( LONG_LE ) +#define FT_GET_ULONG_LE() FT_GET_MACRO( ULONG_LE ) + +#else +#define FT_GET_MACRO( func, type ) ( (type)func( stream ) ) + +#define FT_GET_CHAR() FT_GET_MACRO( FT_Stream_GetByte, FT_Char ) +#define FT_GET_BYTE() FT_GET_MACRO( FT_Stream_GetByte, FT_Byte ) +#define FT_GET_SHORT() FT_GET_MACRO( FT_Stream_GetUShort, FT_Int16 ) +#define FT_GET_USHORT() FT_GET_MACRO( FT_Stream_GetUShort, FT_UInt16 ) +#define FT_GET_UOFF3() FT_GET_MACRO( FT_Stream_GetUOffset, FT_UInt32 ) +#define FT_GET_LONG() FT_GET_MACRO( FT_Stream_GetULong, FT_Int32 ) +#define FT_GET_ULONG() FT_GET_MACRO( FT_Stream_GetULong, FT_UInt32 ) +#define FT_GET_TAG4() FT_GET_MACRO( FT_Stream_GetULong, FT_UInt32 ) + +#define FT_GET_SHORT_LE() FT_GET_MACRO( FT_Stream_GetUShortLE, FT_Int16 ) +#define FT_GET_USHORT_LE() FT_GET_MACRO( FT_Stream_GetUShortLE, FT_UInt16 ) +#define FT_GET_LONG_LE() FT_GET_MACRO( FT_Stream_GetULongLE, FT_Int32 ) +#define FT_GET_ULONG_LE() FT_GET_MACRO( FT_Stream_GetULongLE, FT_UInt32 ) +#endif + + +#define FT_READ_MACRO( func, type, var ) \ + ( var = (type)func( stream, &error ), \ + error != FT_Err_Ok ) + + /* + * The `FT_READ_XXX' macros use implicit `stream' and `error' variables. + * + * `FT_READ_XXX' can be controlled with `FT_STREAM_SEEK' and + * `FT_STREAM_POS'. They use the full machinery to check whether a read is + * valid. 
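+   *
+   * For example, with the implicit `stream' and `error' variables in scope
+   * and `offset' being a placeholder:
+   *
+   *   FT_UShort  num_tables;
+   *
+   *
+   *   if ( FT_STREAM_SEEK( offset )     ||
+   *        FT_READ_USHORT( num_tables ) )
+   *     goto Exit;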
+ */ +#define FT_READ_BYTE( var ) FT_READ_MACRO( FT_Stream_ReadByte, FT_Byte, var ) +#define FT_READ_CHAR( var ) FT_READ_MACRO( FT_Stream_ReadByte, FT_Char, var ) +#define FT_READ_SHORT( var ) FT_READ_MACRO( FT_Stream_ReadUShort, FT_Int16, var ) +#define FT_READ_USHORT( var ) FT_READ_MACRO( FT_Stream_ReadUShort, FT_UInt16, var ) +#define FT_READ_UOFF3( var ) FT_READ_MACRO( FT_Stream_ReadUOffset, FT_UInt32, var ) +#define FT_READ_LONG( var ) FT_READ_MACRO( FT_Stream_ReadULong, FT_Int32, var ) +#define FT_READ_ULONG( var ) FT_READ_MACRO( FT_Stream_ReadULong, FT_UInt32, var ) + +#define FT_READ_SHORT_LE( var ) FT_READ_MACRO( FT_Stream_ReadUShortLE, FT_Int16, var ) +#define FT_READ_USHORT_LE( var ) FT_READ_MACRO( FT_Stream_ReadUShortLE, FT_UInt16, var ) +#define FT_READ_LONG_LE( var ) FT_READ_MACRO( FT_Stream_ReadULongLE, FT_Int32, var ) +#define FT_READ_ULONG_LE( var ) FT_READ_MACRO( FT_Stream_ReadULongLE, FT_UInt32, var ) + + +#ifndef FT_CONFIG_OPTION_NO_DEFAULT_SYSTEM + + /* initialize a stream for reading a regular system stream */ + FT_BASE( FT_Error ) + FT_Stream_Open( FT_Stream stream, + const char* filepathname ); + +#endif /* FT_CONFIG_OPTION_NO_DEFAULT_SYSTEM */ + + + /* create a new (input) stream from an FT_Open_Args structure */ + FT_BASE( FT_Error ) + FT_Stream_New( FT_Library library, + const FT_Open_Args* args, + FT_Stream *astream ); + + /* free a stream */ + FT_BASE( void ) + FT_Stream_Free( FT_Stream stream, + FT_Int external ); + + /* initialize a stream for reading in-memory data */ + FT_BASE( void ) + FT_Stream_OpenMemory( FT_Stream stream, + const FT_Byte* base, + FT_ULong size ); + + /* close a stream (does not destroy the stream structure) */ + FT_BASE( void ) + FT_Stream_Close( FT_Stream stream ); + + + /* seek within a stream. position is relative to start of stream */ + FT_BASE( FT_Error ) + FT_Stream_Seek( FT_Stream stream, + FT_ULong pos ); + + /* skip bytes in a stream */ + FT_BASE( FT_Error ) + FT_Stream_Skip( FT_Stream stream, + FT_Long distance ); + + /* return current stream position */ + FT_BASE( FT_ULong ) + FT_Stream_Pos( FT_Stream stream ); + + /* read bytes from a stream into a user-allocated buffer, returns an */ + /* error if not all bytes could be read. */ + FT_BASE( FT_Error ) + FT_Stream_Read( FT_Stream stream, + FT_Byte* buffer, + FT_ULong count ); + + /* read bytes from a stream at a given position */ + FT_BASE( FT_Error ) + FT_Stream_ReadAt( FT_Stream stream, + FT_ULong pos, + FT_Byte* buffer, + FT_ULong count ); + + /* try to read bytes at the end of a stream; return number of bytes */ + /* really available */ + FT_BASE( FT_ULong ) + FT_Stream_TryRead( FT_Stream stream, + FT_Byte* buffer, + FT_ULong count ); + + /* Enter a frame of `count' consecutive bytes in a stream. Returns an */ + /* error if the frame could not be read/accessed. The caller can use */ + /* the `FT_Stream_GetXXX' functions to retrieve frame data without */ + /* error checks. */ + /* */ + /* You must _always_ call `FT_Stream_ExitFrame' once you have entered */ + /* a stream frame! */ + /* */ + /* Nested frames are not permitted. */ + /* */ + FT_BASE( FT_Error ) + FT_Stream_EnterFrame( FT_Stream stream, + FT_ULong count ); + + /* exit a stream frame */ + FT_BASE( void ) + FT_Stream_ExitFrame( FT_Stream stream ); + + + /* Extract a stream frame. If the stream is disk-based, a heap block */ + /* is allocated and the frame bytes are read into it. If the stream */ + /* is memory-based, this function simply sets a pointer to the data. 
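+
+     For example (`length' is a placeholder for the number of bytes to
+     extract):
+
+       FT_Byte*  table = NULL;
+
+
+       if ( FT_FRAME_EXTRACT( length, table ) )
+         goto Exit;
+
+       ... parse `table' here ...
+
+       FT_FRAME_RELEASE( table );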
*/ + /* */ + /* Useful to optimize access to memory-based streams transparently. */ + /* */ + /* `FT_Stream_GetXXX' functions can't be used. */ + /* */ + /* An extracted frame must be `freed' with a call to the function */ + /* `FT_Stream_ReleaseFrame'. */ + /* */ + FT_BASE( FT_Error ) + FT_Stream_ExtractFrame( FT_Stream stream, + FT_ULong count, + FT_Byte** pbytes ); + + /* release an extract frame (see `FT_Stream_ExtractFrame') */ + FT_BASE( void ) + FT_Stream_ReleaseFrame( FT_Stream stream, + FT_Byte** pbytes ); + + + /* read a byte from an entered frame */ + FT_BASE( FT_Byte ) + FT_Stream_GetByte( FT_Stream stream ); + + /* read a 16-bit big-endian unsigned integer from an entered frame */ + FT_BASE( FT_UInt16 ) + FT_Stream_GetUShort( FT_Stream stream ); + + /* read a 24-bit big-endian unsigned integer from an entered frame */ + FT_BASE( FT_UInt32 ) + FT_Stream_GetUOffset( FT_Stream stream ); + + /* read a 32-bit big-endian unsigned integer from an entered frame */ + FT_BASE( FT_UInt32 ) + FT_Stream_GetULong( FT_Stream stream ); + + /* read a 16-bit little-endian unsigned integer from an entered frame */ + FT_BASE( FT_UInt16 ) + FT_Stream_GetUShortLE( FT_Stream stream ); + + /* read a 32-bit little-endian unsigned integer from an entered frame */ + FT_BASE( FT_UInt32 ) + FT_Stream_GetULongLE( FT_Stream stream ); + + + /* read a byte from a stream */ + FT_BASE( FT_Byte ) + FT_Stream_ReadByte( FT_Stream stream, + FT_Error* error ); + + /* read a 16-bit big-endian unsigned integer from a stream */ + FT_BASE( FT_UInt16 ) + FT_Stream_ReadUShort( FT_Stream stream, + FT_Error* error ); + + /* read a 24-bit big-endian unsigned integer from a stream */ + FT_BASE( FT_ULong ) + FT_Stream_ReadUOffset( FT_Stream stream, + FT_Error* error ); + + /* read a 32-bit big-endian integer from a stream */ + FT_BASE( FT_UInt32 ) + FT_Stream_ReadULong( FT_Stream stream, + FT_Error* error ); + + /* read a 16-bit little-endian unsigned integer from a stream */ + FT_BASE( FT_UInt16 ) + FT_Stream_ReadUShortLE( FT_Stream stream, + FT_Error* error ); + + /* read a 32-bit little-endian unsigned integer from a stream */ + FT_BASE( FT_UInt32 ) + FT_Stream_ReadULongLE( FT_Stream stream, + FT_Error* error ); + + /* Read a structure from a stream. The structure must be described */ + /* by an array of FT_Frame_Field records. 
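+
+     With a field table such as the `example_fields' sketch shown earlier
+     and a matching `TT_Example  example' variable, a driver reads the
+     whole structure in one call:
+
+       error = FT_Stream_ReadFields( stream, example_fields, &example );
+
+     or, equivalently, through the FT_STREAM_READ_FIELDS macro defined
+     below.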
*/ + FT_BASE( FT_Error ) + FT_Stream_ReadFields( FT_Stream stream, + const FT_Frame_Field* fields, + void* structure ); + + +#define FT_STREAM_POS() \ + FT_Stream_Pos( stream ) + +#define FT_STREAM_SEEK( position ) \ + FT_SET_ERROR( FT_Stream_Seek( stream, \ + (FT_ULong)(position) ) ) + +#define FT_STREAM_SKIP( distance ) \ + FT_SET_ERROR( FT_Stream_Skip( stream, \ + (FT_Long)(distance) ) ) + +#define FT_STREAM_READ( buffer, count ) \ + FT_SET_ERROR( FT_Stream_Read( stream, \ + (FT_Byte*)(buffer), \ + (FT_ULong)(count) ) ) + +#define FT_STREAM_READ_AT( position, buffer, count ) \ + FT_SET_ERROR( FT_Stream_ReadAt( stream, \ + (FT_ULong)(position), \ + (FT_Byte*)(buffer), \ + (FT_ULong)(count) ) ) + +#define FT_STREAM_READ_FIELDS( fields, object ) \ + FT_SET_ERROR( FT_Stream_ReadFields( stream, fields, object ) ) + + +#define FT_FRAME_ENTER( size ) \ + FT_SET_ERROR( \ + FT_DEBUG_INNER( FT_Stream_EnterFrame( stream, \ + (FT_ULong)(size) ) ) ) + +#define FT_FRAME_EXIT() \ + FT_DEBUG_INNER( FT_Stream_ExitFrame( stream ) ) + +#define FT_FRAME_EXTRACT( size, bytes ) \ + FT_SET_ERROR( \ + FT_DEBUG_INNER( FT_Stream_ExtractFrame( stream, \ + (FT_ULong)(size), \ + (FT_Byte**)&(bytes) ) ) ) + +#define FT_FRAME_RELEASE( bytes ) \ + FT_DEBUG_INNER( FT_Stream_ReleaseFrame( stream, \ + (FT_Byte**)&(bytes) ) ) + + +FT_END_HEADER + +#endif /* FTSTREAM_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/fttrace.h b/Example/include/freetype/internal/fttrace.h new file mode 100644 index 0000000..319fe56 --- /dev/null +++ b/Example/include/freetype/internal/fttrace.h @@ -0,0 +1,172 @@ +/**************************************************************************** + * + * fttrace.h + * + * Tracing handling (specification only). + * + * Copyright (C) 2002-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. 
+ * + */ + + + /* definitions of trace levels for FreeType 2 */ + + /* the maximum string length (if the argument to `FT_TRACE_DEF` */ + /* gets used as a string) plus one charachter for ':' plus */ + /* another one for the trace level */ +#define FT_MAX_TRACE_LEVEL_LENGTH (9 + 1 + 1) + + /* the first level must always be `trace_any' */ +FT_TRACE_DEF( any ) + + /* base components */ +FT_TRACE_DEF( calc ) /* calculations (ftcalc.c) */ +FT_TRACE_DEF( gloader ) /* glyph loader (ftgloadr.c) */ +FT_TRACE_DEF( glyph ) /* glyph management (ftglyph.c) */ +FT_TRACE_DEF( memory ) /* memory manager (ftobjs.c) */ +FT_TRACE_DEF( init ) /* initialization (ftinit.c) */ +FT_TRACE_DEF( io ) /* i/o interface (ftsystem.c) */ +FT_TRACE_DEF( list ) /* list management (ftlist.c) */ +FT_TRACE_DEF( objs ) /* base objects (ftobjs.c) */ +FT_TRACE_DEF( outline ) /* outline management (ftoutln.c) */ +FT_TRACE_DEF( stream ) /* stream manager (ftstream.c) */ + +FT_TRACE_DEF( bitmap ) /* bitmap manipulation (ftbitmap.c) */ +FT_TRACE_DEF( checksum ) /* bitmap checksum (ftobjs.c) */ +FT_TRACE_DEF( mm ) /* MM interface (ftmm.c) */ +FT_TRACE_DEF( psprops ) /* PS driver properties (ftpsprop.c) */ +FT_TRACE_DEF( raccess ) /* resource fork accessor (ftrfork.c) */ +FT_TRACE_DEF( synth ) /* bold/slant synthesizer (ftsynth.c) */ + + /* rasterizers */ +FT_TRACE_DEF( raster ) /* monochrome rasterizer (ftraster.c) */ +FT_TRACE_DEF( smooth ) /* anti-aliasing raster (ftgrays.c) */ + + /* ot-svg module */ +FT_TRACE_DEF( otsvg ) /* OT-SVG renderer (ftsvg.c) */ + + /* cache sub-system */ +FT_TRACE_DEF( cache ) /* cache sub-system (ftcache.c, etc.) */ + + /* SFNT driver components */ +FT_TRACE_DEF( sfdriver ) /* SFNT font driver (sfdriver.c) */ +FT_TRACE_DEF( sfobjs ) /* SFNT object handler (sfobjs.c) */ +FT_TRACE_DEF( sfwoff ) /* WOFF format handler (sfwoff.c) */ +FT_TRACE_DEF( sfwoff2 ) /* WOFF2 format handler (sfwoff2.c) */ +FT_TRACE_DEF( ttbdf ) /* TrueType embedded BDF (ttbdf.c) */ +FT_TRACE_DEF( ttcmap ) /* charmap handler (ttcmap.c) */ +FT_TRACE_DEF( ttcolr ) /* glyph layer table (ttcolr.c) */ +FT_TRACE_DEF( ttcpal ) /* color palette table (ttcpal.c) */ +FT_TRACE_DEF( ttsvg ) /* OpenType SVG table (ttsvg.c) */ +FT_TRACE_DEF( ttkern ) /* kerning handler (ttkern.c) */ +FT_TRACE_DEF( ttload ) /* basic TrueType tables (ttload.c) */ +FT_TRACE_DEF( ttmtx ) /* metrics-related tables (ttmtx.c) */ +FT_TRACE_DEF( ttpost ) /* PS table processing (ttpost.c) */ +FT_TRACE_DEF( ttsbit ) /* TrueType sbit handling (ttsbit.c) */ + + /* TrueType driver components */ +FT_TRACE_DEF( ttdriver ) /* TT font driver (ttdriver.c) */ +FT_TRACE_DEF( ttgload ) /* TT glyph loader (ttgload.c) */ +FT_TRACE_DEF( ttgxvar ) /* TrueType GX var handler (ttgxvar.c) */ +FT_TRACE_DEF( ttinterp ) /* bytecode interpreter (ttinterp.c) */ +FT_TRACE_DEF( ttobjs ) /* TT objects manager (ttobjs.c) */ +FT_TRACE_DEF( ttpload ) /* TT data/program loader (ttpload.c) */ + + /* Type 1 driver components */ +FT_TRACE_DEF( t1afm ) +FT_TRACE_DEF( t1driver ) +FT_TRACE_DEF( t1gload ) +FT_TRACE_DEF( t1load ) +FT_TRACE_DEF( t1objs ) +FT_TRACE_DEF( t1parse ) + + /* PostScript helper module `psaux' */ +FT_TRACE_DEF( afmparse ) +FT_TRACE_DEF( cffdecode ) +FT_TRACE_DEF( psconv ) +FT_TRACE_DEF( psobjs ) +FT_TRACE_DEF( t1decode ) + + /* PostScript hinting module `pshinter' */ +FT_TRACE_DEF( pshalgo ) +FT_TRACE_DEF( pshrec ) + + /* Type 2 driver components */ +FT_TRACE_DEF( cffdriver ) +FT_TRACE_DEF( cffgload ) +FT_TRACE_DEF( cffload ) +FT_TRACE_DEF( cffobjs ) +FT_TRACE_DEF( cffparse ) + 
+FT_TRACE_DEF( cf2blues ) +FT_TRACE_DEF( cf2hints ) +FT_TRACE_DEF( cf2interp ) + + /* Type 42 driver component */ +FT_TRACE_DEF( t42 ) + + /* CID driver components */ +FT_TRACE_DEF( ciddriver ) +FT_TRACE_DEF( cidgload ) +FT_TRACE_DEF( cidload ) +FT_TRACE_DEF( cidobjs ) +FT_TRACE_DEF( cidparse ) + + /* Windows font component */ +FT_TRACE_DEF( winfnt ) + + /* PCF font components */ +FT_TRACE_DEF( pcfdriver ) +FT_TRACE_DEF( pcfread ) + + /* BDF font components */ +FT_TRACE_DEF( bdfdriver ) +FT_TRACE_DEF( bdflib ) + + /* PFR font component */ +FT_TRACE_DEF( pfr ) + + /* OpenType validation components */ +FT_TRACE_DEF( otvcommon ) +FT_TRACE_DEF( otvbase ) +FT_TRACE_DEF( otvgdef ) +FT_TRACE_DEF( otvgpos ) +FT_TRACE_DEF( otvgsub ) +FT_TRACE_DEF( otvjstf ) +FT_TRACE_DEF( otvmath ) +FT_TRACE_DEF( otvmodule ) + + /* TrueTypeGX/AAT validation components */ +FT_TRACE_DEF( gxvbsln ) +FT_TRACE_DEF( gxvcommon ) +FT_TRACE_DEF( gxvfeat ) +FT_TRACE_DEF( gxvjust ) +FT_TRACE_DEF( gxvkern ) +FT_TRACE_DEF( gxvmodule ) +FT_TRACE_DEF( gxvmort ) +FT_TRACE_DEF( gxvmorx ) +FT_TRACE_DEF( gxvlcar ) +FT_TRACE_DEF( gxvopbd ) +FT_TRACE_DEF( gxvprop ) +FT_TRACE_DEF( gxvtrak ) + + /* autofit components */ +FT_TRACE_DEF( afcjk ) +FT_TRACE_DEF( afglobal ) +FT_TRACE_DEF( afhints ) +FT_TRACE_DEF( afmodule ) +FT_TRACE_DEF( aflatin ) +FT_TRACE_DEF( afshaper ) + + /* SDF components */ +FT_TRACE_DEF( sdf ) /* signed distance raster for outlines (ftsdf.c) */ +FT_TRACE_DEF( bsdf ) /* signed distance raster for bitmaps (ftbsdf.c) */ + +/* END */ diff --git a/Example/include/freetype/internal/ftvalid.h b/Example/include/freetype/internal/ftvalid.h new file mode 100644 index 0000000..e98ee4e --- /dev/null +++ b/Example/include/freetype/internal/ftvalid.h @@ -0,0 +1,160 @@ +/**************************************************************************** + * + * ftvalid.h + * + * FreeType validation support (specification). + * + * Copyright (C) 2004-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef FTVALID_H_ +#define FTVALID_H_ + +#include +#include FT_CONFIG_STANDARD_LIBRARY_H /* for ft_jmpbuf */ + +#include "compiler-macros.h" + +FT_BEGIN_HEADER + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /**** ****/ + /**** ****/ + /**** V A L I D A T I O N ****/ + /**** ****/ + /**** ****/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + /* handle to a validation object */ + typedef struct FT_ValidatorRec_ volatile* FT_Validator; + + + /************************************************************************** + * + * There are three distinct validation levels defined here: + * + * FT_VALIDATE_DEFAULT :: + * A table that passes this validation level can be used reliably by + * FreeType. It generally means that all offsets have been checked to + * prevent out-of-bound reads, that array counts are correct, etc. 
+ * + * FT_VALIDATE_TIGHT :: + * A table that passes this validation level can be used reliably and + * doesn't contain invalid data. For example, a charmap table that + * returns invalid glyph indices will not pass, even though it can be + * used with FreeType in default mode (the library will simply return an + * error later when trying to load the glyph). + * + * It also checks that fields which must be a multiple of 2, 4, or 8, + * don't have incorrect values, etc. + * + * FT_VALIDATE_PARANOID :: + * Only for font debugging. Checks that a table follows the + * specification by 100%. Very few fonts will be able to pass this level + * anyway but it can be useful for certain tools like font + * editors/converters. + */ + typedef enum FT_ValidationLevel_ + { + FT_VALIDATE_DEFAULT = 0, + FT_VALIDATE_TIGHT, + FT_VALIDATE_PARANOID + + } FT_ValidationLevel; + + +#if defined( _MSC_VER ) /* Visual C++ (and Intel C++) */ + /* We disable the warning `structure was padded due to */ + /* __declspec(align())' in order to compile cleanly with */ + /* the maximum level of warnings. */ +#pragma warning( push ) +#pragma warning( disable : 4324 ) +#endif /* _MSC_VER */ + + /* validator structure */ + typedef struct FT_ValidatorRec_ + { + ft_jmp_buf jump_buffer; /* used for exception handling */ + + const FT_Byte* base; /* address of table in memory */ + const FT_Byte* limit; /* `base' + sizeof(table) in memory */ + FT_ValidationLevel level; /* validation level */ + FT_Error error; /* error returned. 0 means success */ + + } FT_ValidatorRec; + +#if defined( _MSC_VER ) +#pragma warning( pop ) +#endif + +#define FT_VALIDATOR( x ) ( (FT_Validator)( x ) ) + + + FT_BASE( void ) + ft_validator_init( FT_Validator valid, + const FT_Byte* base, + const FT_Byte* limit, + FT_ValidationLevel level ); + + /* Do not use this. It's broken and will cause your validator to crash */ + /* if you run it on an invalid font. */ + FT_BASE( FT_Int ) + ft_validator_run( FT_Validator valid ); + + /* Sets the error field in a validator, then calls `longjmp' to return */ + /* to high-level caller. Using `setjmp/longjmp' avoids many stupid */ + /* error checks within the validation routines. */ + /* */ + FT_BASE( void ) + ft_validator_error( FT_Validator valid, + FT_Error error ); + + + /* Calls ft_validate_error. Assumes that the `valid' local variable */ + /* holds a pointer to the current validator object. 
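+
+     A sketch of how these pieces are typically wired together
+     (illustrative only -- `table', `table_len' and `my_check_table'
+     are assumed names, not part of this header):
+
+       FT_ValidatorRec  valid;
+       FT_Error         error;
+
+
+       ft_validator_init( &valid, table, table + table_len,
+                          FT_VALIDATE_DEFAULT );
+
+       if ( ft_setjmp( valid.jump_buffer ) == 0 )
+         my_check_table( table, &valid );
+
+       error = valid.error;
+
+     `my_check_table' receives the validator's address and reports
+     problems through the FT_INVALID macros below, which jump back
+     to the ft_setjmp call above.
+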
*/ + /* */ +#define FT_INVALID( _error ) FT_INVALID_( _error ) +#define FT_INVALID_( _error ) \ + ft_validator_error( valid, FT_THROW( _error ) ) + + /* called when a broken table is detected */ +#define FT_INVALID_TOO_SHORT \ + FT_INVALID( Invalid_Table ) + + /* called when an invalid offset is detected */ +#define FT_INVALID_OFFSET \ + FT_INVALID( Invalid_Offset ) + + /* called when an invalid format/value is detected */ +#define FT_INVALID_FORMAT \ + FT_INVALID( Invalid_Table ) + + /* called when an invalid glyph index is detected */ +#define FT_INVALID_GLYPH_ID \ + FT_INVALID( Invalid_Glyph_Index ) + + /* called when an invalid field value is detected */ +#define FT_INVALID_DATA \ + FT_INVALID( Invalid_Table ) + + +FT_END_HEADER + +#endif /* FTVALID_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/psaux.h b/Example/include/freetype/internal/psaux.h new file mode 100644 index 0000000..dfb1987 --- /dev/null +++ b/Example/include/freetype/internal/psaux.h @@ -0,0 +1,1434 @@ +/**************************************************************************** + * + * psaux.h + * + * Auxiliary functions and data structures related to PostScript fonts + * (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef PSAUX_H_ +#define PSAUX_H_ + + +#include +#include +#include +#include +#include +#include +#include + + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * PostScript modules driver class. + */ + typedef struct PS_DriverRec_ + { + FT_DriverRec root; + + FT_UInt hinting_engine; + FT_Bool no_stem_darkening; + FT_Int darken_params[8]; + FT_Int32 random_seed; + + } PS_DriverRec, *PS_Driver; + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** T1_TABLE *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + + + typedef struct PS_TableRec_* PS_Table; + typedef const struct PS_Table_FuncsRec_* PS_Table_Funcs; + + + /************************************************************************** + * + * @struct: + * PS_Table_FuncsRec + * + * @description: + * A set of function pointers to manage PS_Table objects. + * + * @fields: + * table_init :: + * Used to initialize a table. + * + * table_done :: + * Finalizes resp. destroy a given table. + * + * table_add :: + * Adds a new object to a table. + * + * table_release :: + * Releases table data, then finalizes it. 
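+   *
+   * A minimal usage sketch (illustrative only; `funcs', `memory' and
+   * the literal values are assumptions):
+   *
+   *   PS_TableRec  table;
+   *   FT_Error     error;
+   *
+   *
+   *   error = funcs->init( &table, 16, memory );
+   *   if ( !error )
+   *     error = funcs->add( &table, 0, "Foo", 4 );
+   *
+   *   ...
+   *
+   *   funcs->release( &table );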
+ */ + typedef struct PS_Table_FuncsRec_ + { + FT_Error + (*init)( PS_Table table, + FT_Int count, + FT_Memory memory ); + + void + (*done)( PS_Table table ); + + FT_Error + (*add)( PS_Table table, + FT_Int idx, + const void* object, + FT_UInt length ); + + void + (*release)( PS_Table table ); + + } PS_Table_FuncsRec; + + + /************************************************************************** + * + * @struct: + * PS_TableRec + * + * @description: + * A PS_Table is a simple object used to store an array of objects in a + * single memory block. + * + * @fields: + * block :: + * The address in memory of the growheap's block. This can change + * between two object adds, due to reallocation. + * + * cursor :: + * The current top of the grow heap within its block. + * + * capacity :: + * The current size of the heap block. Increments by 1kByte chunks. + * + * init :: + * Set to 0xDEADBEEF if 'elements' and 'lengths' have been allocated. + * + * max_elems :: + * The maximum number of elements in table. + * + * elements :: + * A table of element addresses within the block. + * + * lengths :: + * A table of element sizes within the block. + * + * memory :: + * The object used for memory operations (alloc/realloc). + * + * funcs :: + * A table of method pointers for this object. + */ + typedef struct PS_TableRec_ + { + FT_Byte* block; /* current memory block */ + FT_Offset cursor; /* current cursor in memory block */ + FT_Offset capacity; /* current size of memory block */ + FT_ULong init; + + FT_Int max_elems; + FT_Byte** elements; /* addresses of table elements */ + FT_UInt* lengths; /* lengths of table elements */ + + FT_Memory memory; + PS_Table_FuncsRec funcs; + + } PS_TableRec; + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** T1 FIELDS & TOKENS *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + + typedef struct PS_ParserRec_* PS_Parser; + + typedef struct T1_TokenRec_* T1_Token; + + typedef struct T1_FieldRec_* T1_Field; + + + /* simple enumeration type used to identify token types */ + typedef enum T1_TokenType_ + { + T1_TOKEN_TYPE_NONE = 0, + T1_TOKEN_TYPE_ANY, + T1_TOKEN_TYPE_STRING, + T1_TOKEN_TYPE_ARRAY, + T1_TOKEN_TYPE_KEY, /* aka `name' */ + + /* do not remove */ + T1_TOKEN_TYPE_MAX + + } T1_TokenType; + + + /* a simple structure used to identify tokens */ + typedef struct T1_TokenRec_ + { + FT_Byte* start; /* first character of token in input stream */ + FT_Byte* limit; /* first character after the token */ + T1_TokenType type; /* type of token */ + + } T1_TokenRec; + + + /* enumeration type used to identify object fields */ + typedef enum T1_FieldType_ + { + T1_FIELD_TYPE_NONE = 0, + T1_FIELD_TYPE_BOOL, + T1_FIELD_TYPE_INTEGER, + T1_FIELD_TYPE_FIXED, + T1_FIELD_TYPE_FIXED_1000, + T1_FIELD_TYPE_STRING, + T1_FIELD_TYPE_KEY, + T1_FIELD_TYPE_BBOX, + T1_FIELD_TYPE_MM_BBOX, + T1_FIELD_TYPE_INTEGER_ARRAY, + T1_FIELD_TYPE_FIXED_ARRAY, + T1_FIELD_TYPE_CALLBACK, + + /* do not remove */ + T1_FIELD_TYPE_MAX + + } T1_FieldType; + + + typedef enum T1_FieldLocation_ + { + T1_FIELD_LOCATION_CID_INFO, + T1_FIELD_LOCATION_FONT_DICT, + T1_FIELD_LOCATION_FONT_EXTRA, + T1_FIELD_LOCATION_FONT_INFO, + T1_FIELD_LOCATION_PRIVATE, + T1_FIELD_LOCATION_BBOX, + T1_FIELD_LOCATION_LOADER, + T1_FIELD_LOCATION_FACE, + T1_FIELD_LOCATION_BLEND, + + /* 
do not remove */ + T1_FIELD_LOCATION_MAX + + } T1_FieldLocation; + + + typedef void + (*T1_Field_ParseFunc)( FT_Face face, + FT_Pointer parser ); + + + /* structure type used to model object fields */ + typedef struct T1_FieldRec_ + { + const char* ident; /* field identifier */ + T1_FieldLocation location; + T1_FieldType type; /* type of field */ + T1_Field_ParseFunc reader; + FT_UInt offset; /* offset of field in object */ + FT_Byte size; /* size of field in bytes */ + FT_UInt array_max; /* maximum number of elements for */ + /* array */ + FT_UInt count_offset; /* offset of element count for */ + /* arrays; must not be zero if in */ + /* use -- in other words, a */ + /* `num_FOO' element must not */ + /* start the used structure if we */ + /* parse a `FOO' array */ + FT_UInt dict; /* where we expect it */ + } T1_FieldRec; + +#define T1_FIELD_DICT_FONTDICT ( 1 << 0 ) /* also FontInfo and FDArray */ +#define T1_FIELD_DICT_PRIVATE ( 1 << 1 ) + + + +#define T1_NEW_SIMPLE_FIELD( _ident, _type, _fname, _dict ) \ + { \ + _ident, T1CODE, _type, \ + 0, \ + FT_FIELD_OFFSET( _fname ), \ + FT_FIELD_SIZE( _fname ), \ + 0, 0, \ + _dict \ + }, + +#define T1_NEW_CALLBACK_FIELD( _ident, _reader, _dict ) \ + { \ + _ident, T1CODE, T1_FIELD_TYPE_CALLBACK, \ + (T1_Field_ParseFunc)_reader, \ + 0, 0, \ + 0, 0, \ + _dict \ + }, + +#define T1_NEW_TABLE_FIELD( _ident, _type, _fname, _max, _dict ) \ + { \ + _ident, T1CODE, _type, \ + 0, \ + FT_FIELD_OFFSET( _fname ), \ + FT_FIELD_SIZE_DELTA( _fname ), \ + _max, \ + FT_FIELD_OFFSET( num_ ## _fname ), \ + _dict \ + }, + +#define T1_NEW_TABLE_FIELD2( _ident, _type, _fname, _max, _dict ) \ + { \ + _ident, T1CODE, _type, \ + 0, \ + FT_FIELD_OFFSET( _fname ), \ + FT_FIELD_SIZE_DELTA( _fname ), \ + _max, 0, \ + _dict \ + }, + + +#define T1_FIELD_BOOL( _ident, _fname, _dict ) \ + T1_NEW_SIMPLE_FIELD( _ident, T1_FIELD_TYPE_BOOL, _fname, _dict ) + +#define T1_FIELD_NUM( _ident, _fname, _dict ) \ + T1_NEW_SIMPLE_FIELD( _ident, T1_FIELD_TYPE_INTEGER, _fname, _dict ) + +#define T1_FIELD_FIXED( _ident, _fname, _dict ) \ + T1_NEW_SIMPLE_FIELD( _ident, T1_FIELD_TYPE_FIXED, _fname, _dict ) + +#define T1_FIELD_FIXED_1000( _ident, _fname, _dict ) \ + T1_NEW_SIMPLE_FIELD( _ident, T1_FIELD_TYPE_FIXED_1000, _fname, \ + _dict ) + +#define T1_FIELD_STRING( _ident, _fname, _dict ) \ + T1_NEW_SIMPLE_FIELD( _ident, T1_FIELD_TYPE_STRING, _fname, _dict ) + +#define T1_FIELD_KEY( _ident, _fname, _dict ) \ + T1_NEW_SIMPLE_FIELD( _ident, T1_FIELD_TYPE_KEY, _fname, _dict ) + +#define T1_FIELD_BBOX( _ident, _fname, _dict ) \ + T1_NEW_SIMPLE_FIELD( _ident, T1_FIELD_TYPE_BBOX, _fname, _dict ) + + +#define T1_FIELD_NUM_TABLE( _ident, _fname, _fmax, _dict ) \ + T1_NEW_TABLE_FIELD( _ident, T1_FIELD_TYPE_INTEGER_ARRAY, \ + _fname, _fmax, _dict ) + +#define T1_FIELD_FIXED_TABLE( _ident, _fname, _fmax, _dict ) \ + T1_NEW_TABLE_FIELD( _ident, T1_FIELD_TYPE_FIXED_ARRAY, \ + _fname, _fmax, _dict ) + +#define T1_FIELD_NUM_TABLE2( _ident, _fname, _fmax, _dict ) \ + T1_NEW_TABLE_FIELD2( _ident, T1_FIELD_TYPE_INTEGER_ARRAY, \ + _fname, _fmax, _dict ) + +#define T1_FIELD_FIXED_TABLE2( _ident, _fname, _fmax, _dict ) \ + T1_NEW_TABLE_FIELD2( _ident, T1_FIELD_TYPE_FIXED_ARRAY, \ + _fname, _fmax, _dict ) + +#define T1_FIELD_CALLBACK( _ident, _name, _dict ) \ + T1_NEW_CALLBACK_FIELD( _ident, _name, _dict ) + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** T1 PARSER *****/ + /***** 
*****/ + /*************************************************************************/ + /*************************************************************************/ + + typedef const struct PS_Parser_FuncsRec_* PS_Parser_Funcs; + + typedef struct PS_Parser_FuncsRec_ + { + void + (*init)( PS_Parser parser, + FT_Byte* base, + FT_Byte* limit, + FT_Memory memory ); + + void + (*done)( PS_Parser parser ); + + void + (*skip_spaces)( PS_Parser parser ); + void + (*skip_PS_token)( PS_Parser parser ); + + FT_Long + (*to_int)( PS_Parser parser ); + FT_Fixed + (*to_fixed)( PS_Parser parser, + FT_Int power_ten ); + + FT_Error + (*to_bytes)( PS_Parser parser, + FT_Byte* bytes, + FT_Offset max_bytes, + FT_ULong* pnum_bytes, + FT_Bool delimiters ); + + FT_Int + (*to_coord_array)( PS_Parser parser, + FT_Int max_coords, + FT_Short* coords ); + FT_Int + (*to_fixed_array)( PS_Parser parser, + FT_Int max_values, + FT_Fixed* values, + FT_Int power_ten ); + + void + (*to_token)( PS_Parser parser, + T1_Token token ); + void + (*to_token_array)( PS_Parser parser, + T1_Token tokens, + FT_UInt max_tokens, + FT_Int* pnum_tokens ); + + FT_Error + (*load_field)( PS_Parser parser, + const T1_Field field, + void** objects, + FT_UInt max_objects, + FT_ULong* pflags ); + + FT_Error + (*load_field_table)( PS_Parser parser, + const T1_Field field, + void** objects, + FT_UInt max_objects, + FT_ULong* pflags ); + + } PS_Parser_FuncsRec; + + + /************************************************************************** + * + * @struct: + * PS_ParserRec + * + * @description: + * A PS_Parser is an object used to parse a Type 1 font very quickly. + * + * @fields: + * cursor :: + * The current position in the text. + * + * base :: + * Start of the processed text. + * + * limit :: + * End of the processed text. + * + * error :: + * The last error returned. + * + * memory :: + * The object used for memory operations (alloc/realloc). + * + * funcs :: + * A table of functions for the parser. + */ + typedef struct PS_ParserRec_ + { + FT_Byte* cursor; + FT_Byte* base; + FT_Byte* limit; + FT_Error error; + FT_Memory memory; + + PS_Parser_FuncsRec funcs; + + } PS_ParserRec; + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** PS BUILDER *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + + + typedef struct PS_Builder_ PS_Builder; + typedef const struct PS_Builder_FuncsRec_* PS_Builder_Funcs; + + typedef struct PS_Builder_FuncsRec_ + { + void + (*init)( PS_Builder* ps_builder, + void* builder, + FT_Bool is_t1 ); + + void + (*done)( PS_Builder* builder ); + + } PS_Builder_FuncsRec; + + + /************************************************************************** + * + * @struct: + * PS_Builder + * + * @description: + * A structure used during glyph loading to store its outline. + * + * @fields: + * memory :: + * The current memory object. + * + * face :: + * The current face object. + * + * glyph :: + * The current glyph slot. + * + * loader :: + * XXX + * + * base :: + * The base glyph outline. + * + * current :: + * The current glyph outline. + * + * pos_x :: + * The horizontal translation (if composite glyph). + * + * pos_y :: + * The vertical translation (if composite glyph). + * + * left_bearing :: + * The left side bearing point. 
+ * + * advance :: + * The horizontal advance vector. + * + * bbox :: + * Unused. + * + * path_begun :: + * A flag which indicates that a new path has begun. + * + * load_points :: + * If this flag is not set, no points are loaded. + * + * no_recurse :: + * Set but not used. + * + * metrics_only :: + * A boolean indicating that we only want to compute the metrics of a + * given glyph, not load all of its points. + * + * is_t1 :: + * Set if current font type is Type 1. + * + * funcs :: + * An array of function pointers for the builder. + */ + struct PS_Builder_ + { + FT_Memory memory; + FT_Face face; + CFF_GlyphSlot glyph; + FT_GlyphLoader loader; + FT_Outline* base; + FT_Outline* current; + + FT_Pos* pos_x; + FT_Pos* pos_y; + + FT_Vector* left_bearing; + FT_Vector* advance; + + FT_BBox* bbox; /* bounding box */ + FT_Bool path_begun; + FT_Bool load_points; + FT_Bool no_recurse; + + FT_Bool metrics_only; + FT_Bool is_t1; + + PS_Builder_FuncsRec funcs; + + }; + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** PS DECODER *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + +#define PS_MAX_OPERANDS 48 +#define PS_MAX_SUBRS_CALLS 16 /* maximum subroutine nesting; */ + /* only 10 are allowed but there exist */ + /* fonts like `HiraKakuProN-W3.ttf' */ + /* (Hiragino Kaku Gothic ProN W3; */ + /* 8.2d6e1; 2014-12-19) that exceed */ + /* this limit */ + + /* execution context charstring zone */ + + typedef struct PS_Decoder_Zone_ + { + FT_Byte* base; + FT_Byte* limit; + FT_Byte* cursor; + + } PS_Decoder_Zone; + + + typedef FT_Error + (*CFF_Decoder_Get_Glyph_Callback)( TT_Face face, + FT_UInt glyph_index, + FT_Byte** pointer, + FT_ULong* length ); + + typedef void + (*CFF_Decoder_Free_Glyph_Callback)( TT_Face face, + FT_Byte** pointer, + FT_ULong length ); + + + typedef struct PS_Decoder_ + { + PS_Builder builder; + + FT_Fixed stack[PS_MAX_OPERANDS + 1]; + FT_Fixed* top; + + PS_Decoder_Zone zones[PS_MAX_SUBRS_CALLS + 1]; + PS_Decoder_Zone* zone; + + FT_Int flex_state; + FT_Int num_flex_vectors; + FT_Vector flex_vectors[7]; + + CFF_Font cff; + CFF_SubFont current_subfont; /* for current glyph_index */ + FT_Generic* cf2_instance; + + FT_Pos* glyph_width; + FT_Bool width_only; + FT_Int num_hints; + + FT_UInt num_locals; + FT_UInt num_globals; + + FT_Int locals_bias; + FT_Int globals_bias; + + FT_Byte** locals; + FT_Byte** globals; + + FT_Byte** glyph_names; /* for pure CFF fonts only */ + FT_UInt num_glyphs; /* number of glyphs in font */ + + FT_Render_Mode hint_mode; + + FT_Bool seac; + + CFF_Decoder_Get_Glyph_Callback get_glyph_callback; + CFF_Decoder_Free_Glyph_Callback free_glyph_callback; + + /* Type 1 stuff */ + FT_Service_PsCMaps psnames; /* for seac */ + + FT_Int lenIV; /* internal for sub routine calls */ + FT_UInt* locals_len; /* array of subrs length (optional) */ + FT_Hash locals_hash; /* used if `num_subrs' was massaged */ + + FT_Matrix font_matrix; + FT_Vector font_offset; + + PS_Blend blend; /* for multiple master support */ + + FT_Long* buildchar; + FT_UInt len_buildchar; + + } PS_Decoder; + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** T1 BUILDER *****/ + /***** *****/ + 
/*************************************************************************/ + /*************************************************************************/ + + + typedef struct T1_BuilderRec_* T1_Builder; + + + typedef FT_Error + (*T1_Builder_Check_Points_Func)( T1_Builder builder, + FT_Int count ); + + typedef void + (*T1_Builder_Add_Point_Func)( T1_Builder builder, + FT_Pos x, + FT_Pos y, + FT_Byte flag ); + + typedef FT_Error + (*T1_Builder_Add_Point1_Func)( T1_Builder builder, + FT_Pos x, + FT_Pos y ); + + typedef FT_Error + (*T1_Builder_Add_Contour_Func)( T1_Builder builder ); + + typedef FT_Error + (*T1_Builder_Start_Point_Func)( T1_Builder builder, + FT_Pos x, + FT_Pos y ); + + typedef void + (*T1_Builder_Close_Contour_Func)( T1_Builder builder ); + + + typedef const struct T1_Builder_FuncsRec_* T1_Builder_Funcs; + + typedef struct T1_Builder_FuncsRec_ + { + void + (*init)( T1_Builder builder, + FT_Face face, + FT_Size size, + FT_GlyphSlot slot, + FT_Bool hinting ); + + void + (*done)( T1_Builder builder ); + + T1_Builder_Check_Points_Func check_points; + T1_Builder_Add_Point_Func add_point; + T1_Builder_Add_Point1_Func add_point1; + T1_Builder_Add_Contour_Func add_contour; + T1_Builder_Start_Point_Func start_point; + T1_Builder_Close_Contour_Func close_contour; + + } T1_Builder_FuncsRec; + + + /* an enumeration type to handle charstring parsing states */ + typedef enum T1_ParseState_ + { + T1_Parse_Start, + T1_Parse_Have_Width, + T1_Parse_Have_Moveto, + T1_Parse_Have_Path + + } T1_ParseState; + + + /************************************************************************** + * + * @struct: + * T1_BuilderRec + * + * @description: + * A structure used during glyph loading to store its outline. + * + * @fields: + * memory :: + * The current memory object. + * + * face :: + * The current face object. + * + * glyph :: + * The current glyph slot. + * + * loader :: + * XXX + * + * base :: + * The base glyph outline. + * + * current :: + * The current glyph outline. + * + * max_points :: + * maximum points in builder outline + * + * max_contours :: + * Maximum number of contours in builder outline. + * + * pos_x :: + * The horizontal translation (if composite glyph). + * + * pos_y :: + * The vertical translation (if composite glyph). + * + * left_bearing :: + * The left side bearing point. + * + * advance :: + * The horizontal advance vector. + * + * bbox :: + * Unused. + * + * parse_state :: + * An enumeration which controls the charstring parsing state. + * + * load_points :: + * If this flag is not set, no points are loaded. + * + * no_recurse :: + * Set but not used. + * + * metrics_only :: + * A boolean indicating that we only want to compute the metrics of a + * given glyph, not load all of its points. + * + * funcs :: + * An array of function pointers for the builder. 
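+   *
+   * A sketch of how a charstring interpreter typically drives these
+   * methods while tracing an outline (illustrative only; `x', `y',
+   * `dx', `dy' and the on-curve flag value are assumptions):
+   *
+   *   error = builder->funcs.start_point( builder, x, y );
+   *   if ( !error )
+   *     builder->funcs.add_point( builder, x + dx, y + dy, 1 );
+   *
+   *   ...
+   *
+   *   builder->funcs.close_contour( builder );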
+ */ + typedef struct T1_BuilderRec_ + { + FT_Memory memory; + FT_Face face; + FT_GlyphSlot glyph; + FT_GlyphLoader loader; + FT_Outline* base; + FT_Outline* current; + + FT_Pos pos_x; + FT_Pos pos_y; + + FT_Vector left_bearing; + FT_Vector advance; + + FT_BBox bbox; /* bounding box */ + T1_ParseState parse_state; + FT_Bool load_points; + FT_Bool no_recurse; + + FT_Bool metrics_only; + + void* hints_funcs; /* hinter-specific */ + void* hints_globals; /* hinter-specific */ + + T1_Builder_FuncsRec funcs; + + } T1_BuilderRec; + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** T1 DECODER *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + +#if 0 + + /************************************************************************** + * + * T1_MAX_SUBRS_CALLS details the maximum number of nested sub-routine + * calls during glyph loading. + */ +#define T1_MAX_SUBRS_CALLS 8 + + + /************************************************************************** + * + * T1_MAX_CHARSTRING_OPERANDS is the charstring stack's capacity. A + * minimum of 16 is required. + */ +#define T1_MAX_CHARSTRINGS_OPERANDS 32 + +#endif /* 0 */ + + + typedef struct T1_Decoder_ZoneRec_ + { + FT_Byte* cursor; + FT_Byte* base; + FT_Byte* limit; + + } T1_Decoder_ZoneRec, *T1_Decoder_Zone; + + + typedef struct T1_DecoderRec_* T1_Decoder; + typedef const struct T1_Decoder_FuncsRec_* T1_Decoder_Funcs; + + + typedef FT_Error + (*T1_Decoder_Callback)( T1_Decoder decoder, + FT_UInt glyph_index ); + + + typedef struct T1_Decoder_FuncsRec_ + { + FT_Error + (*init)( T1_Decoder decoder, + FT_Face face, + FT_Size size, + FT_GlyphSlot slot, + FT_Byte** glyph_names, + PS_Blend blend, + FT_Bool hinting, + FT_Render_Mode hint_mode, + T1_Decoder_Callback callback ); + + void + (*done)( T1_Decoder decoder ); + +#ifdef T1_CONFIG_OPTION_OLD_ENGINE + FT_Error + (*parse_charstrings_old)( T1_Decoder decoder, + FT_Byte* base, + FT_UInt len ); +#else + FT_Error + (*parse_metrics)( T1_Decoder decoder, + FT_Byte* base, + FT_UInt len ); +#endif + + FT_Error + (*parse_charstrings)( PS_Decoder* decoder, + FT_Byte* charstring_base, + FT_ULong charstring_len ); + + + } T1_Decoder_FuncsRec; + + + typedef struct T1_DecoderRec_ + { + T1_BuilderRec builder; + + FT_Long stack[T1_MAX_CHARSTRINGS_OPERANDS]; + FT_Long* top; + + T1_Decoder_ZoneRec zones[T1_MAX_SUBRS_CALLS + 1]; + T1_Decoder_Zone zone; + + FT_Service_PsCMaps psnames; /* for seac */ + FT_UInt num_glyphs; + FT_Byte** glyph_names; + + FT_Int lenIV; /* internal for sub routine calls */ + FT_Int num_subrs; + FT_Byte** subrs; + FT_UInt* subrs_len; /* array of subrs length (optional) */ + FT_Hash subrs_hash; /* used if `num_subrs' was massaged */ + + FT_Matrix font_matrix; + FT_Vector font_offset; + + FT_Int flex_state; + FT_Int num_flex_vectors; + FT_Vector flex_vectors[7]; + + PS_Blend blend; /* for multiple master support */ + + FT_Render_Mode hint_mode; + + T1_Decoder_Callback parse_callback; + T1_Decoder_FuncsRec funcs; + + FT_Long* buildchar; + FT_UInt len_buildchar; + + FT_Bool seac; + + FT_Generic cf2_instance; + + } T1_DecoderRec; + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** CFF BUILDER *****/ + /***** *****/ 
+ /*************************************************************************/ + /*************************************************************************/ + + + typedef struct CFF_Builder_ CFF_Builder; + + + typedef FT_Error + (*CFF_Builder_Check_Points_Func)( CFF_Builder* builder, + FT_Int count ); + + typedef void + (*CFF_Builder_Add_Point_Func)( CFF_Builder* builder, + FT_Pos x, + FT_Pos y, + FT_Byte flag ); + typedef FT_Error + (*CFF_Builder_Add_Point1_Func)( CFF_Builder* builder, + FT_Pos x, + FT_Pos y ); + typedef FT_Error + (*CFF_Builder_Start_Point_Func)( CFF_Builder* builder, + FT_Pos x, + FT_Pos y ); + typedef void + (*CFF_Builder_Close_Contour_Func)( CFF_Builder* builder ); + + typedef FT_Error + (*CFF_Builder_Add_Contour_Func)( CFF_Builder* builder ); + + typedef const struct CFF_Builder_FuncsRec_* CFF_Builder_Funcs; + + typedef struct CFF_Builder_FuncsRec_ + { + void + (*init)( CFF_Builder* builder, + TT_Face face, + CFF_Size size, + CFF_GlyphSlot glyph, + FT_Bool hinting ); + + void + (*done)( CFF_Builder* builder ); + + CFF_Builder_Check_Points_Func check_points; + CFF_Builder_Add_Point_Func add_point; + CFF_Builder_Add_Point1_Func add_point1; + CFF_Builder_Add_Contour_Func add_contour; + CFF_Builder_Start_Point_Func start_point; + CFF_Builder_Close_Contour_Func close_contour; + + } CFF_Builder_FuncsRec; + + + /************************************************************************** + * + * @struct: + * CFF_Builder + * + * @description: + * A structure used during glyph loading to store its outline. + * + * @fields: + * memory :: + * The current memory object. + * + * face :: + * The current face object. + * + * glyph :: + * The current glyph slot. + * + * loader :: + * The current glyph loader. + * + * base :: + * The base glyph outline. + * + * current :: + * The current glyph outline. + * + * pos_x :: + * The horizontal translation (if composite glyph). + * + * pos_y :: + * The vertical translation (if composite glyph). + * + * left_bearing :: + * The left side bearing point. + * + * advance :: + * The horizontal advance vector. + * + * bbox :: + * Unused. + * + * path_begun :: + * A flag which indicates that a new path has begun. + * + * load_points :: + * If this flag is not set, no points are loaded. + * + * no_recurse :: + * Set but not used. + * + * metrics_only :: + * A boolean indicating that we only want to compute the metrics of a + * given glyph, not load all of its points. + * + * hints_funcs :: + * Auxiliary pointer for hinting. + * + * hints_globals :: + * Auxiliary pointer for hinting. + * + * funcs :: + * A table of method pointers for this object. 
+ */ + struct CFF_Builder_ + { + FT_Memory memory; + TT_Face face; + CFF_GlyphSlot glyph; + FT_GlyphLoader loader; + FT_Outline* base; + FT_Outline* current; + + FT_Pos pos_x; + FT_Pos pos_y; + + FT_Vector left_bearing; + FT_Vector advance; + + FT_BBox bbox; /* bounding box */ + + FT_Bool path_begun; + FT_Bool load_points; + FT_Bool no_recurse; + + FT_Bool metrics_only; + + void* hints_funcs; /* hinter-specific */ + void* hints_globals; /* hinter-specific */ + + CFF_Builder_FuncsRec funcs; + }; + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** CFF DECODER *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + + +#define CFF_MAX_OPERANDS 48 +#define CFF_MAX_SUBRS_CALLS 16 /* maximum subroutine nesting; */ + /* only 10 are allowed but there exist */ + /* fonts like `HiraKakuProN-W3.ttf' */ + /* (Hiragino Kaku Gothic ProN W3; */ + /* 8.2d6e1; 2014-12-19) that exceed */ + /* this limit */ +#define CFF_MAX_TRANS_ELEMENTS 32 + + /* execution context charstring zone */ + + typedef struct CFF_Decoder_Zone_ + { + FT_Byte* base; + FT_Byte* limit; + FT_Byte* cursor; + + } CFF_Decoder_Zone; + + + typedef struct CFF_Decoder_ + { + CFF_Builder builder; + CFF_Font cff; + + FT_Fixed stack[CFF_MAX_OPERANDS + 1]; + FT_Fixed* top; + + CFF_Decoder_Zone zones[CFF_MAX_SUBRS_CALLS + 1]; + CFF_Decoder_Zone* zone; + + FT_Int flex_state; + FT_Int num_flex_vectors; + FT_Vector flex_vectors[7]; + + FT_Pos glyph_width; + FT_Pos nominal_width; + + FT_Bool read_width; + FT_Bool width_only; + FT_Int num_hints; + FT_Fixed buildchar[CFF_MAX_TRANS_ELEMENTS]; + + FT_UInt num_locals; + FT_UInt num_globals; + + FT_Int locals_bias; + FT_Int globals_bias; + + FT_Byte** locals; + FT_Byte** globals; + + FT_Byte** glyph_names; /* for pure CFF fonts only */ + FT_UInt num_glyphs; /* number of glyphs in font */ + + FT_Render_Mode hint_mode; + + FT_Bool seac; + + CFF_SubFont current_subfont; /* for current glyph_index */ + + CFF_Decoder_Get_Glyph_Callback get_glyph_callback; + CFF_Decoder_Free_Glyph_Callback free_glyph_callback; + + } CFF_Decoder; + + + typedef const struct CFF_Decoder_FuncsRec_* CFF_Decoder_Funcs; + + typedef struct CFF_Decoder_FuncsRec_ + { + void + (*init)( CFF_Decoder* decoder, + TT_Face face, + CFF_Size size, + CFF_GlyphSlot slot, + FT_Bool hinting, + FT_Render_Mode hint_mode, + CFF_Decoder_Get_Glyph_Callback get_callback, + CFF_Decoder_Free_Glyph_Callback free_callback ); + + FT_Error + (*prepare)( CFF_Decoder* decoder, + CFF_Size size, + FT_UInt glyph_index ); + +#ifdef CFF_CONFIG_OPTION_OLD_ENGINE + FT_Error + (*parse_charstrings_old)( CFF_Decoder* decoder, + FT_Byte* charstring_base, + FT_ULong charstring_len, + FT_Bool in_dict ); +#endif + + FT_Error + (*parse_charstrings)( PS_Decoder* decoder, + FT_Byte* charstring_base, + FT_ULong charstring_len ); + + } CFF_Decoder_FuncsRec; + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** AFM PARSER *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + + typedef struct AFM_ParserRec_* AFM_Parser; + + typedef struct AFM_Parser_FuncsRec_ + { + FT_Error + 
(*init)( AFM_Parser parser, + FT_Memory memory, + FT_Byte* base, + FT_Byte* limit ); + + void + (*done)( AFM_Parser parser ); + + FT_Error + (*parse)( AFM_Parser parser ); + + } AFM_Parser_FuncsRec; + + + typedef struct AFM_StreamRec_* AFM_Stream; + + + /************************************************************************** + * + * @struct: + * AFM_ParserRec + * + * @description: + * An AFM_Parser is a parser for the AFM files. + * + * @fields: + * memory :: + * The object used for memory operations (alloc and realloc). + * + * stream :: + * This is an opaque object. + * + * FontInfo :: + * The result will be stored here. + * + * get_index :: + * A user provided function to get a glyph index by its name. + */ + typedef struct AFM_ParserRec_ + { + FT_Memory memory; + AFM_Stream stream; + + AFM_FontInfo FontInfo; + + FT_Int + (*get_index)( const char* name, + FT_Offset len, + void* user_data ); + + void* user_data; + + } AFM_ParserRec; + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** TYPE1 CHARMAPS *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + + typedef const struct T1_CMap_ClassesRec_* T1_CMap_Classes; + + typedef struct T1_CMap_ClassesRec_ + { + FT_CMap_Class standard; + FT_CMap_Class expert; + FT_CMap_Class custom; + FT_CMap_Class unicode; + + } T1_CMap_ClassesRec; + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** PSAux Module Interface *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + + typedef struct PSAux_ServiceRec_ + { + /* don't use `PS_Table_Funcs' and friends to avoid compiler warnings */ + const PS_Table_FuncsRec* ps_table_funcs; + const PS_Parser_FuncsRec* ps_parser_funcs; + const T1_Builder_FuncsRec* t1_builder_funcs; + const T1_Decoder_FuncsRec* t1_decoder_funcs; + + void + (*t1_decrypt)( FT_Byte* buffer, + FT_Offset length, + FT_UShort seed ); + + FT_UInt32 + (*cff_random)( FT_UInt32 r ); + + void + (*ps_decoder_init)( PS_Decoder* ps_decoder, + void* decoder, + FT_Bool is_t1 ); + + void + (*t1_make_subfont)( FT_Face face, + PS_Private priv, + CFF_SubFont subfont ); + + T1_CMap_Classes t1_cmap_classes; + + /* fields after this comment line were added after version 2.1.10 */ + const AFM_Parser_FuncsRec* afm_parser_funcs; + + const CFF_Decoder_FuncsRec* cff_decoder_funcs; + + } PSAux_ServiceRec, *PSAux_Service; + + /* backward compatible type definition */ + typedef PSAux_ServiceRec PSAux_Interface; + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** Some convenience functions *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + +#define IS_PS_NEWLINE( ch ) \ + ( (ch) == '\r' || \ + (ch) == '\n' ) + +#define IS_PS_SPACE( ch ) \ + ( (ch) == ' ' || \ + IS_PS_NEWLINE( ch ) || \ + (ch) == '\t' || \ + (ch) == '\f' || \ + (ch) == '\0' ) + +#define IS_PS_SPECIAL( ch ) \ + ( (ch) == '/' || \ + (ch) 
== '(' || (ch) == ')' || \ + (ch) == '<' || (ch) == '>' || \ + (ch) == '[' || (ch) == ']' || \ + (ch) == '{' || (ch) == '}' || \ + (ch) == '%' ) + +#define IS_PS_DELIM( ch ) \ + ( IS_PS_SPACE( ch ) || \ + IS_PS_SPECIAL( ch ) ) + +#define IS_PS_DIGIT( ch ) \ + ( (ch) >= '0' && (ch) <= '9' ) + +#define IS_PS_XDIGIT( ch ) \ + ( IS_PS_DIGIT( ch ) || \ + ( (ch) >= 'A' && (ch) <= 'F' ) || \ + ( (ch) >= 'a' && (ch) <= 'f' ) ) + +#define IS_PS_BASE85( ch ) \ + ( (ch) >= '!' && (ch) <= 'u' ) + +#define IS_PS_TOKEN( cur, limit, token ) \ + ( (char)(cur)[0] == (token)[0] && \ + ( (cur) + sizeof ( (token) ) == (limit) || \ + ( (cur) + sizeof( (token) ) < (limit) && \ + IS_PS_DELIM( (cur)[sizeof ( (token) ) - 1] ) ) ) && \ + ft_strncmp( (char*)(cur), (token), sizeof ( (token) ) - 1 ) == 0 ) + + +FT_END_HEADER + +#endif /* PSAUX_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/pshints.h b/Example/include/freetype/internal/pshints.h new file mode 100644 index 0000000..ededc4c --- /dev/null +++ b/Example/include/freetype/internal/pshints.h @@ -0,0 +1,699 @@ +/**************************************************************************** + * + * pshints.h + * + * Interface to Postscript-specific (Type 1 and Type 2) hints + * recorders (specification only). These are used to support native + * T1/T2 hints in the 'type1', 'cid', and 'cff' font drivers. + * + * Copyright (C) 2001-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef PSHINTS_H_ +#define PSHINTS_H_ + + +#include +#include + + +FT_BEGIN_HEADER + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** INTERNAL REPRESENTATION OF GLOBALS *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + + typedef struct PSH_GlobalsRec_* PSH_Globals; + + typedef FT_Error + (*PSH_Globals_NewFunc)( FT_Memory memory, + T1_Private* private_dict, + PSH_Globals* aglobals ); + + typedef void + (*PSH_Globals_SetScaleFunc)( PSH_Globals globals, + FT_Fixed x_scale, + FT_Fixed y_scale, + FT_Fixed x_delta, + FT_Fixed y_delta ); + + typedef void + (*PSH_Globals_DestroyFunc)( PSH_Globals globals ); + + + typedef struct PSH_Globals_FuncsRec_ + { + PSH_Globals_NewFunc create; + PSH_Globals_SetScaleFunc set_scale; + PSH_Globals_DestroyFunc destroy; + + } PSH_Globals_FuncsRec, *PSH_Globals_Funcs; + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** PUBLIC TYPE 1 HINTS RECORDER *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + + /************************************************************************** + * + * @type: + * T1_Hints + * + * @description: + * This is a handle to an opaque structure used to record glyph hints + * from a Type 1 character glyph character string. 
+ * + * The methods used to operate on this object are defined by the + * @T1_Hints_FuncsRec structure. Recording glyph hints is normally + * achieved through the following scheme: + * + * - Open a new hint recording session by calling the 'open' method. + * This rewinds the recorder and prepare it for new input. + * + * - For each hint found in the glyph charstring, call the corresponding + * method ('stem', 'stem3', or 'reset'). Note that these functions do + * not return an error code. + * + * - Close the recording session by calling the 'close' method. It + * returns an error code if the hints were invalid or something strange + * happened (e.g., memory shortage). + * + * The hints accumulated in the object can later be used by the + * PostScript hinter. + * + */ + typedef struct T1_HintsRec_* T1_Hints; + + + /************************************************************************** + * + * @type: + * T1_Hints_Funcs + * + * @description: + * A pointer to the @T1_Hints_FuncsRec structure that defines the API of + * a given @T1_Hints object. + * + */ + typedef const struct T1_Hints_FuncsRec_* T1_Hints_Funcs; + + + /************************************************************************** + * + * @functype: + * T1_Hints_OpenFunc + * + * @description: + * A method of the @T1_Hints class used to prepare it for a new Type 1 + * hints recording session. + * + * @input: + * hints :: + * A handle to the Type 1 hints recorder. + * + * @note: + * You should always call the @T1_Hints_CloseFunc method in order to + * close an opened recording session. + * + */ + typedef void + (*T1_Hints_OpenFunc)( T1_Hints hints ); + + + /************************************************************************** + * + * @functype: + * T1_Hints_SetStemFunc + * + * @description: + * A method of the @T1_Hints class used to record a new horizontal or + * vertical stem. This corresponds to the Type 1 'hstem' and 'vstem' + * operators. + * + * @input: + * hints :: + * A handle to the Type 1 hints recorder. + * + * dimension :: + * 0 for horizontal stems (hstem), 1 for vertical ones (vstem). + * + * coords :: + * Array of 2 coordinates in 16.16 format, used as (position,length) + * stem descriptor. + * + * @note: + * Use vertical coordinates (y) for horizontal stems (dim=0). Use + * horizontal coordinates (x) for vertical stems (dim=1). + * + * 'coords[0]' is the absolute stem position (lowest coordinate); + * 'coords[1]' is the length. + * + * The length can be negative, in which case it must be either -20 or + * -21. It is interpreted as a 'ghost' stem, according to the Type 1 + * specification. + * + * If the length is -21 (corresponding to a bottom ghost stem), then the + * real stem position is 'coords[0]+coords[1]'. + * + */ + typedef void + (*T1_Hints_SetStemFunc)( T1_Hints hints, + FT_UInt dimension, + FT_Fixed* coords ); + + + /************************************************************************** + * + * @functype: + * T1_Hints_SetStem3Func + * + * @description: + * A method of the @T1_Hints class used to record three + * counter-controlled horizontal or vertical stems at once. + * + * @input: + * hints :: + * A handle to the Type 1 hints recorder. + * + * dimension :: + * 0 for horizontal stems, 1 for vertical ones. + * + * coords :: + * An array of 6 values in 16.16 format, holding 3 (position,length) + * pairs for the counter-controlled stems. + * + * @note: + * Use vertical coordinates (y) for horizontal stems (dim=0). Use + * horizontal coordinates (x) for vertical stems (dim=1). 
+ * + * The lengths cannot be negative (ghost stems are never + * counter-controlled). + * + */ + typedef void + (*T1_Hints_SetStem3Func)( T1_Hints hints, + FT_UInt dimension, + FT_Fixed* coords ); + + + /************************************************************************** + * + * @functype: + * T1_Hints_ResetFunc + * + * @description: + * A method of the @T1_Hints class used to reset the stems hints in a + * recording session. + * + * @input: + * hints :: + * A handle to the Type 1 hints recorder. + * + * end_point :: + * The index of the last point in the input glyph in which the + * previously defined hints apply. + * + */ + typedef void + (*T1_Hints_ResetFunc)( T1_Hints hints, + FT_UInt end_point ); + + + /************************************************************************** + * + * @functype: + * T1_Hints_CloseFunc + * + * @description: + * A method of the @T1_Hints class used to close a hint recording + * session. + * + * @input: + * hints :: + * A handle to the Type 1 hints recorder. + * + * end_point :: + * The index of the last point in the input glyph. + * + * @return: + * FreeType error code. 0 means success. + * + * @note: + * The error code is set to indicate that an error occurred during the + * recording session. + * + */ + typedef FT_Error + (*T1_Hints_CloseFunc)( T1_Hints hints, + FT_UInt end_point ); + + + /************************************************************************** + * + * @functype: + * T1_Hints_ApplyFunc + * + * @description: + * A method of the @T1_Hints class used to apply hints to the + * corresponding glyph outline. Must be called once all hints have been + * recorded. + * + * @input: + * hints :: + * A handle to the Type 1 hints recorder. + * + * outline :: + * A pointer to the target outline descriptor. + * + * globals :: + * The hinter globals for this font. + * + * hint_mode :: + * Hinting information. + * + * @return: + * FreeType error code. 0 means success. + * + * @note: + * On input, all points within the outline are in font coordinates. On + * output, they are in 1/64 of pixels. + * + * The scaling transformation is taken from the 'globals' object which + * must correspond to the same font as the glyph. + * + */ + typedef FT_Error + (*T1_Hints_ApplyFunc)( T1_Hints hints, + FT_Outline* outline, + PSH_Globals globals, + FT_Render_Mode hint_mode ); + + + /************************************************************************** + * + * @struct: + * T1_Hints_FuncsRec + * + * @description: + * The structure used to provide the API to @T1_Hints objects. + * + * @fields: + * hints :: + * A handle to the T1 Hints recorder. + * + * open :: + * The function to open a recording session. + * + * close :: + * The function to close a recording session. + * + * stem :: + * The function to set a simple stem. + * + * stem3 :: + * The function to set counter-controlled stems. + * + * reset :: + * The function to reset stem hints. + * + * apply :: + * The function to apply the hints to the corresponding glyph outline. 
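+   *
+   * The recording scheme described for @T1_Hints can be sketched as
+   * follows (illustrative only; `funcs', `coords', `stem_position',
+   * `stem_length', `end_point', `outline', `globals' and `hint_mode'
+   * are assumed to be set up by the caller):
+   *
+   *   funcs->open( funcs->hints );
+   *
+   *   coords[0] = stem_position;
+   *   coords[1] = stem_length;
+   *   funcs->stem( funcs->hints, 0, coords );
+   *
+   *   error = funcs->close( funcs->hints, end_point );
+   *   if ( !error )
+   *     error = funcs->apply( funcs->hints, outline,
+   *                           globals, hint_mode );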
+ * + */ + typedef struct T1_Hints_FuncsRec_ + { + T1_Hints hints; + T1_Hints_OpenFunc open; + T1_Hints_CloseFunc close; + T1_Hints_SetStemFunc stem; + T1_Hints_SetStem3Func stem3; + T1_Hints_ResetFunc reset; + T1_Hints_ApplyFunc apply; + + } T1_Hints_FuncsRec; + + + /*************************************************************************/ + /*************************************************************************/ + /***** *****/ + /***** PUBLIC TYPE 2 HINTS RECORDER *****/ + /***** *****/ + /*************************************************************************/ + /*************************************************************************/ + + /************************************************************************** + * + * @type: + * T2_Hints + * + * @description: + * This is a handle to an opaque structure used to record glyph hints + * from a Type 2 character glyph character string. + * + * The methods used to operate on this object are defined by the + * @T2_Hints_FuncsRec structure. Recording glyph hints is normally + * achieved through the following scheme: + * + * - Open a new hint recording session by calling the 'open' method. + * This rewinds the recorder and prepare it for new input. + * + * - For each hint found in the glyph charstring, call the corresponding + * method ('stems', 'hintmask', 'counters'). Note that these functions + * do not return an error code. + * + * - Close the recording session by calling the 'close' method. It + * returns an error code if the hints were invalid or something strange + * happened (e.g., memory shortage). + * + * The hints accumulated in the object can later be used by the + * Postscript hinter. + * + */ + typedef struct T2_HintsRec_* T2_Hints; + + + /************************************************************************** + * + * @type: + * T2_Hints_Funcs + * + * @description: + * A pointer to the @T2_Hints_FuncsRec structure that defines the API of + * a given @T2_Hints object. + * + */ + typedef const struct T2_Hints_FuncsRec_* T2_Hints_Funcs; + + + /************************************************************************** + * + * @functype: + * T2_Hints_OpenFunc + * + * @description: + * A method of the @T2_Hints class used to prepare it for a new Type 2 + * hints recording session. + * + * @input: + * hints :: + * A handle to the Type 2 hints recorder. + * + * @note: + * You should always call the @T2_Hints_CloseFunc method in order to + * close an opened recording session. + * + */ + typedef void + (*T2_Hints_OpenFunc)( T2_Hints hints ); + + + /************************************************************************** + * + * @functype: + * T2_Hints_StemsFunc + * + * @description: + * A method of the @T2_Hints class used to set the table of stems in + * either the vertical or horizontal dimension. Equivalent to the + * 'hstem', 'vstem', 'hstemhm', and 'vstemhm' Type 2 operators. + * + * @input: + * hints :: + * A handle to the Type 2 hints recorder. + * + * dimension :: + * 0 for horizontal stems (hstem), 1 for vertical ones (vstem). + * + * count :: + * The number of stems. + * + * coords :: + * An array of 'count' (position,length) pairs in 16.16 format. + * + * @note: + * Use vertical coordinates (y) for horizontal stems (dim=0). Use + * horizontal coordinates (x) for vertical stems (dim=1). + * + * There are '2*count' elements in the 'coords' array. Each even element + * is an absolute position in font units, each odd element is a length in + * font units. 
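+   *
+   * For example (the values are made up for illustration), two stems
+   * at positions 100 and 400, each 50 font units long, are passed as
+   * 'count = 2' with
+   *
+   *   coords[0] = 100 << 16;
+   *   coords[1] =  50 << 16;
+   *   coords[2] = 400 << 16;
+   *   coords[3] =  50 << 16;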
+ * + * A length can be negative, in which case it must be either -20 or -21. + * It is interpreted as a 'ghost' stem, according to the Type 1 + * specification. + * + */ + typedef void + (*T2_Hints_StemsFunc)( T2_Hints hints, + FT_UInt dimension, + FT_Int count, + FT_Fixed* coordinates ); + + + /************************************************************************** + * + * @functype: + * T2_Hints_MaskFunc + * + * @description: + * A method of the @T2_Hints class used to set a given hintmask (this + * corresponds to the 'hintmask' Type 2 operator). + * + * @input: + * hints :: + * A handle to the Type 2 hints recorder. + * + * end_point :: + * The glyph index of the last point to which the previously defined or + * activated hints apply. + * + * bit_count :: + * The number of bits in the hint mask. + * + * bytes :: + * An array of bytes modelling the hint mask. + * + * @note: + * If the hintmask starts the charstring (before any glyph point + * definition), the value of `end_point` should be 0. + * + * `bit_count` is the number of meaningful bits in the 'bytes' array; it + * must be equal to the total number of hints defined so far (i.e., + * horizontal+verticals). + * + * The 'bytes' array can come directly from the Type 2 charstring and + * respects the same format. + * + */ + typedef void + (*T2_Hints_MaskFunc)( T2_Hints hints, + FT_UInt end_point, + FT_UInt bit_count, + const FT_Byte* bytes ); + + + /************************************************************************** + * + * @functype: + * T2_Hints_CounterFunc + * + * @description: + * A method of the @T2_Hints class used to set a given counter mask (this + * corresponds to the 'hintmask' Type 2 operator). + * + * @input: + * hints :: + * A handle to the Type 2 hints recorder. + * + * end_point :: + * A glyph index of the last point to which the previously defined or + * active hints apply. + * + * bit_count :: + * The number of bits in the hint mask. + * + * bytes :: + * An array of bytes modelling the hint mask. + * + * @note: + * If the hintmask starts the charstring (before any glyph point + * definition), the value of `end_point` should be 0. + * + * `bit_count` is the number of meaningful bits in the 'bytes' array; it + * must be equal to the total number of hints defined so far (i.e., + * horizontal+verticals). + * + * The 'bytes' array can come directly from the Type 2 charstring and + * respects the same format. + * + */ + typedef void + (*T2_Hints_CounterFunc)( T2_Hints hints, + FT_UInt bit_count, + const FT_Byte* bytes ); + + + /************************************************************************** + * + * @functype: + * T2_Hints_CloseFunc + * + * @description: + * A method of the @T2_Hints class used to close a hint recording + * session. + * + * @input: + * hints :: + * A handle to the Type 2 hints recorder. + * + * end_point :: + * The index of the last point in the input glyph. + * + * @return: + * FreeType error code. 0 means success. + * + * @note: + * The error code is set to indicate that an error occurred during the + * recording session. + * + */ + typedef FT_Error + (*T2_Hints_CloseFunc)( T2_Hints hints, + FT_UInt end_point ); + + + /************************************************************************** + * + * @functype: + * T2_Hints_ApplyFunc + * + * @description: + * A method of the @T2_Hints class used to apply hints to the + * corresponding glyph outline. Must be called after the 'close' method. + * + * @input: + * hints :: + * A handle to the Type 2 hints recorder. 
+ * + * outline :: + * A pointer to the target outline descriptor. + * + * globals :: + * The hinter globals for this font. + * + * hint_mode :: + * Hinting information. + * + * @return: + * FreeType error code. 0 means success. + * + * @note: + * On input, all points within the outline are in font coordinates. On + * output, they are in 1/64 of pixels. + * + * The scaling transformation is taken from the 'globals' object which + * must correspond to the same font than the glyph. + * + */ + typedef FT_Error + (*T2_Hints_ApplyFunc)( T2_Hints hints, + FT_Outline* outline, + PSH_Globals globals, + FT_Render_Mode hint_mode ); + + + /************************************************************************** + * + * @struct: + * T2_Hints_FuncsRec + * + * @description: + * The structure used to provide the API to @T2_Hints objects. + * + * @fields: + * hints :: + * A handle to the T2 hints recorder object. + * + * open :: + * The function to open a recording session. + * + * close :: + * The function to close a recording session. + * + * stems :: + * The function to set the dimension's stems table. + * + * hintmask :: + * The function to set hint masks. + * + * counter :: + * The function to set counter masks. + * + * apply :: + * The function to apply the hints on the corresponding glyph outline. + * + */ + typedef struct T2_Hints_FuncsRec_ + { + T2_Hints hints; + T2_Hints_OpenFunc open; + T2_Hints_CloseFunc close; + T2_Hints_StemsFunc stems; + T2_Hints_MaskFunc hintmask; + T2_Hints_CounterFunc counter; + T2_Hints_ApplyFunc apply; + + } T2_Hints_FuncsRec; + + + /* */ + + + typedef struct PSHinter_Interface_ + { + PSH_Globals_Funcs (*get_globals_funcs)( FT_Module module ); + T1_Hints_Funcs (*get_t1_funcs) ( FT_Module module ); + T2_Hints_Funcs (*get_t2_funcs) ( FT_Module module ); + + } PSHinter_Interface; + + typedef PSHinter_Interface* PSHinter_Service; + + +#define FT_DEFINE_PSHINTER_INTERFACE( \ + class_, \ + get_globals_funcs_, \ + get_t1_funcs_, \ + get_t2_funcs_ ) \ + static const PSHinter_Interface class_ = \ + { \ + get_globals_funcs_, \ + get_t1_funcs_, \ + get_t2_funcs_ \ + }; + + +FT_END_HEADER + +#endif /* PSHINTS_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svbdf.h b/Example/include/freetype/internal/services/svbdf.h new file mode 100644 index 0000000..bf0c1dc --- /dev/null +++ b/Example/include/freetype/internal/services/svbdf.h @@ -0,0 +1,66 @@ +/**************************************************************************** + * + * svbdf.h + * + * The FreeType BDF services (specification). + * + * Copyright (C) 2003-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. 
+ * + */ + + +#ifndef SVBDF_H_ +#define SVBDF_H_ + +#include +#include + + +FT_BEGIN_HEADER + + +#define FT_SERVICE_ID_BDF "bdf" + + typedef FT_Error + (*FT_BDF_GetCharsetIdFunc)( FT_Face face, + const char* *acharset_encoding, + const char* *acharset_registry ); + + typedef FT_Error + (*FT_BDF_GetPropertyFunc)( FT_Face face, + const char* prop_name, + BDF_PropertyRec *aproperty ); + + + FT_DEFINE_SERVICE( BDF ) + { + FT_BDF_GetCharsetIdFunc get_charset_id; + FT_BDF_GetPropertyFunc get_property; + }; + + +#define FT_DEFINE_SERVICE_BDFRec( class_, \ + get_charset_id_, \ + get_property_ ) \ + static const FT_Service_BDFRec class_ = \ + { \ + get_charset_id_, get_property_ \ + }; + + /* */ + + +FT_END_HEADER + + +#endif /* SVBDF_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svcfftl.h b/Example/include/freetype/internal/services/svcfftl.h new file mode 100644 index 0000000..4a20498 --- /dev/null +++ b/Example/include/freetype/internal/services/svcfftl.h @@ -0,0 +1,90 @@ +/**************************************************************************** + * + * svcfftl.h + * + * The FreeType CFF tables loader service (specification). + * + * Copyright (C) 2017-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef SVCFFTL_H_ +#define SVCFFTL_H_ + +#include +#include + + +FT_BEGIN_HEADER + + +#define FT_SERVICE_ID_CFF_LOAD "cff-load" + + + typedef FT_UShort + (*FT_Get_Standard_Encoding_Func)( FT_UInt charcode ); + + typedef FT_Error + (*FT_Load_Private_Dict_Func)( CFF_Font font, + CFF_SubFont subfont, + FT_UInt lenNDV, + FT_Fixed* NDV ); + + typedef FT_Byte + (*FT_FD_Select_Get_Func)( CFF_FDSelect fdselect, + FT_UInt glyph_index ); + + typedef FT_Bool + (*FT_Blend_Check_Vector_Func)( CFF_Blend blend, + FT_UInt vsindex, + FT_UInt lenNDV, + FT_Fixed* NDV ); + + typedef FT_Error + (*FT_Blend_Build_Vector_Func)( CFF_Blend blend, + FT_UInt vsindex, + FT_UInt lenNDV, + FT_Fixed* NDV ); + + + FT_DEFINE_SERVICE( CFFLoad ) + { + FT_Get_Standard_Encoding_Func get_standard_encoding; + FT_Load_Private_Dict_Func load_private_dict; + FT_FD_Select_Get_Func fd_select_get; + FT_Blend_Check_Vector_Func blend_check_vector; + FT_Blend_Build_Vector_Func blend_build_vector; + }; + + +#define FT_DEFINE_SERVICE_CFFLOADREC( class_, \ + get_standard_encoding_, \ + load_private_dict_, \ + fd_select_get_, \ + blend_check_vector_, \ + blend_build_vector_ ) \ + static const FT_Service_CFFLoadRec class_ = \ + { \ + get_standard_encoding_, \ + load_private_dict_, \ + fd_select_get_, \ + blend_check_vector_, \ + blend_build_vector_ \ + }; + + +FT_END_HEADER + + +#endif + + +/* END */ diff --git a/Example/include/freetype/internal/services/svcid.h b/Example/include/freetype/internal/services/svcid.h new file mode 100644 index 0000000..06d0cb8 --- /dev/null +++ b/Example/include/freetype/internal/services/svcid.h @@ -0,0 +1,69 @@ +/**************************************************************************** + * + * svcid.h + * + * The FreeType CID font services (specification). + * + * Copyright (C) 2007-2023 by + * Derek Clegg and Michael Toftdal. 
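For orientation, this is how a bitmap-font driver might instantiate the BDF service record declared in svbdf.h above, using the FT_DEFINE_SERVICE_BDFRec helper just defined. The two `pcf_*` callbacks are hypothetical names standing in for the driver's real implementations of the typedefs above.

  /* Hedged sketch: instantiating the BDF service record in a driver. */
  /* The `pcf_*' functions are placeholders implemented elsewhere.    */
  FT_Error
  pcf_get_charset_id( FT_Face       face,
                      const char*  *acharset_encoding,
                      const char*  *acharset_registry );

  FT_Error
  pcf_get_bdf_property( FT_Face           face,
                        const char*       prop_name,
                        BDF_PropertyRec  *aproperty );

  FT_DEFINE_SERVICE_BDFRec(
    pcf_service_bdf,

    pcf_get_charset_id,     /* get_charset_id */
    pcf_get_bdf_property )  /* get_property   */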
+ * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef SVCID_H_ +#define SVCID_H_ + +#include + + +FT_BEGIN_HEADER + + +#define FT_SERVICE_ID_CID "CID" + + typedef FT_Error + (*FT_CID_GetRegistryOrderingSupplementFunc)( FT_Face face, + const char* *registry, + const char* *ordering, + FT_Int *supplement ); + typedef FT_Error + (*FT_CID_GetIsInternallyCIDKeyedFunc)( FT_Face face, + FT_Bool *is_cid ); + typedef FT_Error + (*FT_CID_GetCIDFromGlyphIndexFunc)( FT_Face face, + FT_UInt glyph_index, + FT_UInt *cid ); + + FT_DEFINE_SERVICE( CID ) + { + FT_CID_GetRegistryOrderingSupplementFunc get_ros; + FT_CID_GetIsInternallyCIDKeyedFunc get_is_cid; + FT_CID_GetCIDFromGlyphIndexFunc get_cid_from_glyph_index; + }; + + +#define FT_DEFINE_SERVICE_CIDREC( class_, \ + get_ros_, \ + get_is_cid_, \ + get_cid_from_glyph_index_ ) \ + static const FT_Service_CIDRec class_ = \ + { \ + get_ros_, get_is_cid_, get_cid_from_glyph_index_ \ + }; + + /* */ + + +FT_END_HEADER + + +#endif /* SVCID_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svfntfmt.h b/Example/include/freetype/internal/services/svfntfmt.h new file mode 100644 index 0000000..bc45e80 --- /dev/null +++ b/Example/include/freetype/internal/services/svfntfmt.h @@ -0,0 +1,55 @@ +/**************************************************************************** + * + * svfntfmt.h + * + * The FreeType font format service (specification only). + * + * Copyright (C) 2003-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef SVFNTFMT_H_ +#define SVFNTFMT_H_ + +#include + + +FT_BEGIN_HEADER + + + /* + * A trivial service used to return the name of a face's font driver, + * according to the XFree86 nomenclature. Note that the service data is a + * simple constant string pointer. + */ + +#define FT_SERVICE_ID_FONT_FORMAT "font-format" + +#define FT_FONT_FORMAT_TRUETYPE "TrueType" +#define FT_FONT_FORMAT_TYPE_1 "Type 1" +#define FT_FONT_FORMAT_BDF "BDF" +#define FT_FONT_FORMAT_PCF "PCF" +#define FT_FONT_FORMAT_TYPE_42 "Type 42" +#define FT_FONT_FORMAT_CID "CID Type 1" +#define FT_FONT_FORMAT_CFF "CFF" +#define FT_FONT_FORMAT_PFR "PFR" +#define FT_FONT_FORMAT_WINFNT "Windows FNT" + + /* */ + + +FT_END_HEADER + + +#endif /* SVFNTFMT_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svgldict.h b/Example/include/freetype/internal/services/svgldict.h new file mode 100644 index 0000000..6437abf --- /dev/null +++ b/Example/include/freetype/internal/services/svgldict.h @@ -0,0 +1,72 @@ +/**************************************************************************** + * + * svgldict.h + * + * The FreeType glyph dictionary services (specification). + * + * Copyright (C) 2003-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. 
By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef SVGLDICT_H_ +#define SVGLDICT_H_ + +#include + + +FT_BEGIN_HEADER + + + /* + * A service used to retrieve glyph names, as well as to find the index of + * a given glyph name in a font. + * + */ + +#define FT_SERVICE_ID_GLYPH_DICT "glyph-dict" + + + typedef FT_Error + (*FT_GlyphDict_GetNameFunc)( FT_Face face, + FT_UInt glyph_index, + FT_Pointer buffer, + FT_UInt buffer_max ); + + typedef FT_UInt + (*FT_GlyphDict_NameIndexFunc)( FT_Face face, + const FT_String* glyph_name ); + + + FT_DEFINE_SERVICE( GlyphDict ) + { + FT_GlyphDict_GetNameFunc get_name; + FT_GlyphDict_NameIndexFunc name_index; /* optional */ + }; + + +#define FT_DEFINE_SERVICE_GLYPHDICTREC( class_, \ + get_name_, \ + name_index_ ) \ + static const FT_Service_GlyphDictRec class_ = \ + { \ + get_name_, name_index_ \ + }; + + /* */ + + +FT_END_HEADER + + +#endif /* SVGLDICT_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svgxval.h b/Example/include/freetype/internal/services/svgxval.h new file mode 100644 index 0000000..31016af --- /dev/null +++ b/Example/include/freetype/internal/services/svgxval.h @@ -0,0 +1,72 @@ +/**************************************************************************** + * + * svgxval.h + * + * FreeType API for validating TrueTypeGX/AAT tables (specification). + * + * Copyright (C) 2004-2023 by + * Masatake YAMATO, Red Hat K.K., + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + +/**************************************************************************** + * + * gxvalid is derived from both gxlayout module and otvalid module. + * Development of gxlayout is supported by the Information-technology + * Promotion Agency(IPA), Japan. + * + */ + + +#ifndef SVGXVAL_H_ +#define SVGXVAL_H_ + +#include +#include + +FT_BEGIN_HEADER + + +#define FT_SERVICE_ID_GX_VALIDATE "truetypegx-validate" +#define FT_SERVICE_ID_CLASSICKERN_VALIDATE "classickern-validate" + + typedef FT_Error + (*gxv_validate_func)( FT_Face face, + FT_UInt gx_flags, + FT_Bytes tables[FT_VALIDATE_GX_LENGTH], + FT_UInt table_length ); + + + typedef FT_Error + (*ckern_validate_func)( FT_Face face, + FT_UInt ckern_flags, + FT_Bytes *ckern_table ); + + + FT_DEFINE_SERVICE( GXvalidate ) + { + gxv_validate_func validate; + }; + + FT_DEFINE_SERVICE( CKERNvalidate ) + { + ckern_validate_func validate; + }; + + /* */ + + +FT_END_HEADER + + +#endif /* SVGXVAL_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svkern.h b/Example/include/freetype/internal/services/svkern.h new file mode 100644 index 0000000..bcabbc3 --- /dev/null +++ b/Example/include/freetype/internal/services/svkern.h @@ -0,0 +1,51 @@ +/**************************************************************************** + * + * svkern.h + * + * The FreeType Kerning service (specification). + * + * Copyright (C) 2006-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. 
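Returning to the glyph-dict service declared in svgldict.h above, a short usage sketch: copy a glyph's name into a caller-supplied buffer and map a name back to a glyph index, remembering that `name_index` is optional. The service pointer is assumed to have been obtained from the face's driver; `demo_*` names are placeholders.

  /* Hedged sketch: using a glyph-dict service record obtained from   */
  /* the face's driver.                                               */
  static FT_UInt
  demo_index_for_name( FT_Face                         face,
                       const FT_Service_GlyphDictRec*  service,
                       const FT_String*                glyph_name )
  {
    if ( !service || !service->name_index )   /* `name_index' is optional */
      return 0;

    return service->name_index( face, glyph_name );
  }

  static FT_Error
  demo_copy_glyph_name( FT_Face                         face,
                        const FT_Service_GlyphDictRec*  service,
                        FT_UInt                         glyph_index,
                        char                            buffer[64] )
  {
    /* `get_name' fills `buffer' with at most `buffer_max' bytes */
    return service->get_name( face, glyph_index, buffer, 64 );
  }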
By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef SVKERN_H_ +#define SVKERN_H_ + +#include +#include + + +FT_BEGIN_HEADER + +#define FT_SERVICE_ID_KERNING "kerning" + + + typedef FT_Error + (*FT_Kerning_TrackGetFunc)( FT_Face face, + FT_Fixed point_size, + FT_Int degree, + FT_Fixed* akerning ); + + FT_DEFINE_SERVICE( Kerning ) + { + FT_Kerning_TrackGetFunc get_track; + }; + + /* */ + + +FT_END_HEADER + + +#endif /* SVKERN_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svmetric.h b/Example/include/freetype/internal/services/svmetric.h new file mode 100644 index 0000000..167617e --- /dev/null +++ b/Example/include/freetype/internal/services/svmetric.h @@ -0,0 +1,131 @@ +/**************************************************************************** + * + * svmetric.h + * + * The FreeType services for metrics variations (specification). + * + * Copyright (C) 2016-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef SVMETRIC_H_ +#define SVMETRIC_H_ + +#include + + +FT_BEGIN_HEADER + + + /* + * A service to manage the `HVAR, `MVAR', and `VVAR' OpenType tables. + * + */ + +#define FT_SERVICE_ID_METRICS_VARIATIONS "metrics-variations" + + + /* HVAR */ + + typedef FT_Error + (*FT_HAdvance_Adjust_Func)( FT_Face face, + FT_UInt gindex, + FT_Int *avalue ); + + typedef FT_Error + (*FT_LSB_Adjust_Func)( FT_Face face, + FT_UInt gindex, + FT_Int *avalue ); + + typedef FT_Error + (*FT_RSB_Adjust_Func)( FT_Face face, + FT_UInt gindex, + FT_Int *avalue ); + + /* VVAR */ + + typedef FT_Error + (*FT_VAdvance_Adjust_Func)( FT_Face face, + FT_UInt gindex, + FT_Int *avalue ); + + typedef FT_Error + (*FT_TSB_Adjust_Func)( FT_Face face, + FT_UInt gindex, + FT_Int *avalue ); + + typedef FT_Error + (*FT_BSB_Adjust_Func)( FT_Face face, + FT_UInt gindex, + FT_Int *avalue ); + + typedef FT_Error + (*FT_VOrg_Adjust_Func)( FT_Face face, + FT_UInt gindex, + FT_Int *avalue ); + + /* MVAR */ + + typedef void + (*FT_Metrics_Adjust_Func)( FT_Face face ); + + typedef FT_Error + (*FT_Size_Reset_Func)( FT_Size size ); + + + FT_DEFINE_SERVICE( MetricsVariations ) + { + FT_HAdvance_Adjust_Func hadvance_adjust; + FT_LSB_Adjust_Func lsb_adjust; + FT_RSB_Adjust_Func rsb_adjust; + + FT_VAdvance_Adjust_Func vadvance_adjust; + FT_TSB_Adjust_Func tsb_adjust; + FT_BSB_Adjust_Func bsb_adjust; + FT_VOrg_Adjust_Func vorg_adjust; + + FT_Metrics_Adjust_Func metrics_adjust; + FT_Size_Reset_Func size_reset; + }; + + +#define FT_DEFINE_SERVICE_METRICSVARIATIONSREC( class_, \ + hadvance_adjust_, \ + lsb_adjust_, \ + rsb_adjust_, \ + vadvance_adjust_, \ + tsb_adjust_, \ + bsb_adjust_, \ + vorg_adjust_, \ + metrics_adjust_, \ + size_reset_ ) \ + static const FT_Service_MetricsVariationsRec class_ = \ + { \ + hadvance_adjust_, \ + lsb_adjust_, \ + rsb_adjust_, \ + vadvance_adjust_, \ + tsb_adjust_, \ + bsb_adjust_, \ + vorg_adjust_, \ + metrics_adjust_, \ + size_reset_ \ + }; + + /* */ + + +FT_END_HEADER + +#endif /* SVMETRIC_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svmm.h b/Example/include/freetype/internal/services/svmm.h new file mode 
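To make the intended call pattern of the metrics-variations service declared in svmetric.h above concrete, here is a hedged sketch of adjusting a horizontal advance after it has been read from 'hmtx': the `hadvance_adjust` callback adds the 'HVAR' delta in place. The service pointer and the initial advance are assumed inputs; `demo_*` names are placeholders.

  /* Hedged sketch: applying an `HVAR' delta to an advance value      */
  /* (both in font units); a missing callback leaves it unchanged.    */
  static FT_Error
  demo_adjust_hadvance( FT_Face                                 face,
                        const FT_Service_MetricsVariationsRec*  var,
                        FT_UInt                                 gindex,
                        FT_Int                                 *advance )
  {
    if ( var && var->hadvance_adjust )
      return var->hadvance_adjust( face, gindex, advance );

    return 0;   /* FT_Err_Ok: no variation service, nothing to adjust */
  }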
100644 index 0000000..7e76ab8 --- /dev/null +++ b/Example/include/freetype/internal/services/svmm.h @@ -0,0 +1,214 @@ +/**************************************************************************** + * + * svmm.h + * + * The FreeType Multiple Masters and GX var services (specification). + * + * Copyright (C) 2003-2023 by + * David Turner, Robert Wilhelm, Werner Lemberg, and Dominik Röttsches. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef SVMM_H_ +#define SVMM_H_ + +#include +#include +#include + + +FT_BEGIN_HEADER + + + /* + * A service used to manage multiple-masters data in a given face. + * + * See the related APIs in `ftmm.h' (FT_MULTIPLE_MASTERS_H). + * + */ + +#define FT_SERVICE_ID_MULTI_MASTERS "multi-masters" + + + typedef FT_Error + (*FT_Get_MM_Func)( FT_Face face, + FT_Multi_Master* master ); + + typedef FT_Error + (*FT_Get_MM_Var_Func)( FT_Face face, + FT_MM_Var* *master ); + + typedef FT_Error + (*FT_Set_MM_Design_Func)( FT_Face face, + FT_UInt num_coords, + FT_Long* coords ); + + /* use return value -1 to indicate that the new coordinates */ + /* are equal to the current ones; no changes are thus needed */ + typedef FT_Error + (*FT_Set_Var_Design_Func)( FT_Face face, + FT_UInt num_coords, + FT_Fixed* coords ); + + /* use return value -1 to indicate that the new coordinates */ + /* are equal to the current ones; no changes are thus needed */ + typedef FT_Error + (*FT_Set_MM_Blend_Func)( FT_Face face, + FT_UInt num_coords, + FT_Fixed* coords ); + + typedef FT_Error + (*FT_Get_Var_Design_Func)( FT_Face face, + FT_UInt num_coords, + FT_Fixed* coords ); + + typedef FT_Error + (*FT_Set_Named_Instance_Func)( FT_Face face, + FT_UInt instance_index ); + + typedef FT_Error + (*FT_Get_Default_Named_Instance_Func)( FT_Face face, + FT_UInt *instance_index ); + + typedef FT_Error + (*FT_Get_MM_Blend_Func)( FT_Face face, + FT_UInt num_coords, + FT_Fixed* coords ); + + typedef FT_Error + (*FT_Get_Var_Blend_Func)( FT_Face face, + FT_UInt *num_coords, + FT_Fixed* *coords, + FT_Fixed* *normalizedcoords, + FT_MM_Var* *mm_var ); + + typedef void + (*FT_Done_Blend_Func)( FT_Face face ); + + typedef FT_Error + (*FT_Set_MM_WeightVector_Func)( FT_Face face, + FT_UInt len, + FT_Fixed* weight_vector ); + + typedef FT_Error + (*FT_Get_MM_WeightVector_Func)( FT_Face face, + FT_UInt* len, + FT_Fixed* weight_vector ); + + typedef void + (*FT_Construct_PS_Name_Func)( FT_Face face ); + + typedef FT_Error + (*FT_Var_Load_Delta_Set_Idx_Map_Func)( FT_Face face, + FT_ULong offset, + GX_DeltaSetIdxMap map, + GX_ItemVarStore itemStore, + FT_ULong table_len ); + + typedef FT_Error + (*FT_Var_Load_Item_Var_Store_Func)( FT_Face face, + FT_ULong offset, + GX_ItemVarStore itemStore ); + + typedef FT_ItemVarDelta + (*FT_Var_Get_Item_Delta_Func)( FT_Face face, + GX_ItemVarStore itemStore, + FT_UInt outerIndex, + FT_UInt innerIndex ); + + typedef void + (*FT_Var_Done_Item_Var_Store_Func)( FT_Face face, + GX_ItemVarStore itemStore ); + + typedef void + (*FT_Var_Done_Delta_Set_Idx_Map_Func)( FT_Face face, + GX_DeltaSetIdxMap deltaSetIdxMap ); + + + FT_DEFINE_SERVICE( MultiMasters ) + { + FT_Get_MM_Func get_mm; + FT_Set_MM_Design_Func set_mm_design; + FT_Set_MM_Blend_Func set_mm_blend; + FT_Get_MM_Blend_Func get_mm_blend; + FT_Get_MM_Var_Func 
get_mm_var; + FT_Set_Var_Design_Func set_var_design; + FT_Get_Var_Design_Func get_var_design; + FT_Set_Named_Instance_Func set_named_instance; + FT_Get_Default_Named_Instance_Func get_default_named_instance; + FT_Set_MM_WeightVector_Func set_mm_weightvector; + FT_Get_MM_WeightVector_Func get_mm_weightvector; + + /* for internal use; only needed for code sharing between modules */ + FT_Construct_PS_Name_Func construct_ps_name; + FT_Var_Load_Delta_Set_Idx_Map_Func load_delta_set_idx_map; + FT_Var_Load_Item_Var_Store_Func load_item_var_store; + FT_Var_Get_Item_Delta_Func get_item_delta; + FT_Var_Done_Item_Var_Store_Func done_item_var_store; + FT_Var_Done_Delta_Set_Idx_Map_Func done_delta_set_idx_map; + FT_Get_Var_Blend_Func get_var_blend; + FT_Done_Blend_Func done_blend; + }; + + +#define FT_DEFINE_SERVICE_MULTIMASTERSREC( class_, \ + get_mm_, \ + set_mm_design_, \ + set_mm_blend_, \ + get_mm_blend_, \ + get_mm_var_, \ + set_var_design_, \ + get_var_design_, \ + set_named_instance_, \ + get_default_named_instance_, \ + set_mm_weightvector_, \ + get_mm_weightvector_, \ + \ + construct_ps_name_, \ + load_delta_set_idx_map_, \ + load_item_var_store_, \ + get_item_delta_, \ + done_item_var_store_, \ + done_delta_set_idx_map_, \ + get_var_blend_, \ + done_blend_ ) \ + static const FT_Service_MultiMastersRec class_ = \ + { \ + get_mm_, \ + set_mm_design_, \ + set_mm_blend_, \ + get_mm_blend_, \ + get_mm_var_, \ + set_var_design_, \ + get_var_design_, \ + set_named_instance_, \ + get_default_named_instance_, \ + set_mm_weightvector_, \ + get_mm_weightvector_, \ + \ + construct_ps_name_, \ + load_delta_set_idx_map_, \ + load_item_var_store_, \ + get_item_delta_, \ + done_item_var_store_, \ + done_delta_set_idx_map_, \ + get_var_blend_, \ + done_blend_ \ + }; + + /* */ + + +FT_END_HEADER + +#endif /* SVMM_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svotval.h b/Example/include/freetype/internal/services/svotval.h new file mode 100644 index 0000000..a4683cd --- /dev/null +++ b/Example/include/freetype/internal/services/svotval.h @@ -0,0 +1,55 @@ +/**************************************************************************** + * + * svotval.h + * + * The FreeType OpenType validation service (specification). + * + * Copyright (C) 2004-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef SVOTVAL_H_ +#define SVOTVAL_H_ + +#include +#include + +FT_BEGIN_HEADER + + +#define FT_SERVICE_ID_OPENTYPE_VALIDATE "opentype-validate" + + + typedef FT_Error + (*otv_validate_func)( FT_Face volatile face, + FT_UInt ot_flags, + FT_Bytes *base, + FT_Bytes *gdef, + FT_Bytes *gpos, + FT_Bytes *gsub, + FT_Bytes *jstf ); + + + FT_DEFINE_SERVICE( OTvalidate ) + { + otv_validate_func validate; + }; + + /* */ + + +FT_END_HEADER + + +#endif /* SVOTVAL_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svpfr.h b/Example/include/freetype/internal/services/svpfr.h new file mode 100644 index 0000000..fd189c7 --- /dev/null +++ b/Example/include/freetype/internal/services/svpfr.h @@ -0,0 +1,65 @@ +/**************************************************************************** + * + * svpfr.h + * + * Internal PFR service functions (specification). 
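A hedged sketch of how the design-coordinate setter and getter of the multi-masters service declared in svmm.h above are meant to pair up, honouring the special -1 return value noted earlier; this is roughly the path that the public FT_Set_Var_Design_Coordinates and FT_Get_Var_Design_Coordinates calls delegate to. The service pointer is an assumed input.

  /* Hedged sketch: round-tripping variation design coordinates       */
  /* through the multi-masters service; a return value of -1 from the */
  /* setter means the coordinates are unchanged (see the note above). */
  static FT_Error
  demo_set_and_read_back( FT_Face                            face,
                          const FT_Service_MultiMastersRec*  mm,
                          FT_UInt                            num_coords,
                          FT_Fixed*                          coords )
  {
    FT_Error  error;


    error = mm->set_var_design( face, num_coords, coords );
    if ( error == -1 )
      return 0;          /* same coordinates as before; nothing to do */
    if ( error )
      return error;

    /* read the now-active design coordinates back into `coords' */
    return mm->get_var_design( face, num_coords, coords );
  }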
+ * + * Copyright (C) 2003-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef SVPFR_H_ +#define SVPFR_H_ + +#include +#include + + +FT_BEGIN_HEADER + + +#define FT_SERVICE_ID_PFR_METRICS "pfr-metrics" + + + typedef FT_Error + (*FT_PFR_GetMetricsFunc)( FT_Face face, + FT_UInt *aoutline, + FT_UInt *ametrics, + FT_Fixed *ax_scale, + FT_Fixed *ay_scale ); + + typedef FT_Error + (*FT_PFR_GetKerningFunc)( FT_Face face, + FT_UInt left, + FT_UInt right, + FT_Vector *avector ); + + typedef FT_Error + (*FT_PFR_GetAdvanceFunc)( FT_Face face, + FT_UInt gindex, + FT_Pos *aadvance ); + + + FT_DEFINE_SERVICE( PfrMetrics ) + { + FT_PFR_GetMetricsFunc get_metrics; + FT_PFR_GetKerningFunc get_kerning; + FT_PFR_GetAdvanceFunc get_advance; + + }; + + +FT_END_HEADER + +#endif /* SVPFR_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svpostnm.h b/Example/include/freetype/internal/services/svpostnm.h new file mode 100644 index 0000000..2b8f6df --- /dev/null +++ b/Example/include/freetype/internal/services/svpostnm.h @@ -0,0 +1,65 @@ +/**************************************************************************** + * + * svpostnm.h + * + * The FreeType PostScript name services (specification). + * + * Copyright (C) 2003-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef SVPOSTNM_H_ +#define SVPOSTNM_H_ + +#include + + +FT_BEGIN_HEADER + + /* + * A trivial service used to retrieve the PostScript name of a given font + * when available. The `get_name' field should never be `NULL`. + * + * The corresponding function can return `NULL` to indicate that the + * PostScript name is not available. + * + * The name is owned by the face and will be destroyed with it. + */ + +#define FT_SERVICE_ID_POSTSCRIPT_FONT_NAME "postscript-font-name" + + + typedef const char* + (*FT_PsName_GetFunc)( FT_Face face ); + + + FT_DEFINE_SERVICE( PsFontName ) + { + FT_PsName_GetFunc get_ps_font_name; + }; + + +#define FT_DEFINE_SERVICE_PSFONTNAMEREC( class_, get_ps_font_name_ ) \ + static const FT_Service_PsFontNameRec class_ = \ + { \ + get_ps_font_name_ \ + }; + + /* */ + + +FT_END_HEADER + + +#endif /* SVPOSTNM_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svprop.h b/Example/include/freetype/internal/services/svprop.h new file mode 100644 index 0000000..932ce32 --- /dev/null +++ b/Example/include/freetype/internal/services/svprop.h @@ -0,0 +1,66 @@ +/**************************************************************************** + * + * svprop.h + * + * The FreeType property service (specification). + * + * Copyright (C) 2012-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. 
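A minimal usage sketch for the PostScript-name service declared in svpostnm.h above; the only subtlety is that the callback may legitimately return `NULL` when the font carries no PostScript name, and that the returned string is owned by the face. The service pointer is an assumed input.

  /* Hedged sketch: querying the PostScript name through the service. */
  static const char*
  demo_get_ps_name( FT_Face                          face,
                    const FT_Service_PsFontNameRec*  service )
  {
    const char*  ps_name = NULL;


    if ( service && service->get_ps_font_name )
      ps_name = service->get_ps_font_name( face );

    return ps_name;   /* may be NULL; the string is owned by the face */
  }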
By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef SVPROP_H_ +#define SVPROP_H_ + + +FT_BEGIN_HEADER + + +#define FT_SERVICE_ID_PROPERTIES "properties" + + + typedef FT_Error + (*FT_Properties_SetFunc)( FT_Module module, + const char* property_name, + const void* value, + FT_Bool value_is_string ); + + typedef FT_Error + (*FT_Properties_GetFunc)( FT_Module module, + const char* property_name, + void* value ); + + + FT_DEFINE_SERVICE( Properties ) + { + FT_Properties_SetFunc set_property; + FT_Properties_GetFunc get_property; + }; + + +#define FT_DEFINE_SERVICE_PROPERTIESREC( class_, \ + set_property_, \ + get_property_ ) \ + static const FT_Service_PropertiesRec class_ = \ + { \ + set_property_, \ + get_property_ \ + }; + + /* */ + + +FT_END_HEADER + + +#endif /* SVPROP_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svpscmap.h b/Example/include/freetype/internal/services/svpscmap.h new file mode 100644 index 0000000..6e599f3 --- /dev/null +++ b/Example/include/freetype/internal/services/svpscmap.h @@ -0,0 +1,145 @@ +/**************************************************************************** + * + * svpscmap.h + * + * The FreeType PostScript charmap service (specification). + * + * Copyright (C) 2003-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef SVPSCMAP_H_ +#define SVPSCMAP_H_ + +#include + + +FT_BEGIN_HEADER + + +#define FT_SERVICE_ID_POSTSCRIPT_CMAPS "postscript-cmaps" + + + /* + * Adobe glyph name to unicode value. + */ + typedef FT_UInt32 + (*PS_Unicode_ValueFunc)( const char* glyph_name ); + + /* + * Macintosh name id to glyph name. `NULL` if invalid index. + */ + typedef const char* + (*PS_Macintosh_NameFunc)( FT_UInt name_index ); + + /* + * Adobe standard string ID to glyph name. `NULL` if invalid index. + */ + typedef const char* + (*PS_Adobe_Std_StringsFunc)( FT_UInt string_index ); + + + /* + * Simple unicode -> glyph index charmap built from font glyph names table. + */ + typedef struct PS_UniMap_ + { + FT_UInt32 unicode; /* bit 31 set: is glyph variant */ + FT_UInt glyph_index; + + } PS_UniMap; + + + typedef struct PS_UnicodesRec_* PS_Unicodes; + + typedef struct PS_UnicodesRec_ + { + FT_CMapRec cmap; + FT_UInt num_maps; + PS_UniMap* maps; + + } PS_UnicodesRec; + + + /* + * A function which returns a glyph name for a given index. Returns + * `NULL` if invalid index. 
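To illustrate the property service declared in svprop.h above, here is a hedged sketch of a module-side setter that recognizes a single hypothetical integer property called "demo-level" and honours the `value_is_string` flag, assumed here to mean that `value` points to a textual representation rather than a binary object. The property name and the `demo_*` function are placeholders.

#include <stdlib.h>   /* strtol */
#include <string.h>   /* strcmp */

  /* Hedged sketch: a module property setter for one hypothetical     */
  /* integer property called "demo-level".                            */
  static FT_Error
  demo_set_property( FT_Module    module,
                     const char*  property_name,
                     const void*  value,
                     FT_Bool      value_is_string )
  {
    /* a real module would store this in its own structure */
    static long  demo_level = 0;

    (void)module;

    if ( strcmp( property_name, "demo-level" ) != 0 )
      return FT_Err_Invalid_Argument;   /* unknown property name */

    if ( value_is_string )
      demo_level = strtol( (const char*)value, NULL, 10 );
    else
      demo_level = *(const FT_Int*)value;

    return 0;
  }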
+ */ + typedef const char* + (*PS_GetGlyphNameFunc)( FT_Pointer data, + FT_UInt string_index ); + + /* + * A function used to release the glyph name returned by + * PS_GetGlyphNameFunc, when needed + */ + typedef void + (*PS_FreeGlyphNameFunc)( FT_Pointer data, + const char* name ); + + typedef FT_Error + (*PS_Unicodes_InitFunc)( FT_Memory memory, + PS_Unicodes unicodes, + FT_UInt num_glyphs, + PS_GetGlyphNameFunc get_glyph_name, + PS_FreeGlyphNameFunc free_glyph_name, + FT_Pointer glyph_data ); + + typedef FT_UInt + (*PS_Unicodes_CharIndexFunc)( PS_Unicodes unicodes, + FT_UInt32 unicode ); + + typedef FT_UInt + (*PS_Unicodes_CharNextFunc)( PS_Unicodes unicodes, + FT_UInt32 *unicode ); + + + FT_DEFINE_SERVICE( PsCMaps ) + { + PS_Unicode_ValueFunc unicode_value; + + PS_Unicodes_InitFunc unicodes_init; + PS_Unicodes_CharIndexFunc unicodes_char_index; + PS_Unicodes_CharNextFunc unicodes_char_next; + + PS_Macintosh_NameFunc macintosh_name; + PS_Adobe_Std_StringsFunc adobe_std_strings; + const unsigned short* adobe_std_encoding; + const unsigned short* adobe_expert_encoding; + }; + + +#define FT_DEFINE_SERVICE_PSCMAPSREC( class_, \ + unicode_value_, \ + unicodes_init_, \ + unicodes_char_index_, \ + unicodes_char_next_, \ + macintosh_name_, \ + adobe_std_strings_, \ + adobe_std_encoding_, \ + adobe_expert_encoding_ ) \ + static const FT_Service_PsCMapsRec class_ = \ + { \ + unicode_value_, unicodes_init_, \ + unicodes_char_index_, unicodes_char_next_, macintosh_name_, \ + adobe_std_strings_, adobe_std_encoding_, adobe_expert_encoding_ \ + }; + + /* */ + + +FT_END_HEADER + + +#endif /* SVPSCMAP_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svpsinfo.h b/Example/include/freetype/internal/services/svpsinfo.h new file mode 100644 index 0000000..09c4cdc --- /dev/null +++ b/Example/include/freetype/internal/services/svpsinfo.h @@ -0,0 +1,86 @@ +/**************************************************************************** + * + * svpsinfo.h + * + * The FreeType PostScript info service (specification). + * + * Copyright (C) 2003-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. 
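The unicode-cmap helpers in svpscmap.h above are meant to be used in two phases: build the PS_UnicodesRec once from the font's glyph names, then query it per character. A hedged sketch follows; `my_get_glyph_name` and `my_free_glyph_name` are placeholder callbacks matching PS_GetGlyphNameFunc and PS_FreeGlyphNameFunc, and the service pointer is an assumed input.

  /* Hedged sketch: building a PS_UnicodesRec once, then mapping a    */
  /* Unicode code point to a glyph index.                             */
  const char*  my_get_glyph_name ( FT_Pointer data, FT_UInt string_index );
  void         my_free_glyph_name( FT_Pointer data, const char* name );

  static FT_UInt
  demo_char_to_gid( const FT_Service_PsCMapsRec*  psnames,
                    FT_Memory                     memory,
                    FT_UInt                       num_glyphs,
                    FT_Pointer                    glyph_data,
                    PS_UnicodesRec*               table,   /* zeroed by the caller */
                    FT_UInt32                     unicode )
  {
    if ( psnames->unicodes_init( memory, table, num_glyphs,
                                 my_get_glyph_name,
                                 my_free_glyph_name,
                                 glyph_data ) )
      return 0;                        /* the table could not be built */

    /* 0 means `no glyph maps to this code point' */
    return psnames->unicodes_char_index( table, unicode );
  }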
+ * + */ + + +#ifndef SVPSINFO_H_ +#define SVPSINFO_H_ + +#include +#include + + +FT_BEGIN_HEADER + + +#define FT_SERVICE_ID_POSTSCRIPT_INFO "postscript-info" + + + typedef FT_Error + (*PS_GetFontInfoFunc)( FT_Face face, + PS_FontInfoRec* afont_info ); + + typedef FT_Error + (*PS_GetFontExtraFunc)( FT_Face face, + PS_FontExtraRec* afont_extra ); + + typedef FT_Int + (*PS_HasGlyphNamesFunc)( FT_Face face ); + + typedef FT_Error + (*PS_GetFontPrivateFunc)( FT_Face face, + PS_PrivateRec* afont_private ); + + typedef FT_Long + (*PS_GetFontValueFunc)( FT_Face face, + PS_Dict_Keys key, + FT_UInt idx, + void *value, + FT_Long value_len ); + + + FT_DEFINE_SERVICE( PsInfo ) + { + PS_GetFontInfoFunc ps_get_font_info; + PS_GetFontExtraFunc ps_get_font_extra; + PS_HasGlyphNamesFunc ps_has_glyph_names; + PS_GetFontPrivateFunc ps_get_font_private; + PS_GetFontValueFunc ps_get_font_value; + }; + + +#define FT_DEFINE_SERVICE_PSINFOREC( class_, \ + get_font_info_, \ + ps_get_font_extra_, \ + has_glyph_names_, \ + get_font_private_, \ + get_font_value_ ) \ + static const FT_Service_PsInfoRec class_ = \ + { \ + get_font_info_, ps_get_font_extra_, has_glyph_names_, \ + get_font_private_, get_font_value_ \ + }; + + /* */ + + +FT_END_HEADER + + +#endif /* SVPSINFO_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svsfnt.h b/Example/include/freetype/internal/services/svsfnt.h new file mode 100644 index 0000000..f98df2e --- /dev/null +++ b/Example/include/freetype/internal/services/svsfnt.h @@ -0,0 +1,88 @@ +/**************************************************************************** + * + * svsfnt.h + * + * The FreeType SFNT table loading service (specification). + * + * Copyright (C) 2003-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef SVSFNT_H_ +#define SVSFNT_H_ + +#include +#include + + +FT_BEGIN_HEADER + + + /* + * SFNT table loading service. + */ + +#define FT_SERVICE_ID_SFNT_TABLE "sfnt-table" + + + /* + * Used to implement FT_Load_Sfnt_Table(). + */ + typedef FT_Error + (*FT_SFNT_TableLoadFunc)( FT_Face face, + FT_ULong tag, + FT_Long offset, + FT_Byte* buffer, + FT_ULong* length ); + + /* + * Used to implement FT_Get_Sfnt_Table(). + */ + typedef void* + (*FT_SFNT_TableGetFunc)( FT_Face face, + FT_Sfnt_Tag tag ); + + + /* + * Used to implement FT_Sfnt_Table_Info(). 
+ */ + typedef FT_Error + (*FT_SFNT_TableInfoFunc)( FT_Face face, + FT_UInt idx, + FT_ULong *tag, + FT_ULong *offset, + FT_ULong *length ); + + + FT_DEFINE_SERVICE( SFNT_Table ) + { + FT_SFNT_TableLoadFunc load_table; + FT_SFNT_TableGetFunc get_table; + FT_SFNT_TableInfoFunc table_info; + }; + + +#define FT_DEFINE_SERVICE_SFNT_TABLEREC( class_, load_, get_, info_ ) \ + static const FT_Service_SFNT_TableRec class_ = \ + { \ + load_, get_, info_ \ + }; + + /* */ + + +FT_END_HEADER + + +#endif /* SVSFNT_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svttcmap.h b/Example/include/freetype/internal/services/svttcmap.h new file mode 100644 index 0000000..5f9eb02 --- /dev/null +++ b/Example/include/freetype/internal/services/svttcmap.h @@ -0,0 +1,90 @@ +/**************************************************************************** + * + * svttcmap.h + * + * The FreeType TrueType/sfnt cmap extra information service. + * + * Copyright (C) 2003-2023 by + * Masatake YAMATO, Redhat K.K., + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + +/* Development of this service is support of + Information-technology Promotion Agency, Japan. */ + +#ifndef SVTTCMAP_H_ +#define SVTTCMAP_H_ + +#include +#include + + +FT_BEGIN_HEADER + + +#define FT_SERVICE_ID_TT_CMAP "tt-cmaps" + + + /************************************************************************** + * + * @struct: + * TT_CMapInfo + * + * @description: + * A structure used to store TrueType/sfnt specific cmap information + * which is not covered by the generic @FT_CharMap structure. This + * structure can be accessed with the @FT_Get_TT_CMap_Info function. + * + * @fields: + * language :: + * The language ID used in Mac fonts. Definitions of values are in + * `ttnameid.h`. + * + * format :: + * The cmap format. OpenType 1.6 defines the formats 0 (byte encoding + * table), 2~(high-byte mapping through table), 4~(segment mapping to + * delta values), 6~(trimmed table mapping), 8~(mixed 16-bit and 32-bit + * coverage), 10~(trimmed array), 12~(segmented coverage), 13~(last + * resort font), and 14 (Unicode Variation Sequences). + */ + typedef struct TT_CMapInfo_ + { + FT_ULong language; + FT_Long format; + + } TT_CMapInfo; + + + typedef FT_Error + (*TT_CMap_Info_GetFunc)( FT_CharMap charmap, + TT_CMapInfo *cmap_info ); + + + FT_DEFINE_SERVICE( TTCMaps ) + { + TT_CMap_Info_GetFunc get_cmap_info; + }; + + +#define FT_DEFINE_SERVICE_TTCMAPSREC( class_, get_cmap_info_ ) \ + static const FT_Service_TTCMapsRec class_ = \ + { \ + get_cmap_info_ \ + }; + + /* */ + + +FT_END_HEADER + +#endif /* SVTTCMAP_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svtteng.h b/Example/include/freetype/internal/services/svtteng.h new file mode 100644 index 0000000..ad577cb --- /dev/null +++ b/Example/include/freetype/internal/services/svtteng.h @@ -0,0 +1,53 @@ +/**************************************************************************** + * + * svtteng.h + * + * The FreeType TrueType engine query service (specification). + * + * Copyright (C) 2006-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. 
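Since the load/get/info triplet of the SFNT-table service declared in svsfnt.h above backs FT_Load_Sfnt_Table, the sketch below assumes the same two-pass convention (a NULL buffer with `*length == 0` queries the table size). It is an illustration only; `demo_*` is a placeholder name.

#include <stdlib.h>   /* malloc, free */

  /* Hedged sketch: loading a whole table through the SFNT-table      */
  /* service, assuming the FT_Load_Sfnt_Table length-query idiom.     */
  static FT_Error
  demo_load_whole_table( FT_Face                          face,
                         const FT_Service_SFNT_TableRec*  sfnt,
                         FT_ULong                         tag,
                         FT_Byte*                        *abuffer,
                         FT_ULong                        *alength )
  {
    FT_Error  error;
    FT_ULong  length = 0;
    FT_Byte*  buffer;


    /* pass 1: ask for the table length only */
    error = sfnt->load_table( face, tag, 0, NULL, &length );
    if ( error )
      return error;

    buffer = (FT_Byte*)malloc( length );
    if ( !buffer )
      return FT_Err_Out_Of_Memory;

    /* pass 2: load the table into the buffer */
    error = sfnt->load_table( face, tag, 0, buffer, &length );
    if ( error )
    {
      free( buffer );
      return error;
    }

    *abuffer = buffer;
    *alength = length;
    return 0;
  }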
+ * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef SVTTENG_H_ +#define SVTTENG_H_ + +#include +#include + + +FT_BEGIN_HEADER + + + /* + * SFNT table loading service. + */ + +#define FT_SERVICE_ID_TRUETYPE_ENGINE "truetype-engine" + + /* + * Used to implement FT_Get_TrueType_Engine_Type + */ + + FT_DEFINE_SERVICE( TrueTypeEngine ) + { + FT_TrueTypeEngineType engine_type; + }; + + /* */ + + +FT_END_HEADER + + +#endif /* SVTTENG_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svttglyf.h b/Example/include/freetype/internal/services/svttglyf.h new file mode 100644 index 0000000..ca6fff7 --- /dev/null +++ b/Example/include/freetype/internal/services/svttglyf.h @@ -0,0 +1,56 @@ +/**************************************************************************** + * + * svttglyf.h + * + * The FreeType TrueType glyph service. + * + * Copyright (C) 2007-2023 by + * David Turner. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + +#ifndef SVTTGLYF_H_ +#define SVTTGLYF_H_ + +#include +#include + + +FT_BEGIN_HEADER + + +#define FT_SERVICE_ID_TT_GLYF "tt-glyf" + + + typedef FT_ULong + (*TT_Glyf_GetLocationFunc)( FT_Face face, + FT_UInt gindex, + FT_ULong *psize ); + + FT_DEFINE_SERVICE( TTGlyf ) + { + TT_Glyf_GetLocationFunc get_location; + }; + + +#define FT_DEFINE_SERVICE_TTGLYFREC( class_, get_location_ ) \ + static const FT_Service_TTGlyfRec class_ = \ + { \ + get_location_ \ + }; + + /* */ + + +FT_END_HEADER + +#endif /* SVTTGLYF_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/services/svwinfnt.h b/Example/include/freetype/internal/services/svwinfnt.h new file mode 100644 index 0000000..002923f --- /dev/null +++ b/Example/include/freetype/internal/services/svwinfnt.h @@ -0,0 +1,50 @@ +/**************************************************************************** + * + * svwinfnt.h + * + * The FreeType Windows FNT/FONT service (specification). + * + * Copyright (C) 2003-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. 
+ * + */ + + +#ifndef SVWINFNT_H_ +#define SVWINFNT_H_ + +#include +#include + + +FT_BEGIN_HEADER + + +#define FT_SERVICE_ID_WINFNT "winfonts" + + typedef FT_Error + (*FT_WinFnt_GetHeaderFunc)( FT_Face face, + FT_WinFNT_HeaderRec *aheader ); + + + FT_DEFINE_SERVICE( WinFnt ) + { + FT_WinFnt_GetHeaderFunc get_header; + }; + + /* */ + + +FT_END_HEADER + + +#endif /* SVWINFNT_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/sfnt.h b/Example/include/freetype/internal/sfnt.h new file mode 100644 index 0000000..a2d4e15 --- /dev/null +++ b/Example/include/freetype/internal/sfnt.h @@ -0,0 +1,1092 @@ +/**************************************************************************** + * + * sfnt.h + * + * High-level 'sfnt' driver interface (specification). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef SFNT_H_ +#define SFNT_H_ + + +#include +#include +#include + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @functype: + * TT_Init_Face_Func + * + * @description: + * First part of the SFNT face object initialization. This finds the + * face in a SFNT file or collection, and load its format tag in + * face->format_tag. + * + * @input: + * stream :: + * The input stream. + * + * face :: + * A handle to the target face object. + * + * face_index :: + * The index of the TrueType font, if we are opening a collection, in + * bits 0-15. The numbered instance index~+~1 of a GX (sub)font, if + * applicable, in bits 16-30. + * + * num_params :: + * The number of additional parameters. + * + * params :: + * Optional additional parameters. + * + * @return: + * FreeType error code. 0 means success. + * + * @note: + * The stream cursor must be at the font file's origin. + * + * This function recognizes fonts embedded in a 'TrueType collection'. + * + * Once the format tag has been validated by the font driver, it should + * then call the TT_Load_Face_Func() callback to read the rest of the + * SFNT tables in the object. + */ + typedef FT_Error + (*TT_Init_Face_Func)( FT_Stream stream, + TT_Face face, + FT_Int face_index, + FT_Int num_params, + FT_Parameter* params ); + + + /************************************************************************** + * + * @functype: + * TT_Load_Face_Func + * + * @description: + * Second part of the SFNT face object initialization. This loads the + * common SFNT tables (head, OS/2, maxp, metrics, etc.) in the face + * object. + * + * @input: + * stream :: + * The input stream. + * + * face :: + * A handle to the target face object. + * + * face_index :: + * The index of the TrueType font, if we are opening a collection, in + * bits 0-15. The numbered instance index~+~1 of a GX (sub)font, if + * applicable, in bits 16-30. + * + * num_params :: + * The number of additional parameters. + * + * params :: + * Optional additional parameters. + * + * @return: + * FreeType error code. 0 means success. + * + * @note: + * This function must be called after TT_Init_Face_Func(). 
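Because the packing of `face_index` described for TT_Init_Face_Func and TT_Load_Face_Func above recurs throughout the SFNT code (bits 0-15 carry the face number within a collection, bits 16-30 the named-instance index plus one), a small hedged helper makes the convention explicit; negative probing indices are deliberately left out of the sketch.

  /* Hedged sketch: splitting a non-negative, packed `face_index' as  */
  /* described above.                                                 */
  static void
  demo_split_face_index( FT_Int   face_index,
                         FT_Int  *aface_in_collection,   /* bits 0-15  */
                         FT_Int  *anamed_instance )      /* bits 16-30 */
  {
    *aface_in_collection = face_index & 0xFFFF;

    /* value N means named instance N-1; 0 means `no named instance' */
    *anamed_instance     = ( face_index >> 16 ) & 0x7FFF;
  }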
+ */ + typedef FT_Error + (*TT_Load_Face_Func)( FT_Stream stream, + TT_Face face, + FT_Int face_index, + FT_Int num_params, + FT_Parameter* params ); + + + /************************************************************************** + * + * @functype: + * TT_Done_Face_Func + * + * @description: + * A callback used to delete the common SFNT data from a face. + * + * @input: + * face :: + * A handle to the target face object. + * + * @note: + * This function does NOT destroy the face object. + */ + typedef void + (*TT_Done_Face_Func)( TT_Face face ); + + + /************************************************************************** + * + * @functype: + * TT_Load_Any_Func + * + * @description: + * Load any font table into client memory. + * + * @input: + * face :: + * The face object to look for. + * + * tag :: + * The tag of table to load. Use the value 0 if you want to access the + * whole font file, else set this parameter to a valid TrueType table + * tag that you can forge with the MAKE_TT_TAG macro. + * + * offset :: + * The starting offset in the table (or the file if tag == 0). + * + * length :: + * The address of the decision variable: + * + * If `length == NULL`: Loads the whole table. Returns an error if + * 'offset' == 0! + * + * If `*length == 0`: Exits immediately; returning the length of the + * given table or of the font file, depending on the value of 'tag'. + * + * If `*length != 0`: Loads the next 'length' bytes of table or font, + * starting at offset 'offset' (in table or font too). + * + * @output: + * buffer :: + * The address of target buffer. + * + * @return: + * TrueType error code. 0 means success. + */ + typedef FT_Error + (*TT_Load_Any_Func)( TT_Face face, + FT_ULong tag, + FT_Long offset, + FT_Byte *buffer, + FT_ULong* length ); + + + /************************************************************************** + * + * @functype: + * TT_Find_SBit_Image_Func + * + * @description: + * Check whether an embedded bitmap (an 'sbit') exists for a given glyph, + * at a given strike. + * + * @input: + * face :: + * The target face object. + * + * glyph_index :: + * The glyph index. + * + * strike_index :: + * The current strike index. + * + * @output: + * arange :: + * The SBit range containing the glyph index. + * + * astrike :: + * The SBit strike containing the glyph index. + * + * aglyph_offset :: + * The offset of the glyph data in 'EBDT' table. + * + * @return: + * FreeType error code. 0 means success. Returns + * SFNT_Err_Invalid_Argument if no sbit exists for the requested glyph. + */ + typedef FT_Error + (*TT_Find_SBit_Image_Func)( TT_Face face, + FT_UInt glyph_index, + FT_ULong strike_index, + TT_SBit_Range *arange, + TT_SBit_Strike *astrike, + FT_ULong *aglyph_offset ); + + + /************************************************************************** + * + * @functype: + * TT_Load_SBit_Metrics_Func + * + * @description: + * Get the big metrics for a given embedded bitmap. + * + * @input: + * stream :: + * The input stream. + * + * range :: + * The SBit range containing the glyph. + * + * @output: + * big_metrics :: + * A big SBit metrics structure for the glyph. + * + * @return: + * FreeType error code. 0 means success. + * + * @note: + * The stream cursor must be positioned at the glyph's offset within the + * 'EBDT' table before the call. + * + * If the image format uses variable metrics, the stream cursor is + * positioned just after the metrics header in the 'EBDT' table on + * function exit. 
+ */ + typedef FT_Error + (*TT_Load_SBit_Metrics_Func)( FT_Stream stream, + TT_SBit_Range range, + TT_SBit_Metrics metrics ); + + + /************************************************************************** + * + * @functype: + * TT_Load_SBit_Image_Func + * + * @description: + * Load a given glyph sbit image from the font resource. This also + * returns its metrics. + * + * @input: + * face :: + * The target face object. + * + * strike_index :: + * The strike index. + * + * glyph_index :: + * The current glyph index. + * + * load_flags :: + * The current load flags. + * + * stream :: + * The input stream. + * + * @output: + * amap :: + * The target pixmap. + * + * ametrics :: + * A big sbit metrics structure for the glyph image. + * + * @return: + * FreeType error code. 0 means success. Returns an error if no glyph + * sbit exists for the index. + * + * @note: + * The `map.buffer` field is always freed before the glyph is loaded. + */ + typedef FT_Error + (*TT_Load_SBit_Image_Func)( TT_Face face, + FT_ULong strike_index, + FT_UInt glyph_index, + FT_UInt load_flags, + FT_Stream stream, + FT_Bitmap *amap, + TT_SBit_MetricsRec *ametrics ); + + + /************************************************************************** + * + * @functype: + * TT_Load_Svg_Doc_Func + * + * @description: + * Scan the SVG document list to find the document containing the glyph + * that has the ID 'glyph*XXX*', where *XXX* is the value of + * `glyph_index` as a decimal integer. + * + * @inout: + * glyph :: + * The glyph slot from which pointers to the SVG document list is to be + * grabbed. The results are stored back in the slot. + * + * @input: + * glyph_index :: + * The index of the glyph that is to be looked up. + * + * @return: + * FreeType error code. 0 means success. + */ + typedef FT_Error + (*TT_Load_Svg_Doc_Func)( FT_GlyphSlot glyph, + FT_UInt glyph_index ); + + + /************************************************************************** + * + * @functype: + * TT_Set_SBit_Strike_Func + * + * @description: + * Select an sbit strike for a given size request. + * + * @input: + * face :: + * The target face object. + * + * req :: + * The size request. + * + * @output: + * astrike_index :: + * The index of the sbit strike. + * + * @return: + * FreeType error code. 0 means success. Returns an error if no sbit + * strike exists for the selected ppem values. + */ + typedef FT_Error + (*TT_Set_SBit_Strike_Func)( TT_Face face, + FT_Size_Request req, + FT_ULong* astrike_index ); + + + /************************************************************************** + * + * @functype: + * TT_Load_Strike_Metrics_Func + * + * @description: + * Load the metrics of a given strike. + * + * @input: + * face :: + * The target face object. + * + * strike_index :: + * The strike index. + * + * @output: + * metrics :: + * the metrics of the strike. + * + * @return: + * FreeType error code. 0 means success. Returns an error if no such + * sbit strike exists. + */ + typedef FT_Error + (*TT_Load_Strike_Metrics_Func)( TT_Face face, + FT_ULong strike_index, + FT_Size_Metrics* metrics ); + + + /************************************************************************** + * + * @functype: + * TT_Get_PS_Name_Func + * + * @description: + * Get the PostScript glyph name of a glyph. + * + * @input: + * idx :: + * The glyph index. + * + * PSname :: + * The address of a string pointer. Will be `NULL` in case of error, + * otherwise it is a pointer to the glyph name. + * + * You must not modify the returned string! 
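The strike-related callbacks above (TT_Set_SBit_Strike_Func, TT_Load_Strike_Metrics_Func, TT_Load_SBit_Image_Func) are designed to be chained: pick a strike from a size request, load that strike's metrics, then fetch the glyph's sbit. A hedged sketch of that flow, taking the callbacks as parameters instead of assuming a particular driver layout:

  /* Hedged sketch: the usual strike-selection flow for embedded bitmaps. */
  static FT_Error
  demo_load_embedded_bitmap( TT_Face                      face,
                             TT_Set_SBit_Strike_Func      set_sbit_strike,
                             TT_Load_Strike_Metrics_Func  load_strike_metrics,
                             TT_Load_SBit_Image_Func      load_sbit_image,
                             FT_Size_Request              req,
                             FT_UInt                      glyph_index,
                             FT_UInt                      load_flags,
                             FT_Stream                    stream,
                             FT_Bitmap                   *amap,
                             TT_SBit_MetricsRec          *ametrics,
                             FT_Size_Metrics             *asize_metrics )
  {
    FT_Error  error;
    FT_ULong  strike_index;


    /* 1. pick the strike that best matches the size request */
    error = set_sbit_strike( face, req, &strike_index );
    if ( error )
      return error;                 /* no usable strike */

    /* 2. load that strike's global metrics */
    error = load_strike_metrics( face, strike_index, asize_metrics );
    if ( error )
      return error;

    /* 3. fetch the glyph's bitmap and its big metrics */
    return load_sbit_image( face, strike_index, glyph_index,
                            load_flags, stream, amap, ametrics );
  }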
+ * + * @output: + * FreeType error code. 0 means success. + */ + typedef FT_Error + (*TT_Get_PS_Name_Func)( TT_Face face, + FT_UInt idx, + FT_String** PSname ); + + + /************************************************************************** + * + * @functype: + * TT_Load_Metrics_Func + * + * @description: + * Load a metrics table, which is a table with a horizontal and a + * vertical version. + * + * @input: + * face :: + * A handle to the target face object. + * + * stream :: + * The input stream. + * + * vertical :: + * A boolean flag. If set, load the vertical one. + * + * @return: + * FreeType error code. 0 means success. + */ + typedef FT_Error + (*TT_Load_Metrics_Func)( TT_Face face, + FT_Stream stream, + FT_Bool vertical ); + + + /************************************************************************** + * + * @functype: + * TT_Get_Metrics_Func + * + * @description: + * Load the horizontal or vertical header in a face object. + * + * @input: + * face :: + * A handle to the target face object. + * + * vertical :: + * A boolean flag. If set, load vertical metrics. + * + * gindex :: + * The glyph index. + * + * @output: + * abearing :: + * The horizontal (or vertical) bearing. Set to zero in case of error. + * + * aadvance :: + * The horizontal (or vertical) advance. Set to zero in case of error. + */ + typedef void + (*TT_Get_Metrics_Func)( TT_Face face, + FT_Bool vertical, + FT_UInt gindex, + FT_Short* abearing, + FT_UShort* aadvance ); + + + /************************************************************************** + * + * @functype: + * TT_Set_Palette_Func + * + * @description: + * Load the colors into `face->palette` for a given palette index. + * + * @input: + * face :: + * The target face object. + * + * idx :: + * The palette index. + * + * @return: + * FreeType error code. 0 means success. + */ + typedef FT_Error + (*TT_Set_Palette_Func)( TT_Face face, + FT_UInt idx ); + + + /************************************************************************** + * + * @functype: + * TT_Get_Colr_Layer_Func + * + * @description: + * Iteratively get the color layer data of a given glyph index. + * + * @input: + * face :: + * The target face object. + * + * base_glyph :: + * The glyph index the colored glyph layers are associated with. + * + * @inout: + * iterator :: + * An @FT_LayerIterator object. For the first call you should set + * `iterator->p` to `NULL`. For all following calls, simply use the + * same object again. + * + * @output: + * aglyph_index :: + * The glyph index of the current layer. + * + * acolor_index :: + * The color index into the font face's color palette of the current + * layer. The value 0xFFFF is special; it doesn't reference a palette + * entry but indicates that the text foreground color should be used + * instead (to be set up by the application outside of FreeType). + * + * @return: + * Value~1 if everything is OK. If there are no more layers (or if there + * are no layers at all), value~0 gets returned. In case of an error, + * value~0 is returned also. + */ + typedef FT_Bool + (*TT_Get_Colr_Layer_Func)( TT_Face face, + FT_UInt base_glyph, + FT_UInt *aglyph_index, + FT_UInt *acolor_index, + FT_LayerIterator* iterator ); + + + /************************************************************************** + * + * @functype: + * TT_Get_Color_Glyph_Paint_Func + * + * @description: + * Find the root @FT_OpaquePaint object for a given glyph ID. + * + * @input: + * face :: + * The target face object. 
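The layer iterator described for TT_Get_Colr_Layer_Func above is meant to be driven in a loop, with `iterator->p` cleared before the first call and the special color index 0xFFFF mapped to the application's text foreground color. A hedged sketch, with the callback passed in as a parameter:

  /* Hedged sketch: walking the `COLR' v0 layers of `base_glyph'.     */
  static void
  demo_walk_colr_layers( TT_Face                 face,
                         TT_Get_Colr_Layer_Func  get_colr_layer,
                         FT_UInt                 base_glyph )
  {
    FT_LayerIterator  iterator;
    FT_UInt           layer_glyph;
    FT_UInt           layer_color;


    iterator.p = NULL;        /* must be NULL before the first call */

    while ( get_colr_layer( face, base_glyph,
                            &layer_glyph, &layer_color, &iterator ) )
    {
      /* 0xFFFF selects the application's text foreground color;  */
      /* any other value indexes the currently selected palette.  */
      if ( layer_color == 0xFFFF )
      {
        /* ... use the foreground color ... */
      }

      /* ... render `layer_glyph' with the chosen color ... */
    }
  }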
+ * + * base_glyph :: + * The glyph index the colored glyph layers are associated with. + * + * @output: + * paint :: + * The root @FT_OpaquePaint object. + * + * @return: + * Value~1 if everything is OK. If no color glyph is found, or the root + * paint could not be retrieved, value~0 gets returned. In case of an + * error, value~0 is returned also. + */ + typedef FT_Bool + ( *TT_Get_Color_Glyph_Paint_Func )( TT_Face face, + FT_UInt base_glyph, + FT_Color_Root_Transform root_transform, + FT_OpaquePaint *paint ); + + + /************************************************************************** + * + * @functype: + * TT_Get_Color_Glyph_ClipBox_Func + * + * @description: + * Search for a 'COLR' v1 clip box for the specified `base_glyph` and + * fill the `clip_box` parameter with the 'COLR' v1 'ClipBox' information + * if one is found. + * + * @input: + * face :: + * A handle to the parent face object. + * + * base_glyph :: + * The glyph index for which to retrieve the clip box. + * + * @output: + * clip_box :: + * The clip box for the requested `base_glyph` if one is found. The + * clip box is computed taking scale and transformations configured on + * the @FT_Face into account. @FT_ClipBox contains @FT_Vector values + * in 26.6 format. + * + * @note: + * To retrieve the clip box in font units, reset scale to units-per-em + * and remove transforms configured using @FT_Set_Transform. + * + * @return: + * Value~1 if a ClipBox is found. If no clip box is found or an + * error occured, value~0 is returned. + */ + typedef FT_Bool + ( *TT_Get_Color_Glyph_ClipBox_Func )( TT_Face face, + FT_UInt base_glyph, + FT_ClipBox* clip_box ); + + + /************************************************************************** + * + * @functype: + * TT_Get_Paint_Layers_Func + * + * @description: + * Access the layers of a `PaintColrLayers` table. + * + * @input: + * face :: + * The target face object. + * + * @inout: + * iterator :: + * The @FT_LayerIterator from an @FT_PaintColrLayers object, for which + * the layers are to be retrieved. The internal state of the iterator + * is incremented after one call to this function for retrieving one + * layer. + * + * @output: + * paint :: + * The root @FT_OpaquePaint object referencing the actual paint table. + * + * @return: + * Value~1 if everything is OK. Value~0 gets returned when the paint + * object can not be retrieved or any other error occurs. + */ + typedef FT_Bool + ( *TT_Get_Paint_Layers_Func )( TT_Face face, + FT_LayerIterator* iterator, + FT_OpaquePaint *paint ); + + + /************************************************************************** + * + * @functype: + * TT_Get_Colorline_Stops_Func + * + * @description: + * Get the gradient and solid fill information for a given glyph. + * + * @input: + * face :: + * The target face object. + * + * @inout: + * iterator :: + * An @FT_ColorStopIterator object. For the first call you should set + * `iterator->p` to `NULL`. For all following calls, simply use the + * same object again. + * + * @output: + * color_stop :: + * Color index and alpha value for the retrieved color stop. + * + * @return: + * Value~1 if everything is OK. If there are no more color stops, + * value~0 gets returned. In case of an error, value~0 is returned + * also. 
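For the 'COLR' v1 entry points above (TT_Get_Color_Glyph_Paint_Func and TT_Get_Color_Glyph_ClipBox_Func), a typical first step is to fetch the root paint and, if present, the clip box; both callbacks return value 1 on success and value 0 otherwise. A hedged sketch, assuming the public FT_Color_Root_Transform value FT_COLOR_INCLUDE_ROOT_TRANSFORM; `demo_*` is a placeholder name.

  /* Hedged sketch: fetching the root paint and (optional) clip box   */
  /* of a `COLR' v1 glyph.                                            */
  static FT_Bool
  demo_get_colr_v1_root( TT_Face                          face,
                         TT_Get_Color_Glyph_Paint_Func    get_paint_root,
                         TT_Get_Color_Glyph_ClipBox_Func  get_clipbox,
                         FT_UInt                          base_glyph,
                         FT_OpaquePaint                  *apaint,
                         FT_ClipBox                      *aclip_box,
                         FT_Bool                         *ahave_clip_box )
  {
    if ( !get_paint_root( face, base_glyph,
                          FT_COLOR_INCLUDE_ROOT_TRANSFORM, apaint ) )
      return 0;                             /* not a `COLR' v1 glyph */

    /* the clip box is optional; remember whether one was found */
    *ahave_clip_box = get_clipbox( face, base_glyph, aclip_box );

    return 1;
  }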
+ */ + typedef FT_Bool + ( *TT_Get_Colorline_Stops_Func )( TT_Face face, + FT_ColorStop *color_stop, + FT_ColorStopIterator* iterator ); + + + /************************************************************************** + * + * @functype: + * TT_Get_Paint_Func + * + * @description: + * Get the paint details for a given @FT_OpaquePaint object. + * + * @input: + * face :: + * The target face object. + * + * opaque_paint :: + * The @FT_OpaquePaint object. + * + * @output: + * paint :: + * An @FT_COLR_Paint object holding the details on `opaque_paint`. + * + * @return: + * Value~1 if everything is OK. Value~0 if no details can be found for + * this paint or any other error occurred. + */ + typedef FT_Bool + ( *TT_Get_Paint_Func )( TT_Face face, + FT_OpaquePaint opaque_paint, + FT_COLR_Paint *paint ); + + + /************************************************************************** + * + * @functype: + * TT_Blend_Colr_Func + * + * @description: + * Blend the bitmap in `new_glyph` into `base_glyph` using the color + * specified by `color_index`. If `color_index` is 0xFFFF, use + * `face->foreground_color` if `face->have_foreground_color` is set. + * Otherwise check `face->palette_data.palette_flags`: If present and + * @FT_PALETTE_FOR_DARK_BACKGROUND is set, use BGRA value 0xFFFFFFFF + * (white opaque). Otherwise use BGRA value 0x000000FF (black opaque). + * + * @input: + * face :: + * The target face object. + * + * color_index :: + * Color index from the COLR table. + * + * base_glyph :: + * Slot for bitmap to be merged into. The underlying bitmap may get + * reallocated. + * + * new_glyph :: + * Slot to be incorporated into `base_glyph`. + * + * @return: + * FreeType error code. 0 means success. Returns an error if + * color_index is invalid or reallocation fails. + */ + typedef FT_Error + (*TT_Blend_Colr_Func)( TT_Face face, + FT_UInt color_index, + FT_GlyphSlot base_glyph, + FT_GlyphSlot new_glyph ); + + + /************************************************************************** + * + * @functype: + * TT_Get_Name_Func + * + * @description: + * From the 'name' table, return a given ENGLISH name record in ASCII. + * + * @input: + * face :: + * A handle to the source face object. + * + * nameid :: + * The name id of the name record to return. + * + * @inout: + * name :: + * The address of an allocated string pointer. `NULL` if no name is + * present. + * + * @return: + * FreeType error code. 0 means success. + */ + typedef FT_Error + (*TT_Get_Name_Func)( TT_Face face, + FT_UShort nameid, + FT_String** name ); + + + /************************************************************************** + * + * @functype: + * TT_Get_Name_ID_Func + * + * @description: + * Search whether an ENGLISH version for a given name ID is in the 'name' + * table. + * + * @input: + * face :: + * A handle to the source face object. + * + * nameid :: + * The name id of the name record to return. + * + * @output: + * win :: + * If non-negative, an index into the 'name' table with the + * corresponding (3,1) or (3,0) Windows entry. + * + * apple :: + * If non-negative, an index into the 'name' table with the + * corresponding (1,0) Apple entry. + * + * @return: + * 1 if there is either a win or apple entry (or both), 0 otherwise. + */ + typedef FT_Bool + (*TT_Get_Name_ID_Func)( TT_Face face, + FT_UShort nameid, + FT_Int *win, + FT_Int *apple ); + + + /************************************************************************** + * + * @functype: + * TT_Load_Table_Func + * + * @description: + * Load a given TrueType table.
+ * + * @input: + * face :: + * A handle to the target face object. + * + * stream :: + * The input stream. + * + * @return: + * FreeType error code. 0 means success. + * + * @note: + * The function uses `face->goto_table` to seek the stream to the start + * of the table, except while loading the font directory. + */ + typedef FT_Error + (*TT_Load_Table_Func)( TT_Face face, + FT_Stream stream ); + + + /************************************************************************** + * + * @functype: + * TT_Free_Table_Func + * + * @description: + * Free a given TrueType table. + * + * @input: + * face :: + * A handle to the target face object. + */ + typedef void + (*TT_Free_Table_Func)( TT_Face face ); + + + /* + * @functype: + * TT_Face_GetKerningFunc + * + * @description: + * Return the horizontal kerning value between two glyphs. + * + * @input: + * face :: + * A handle to the source face object. + * + * left_glyph :: + * The left glyph index. + * + * right_glyph :: + * The right glyph index. + * + * @return: + * The kerning value in font units. + */ + typedef FT_Int + (*TT_Face_GetKerningFunc)( TT_Face face, + FT_UInt left_glyph, + FT_UInt right_glyph ); + + + /************************************************************************** + * + * @struct: + * SFNT_Interface + * + * @description: + * This structure holds pointers to the functions used to load and free + * the basic tables that are required in a 'sfnt' font file. + * + * @fields: + * Check the various xxx_Func() descriptions for details. + */ + typedef struct SFNT_Interface_ + { + TT_Loader_GotoTableFunc goto_table; + + TT_Init_Face_Func init_face; + TT_Load_Face_Func load_face; + TT_Done_Face_Func done_face; + FT_Module_Requester get_interface; + + TT_Load_Any_Func load_any; + + /* these functions are called by `load_face' but they can also */ + /* be called from external modules, if there is a need to do so */ + TT_Load_Table_Func load_head; + TT_Load_Metrics_Func load_hhea; + TT_Load_Table_Func load_cmap; + TT_Load_Table_Func load_maxp; + TT_Load_Table_Func load_os2; + TT_Load_Table_Func load_post; + + TT_Load_Table_Func load_name; + TT_Free_Table_Func free_name; + + /* this field was called `load_kerning' up to version 2.1.10 */ + TT_Load_Table_Func load_kern; + + TT_Load_Table_Func load_gasp; + TT_Load_Table_Func load_pclt; + + /* see `ttload.h'; this field was called `load_bitmap_header' up to */ + /* version 2.1.10 */ + TT_Load_Table_Func load_bhed; + + TT_Load_SBit_Image_Func load_sbit_image; + + /* see `ttpost.h' */ + TT_Get_PS_Name_Func get_psname; + TT_Free_Table_Func free_psnames; + + /* starting here, the structure differs from version 2.1.7 */ + + /* this field was introduced in version 2.1.8, named `get_psname' */ + TT_Face_GetKerningFunc get_kerning; + + /* new elements introduced after version 2.1.10 */ + + /* load the font directory, i.e., the offset table and */ + /* the table directory */ + TT_Load_Table_Func load_font_dir; + TT_Load_Metrics_Func load_hmtx; + + TT_Load_Table_Func load_eblc; + TT_Free_Table_Func free_eblc; + + TT_Set_SBit_Strike_Func set_sbit_strike; + TT_Load_Strike_Metrics_Func load_strike_metrics; + + TT_Load_Table_Func load_cpal; + TT_Load_Table_Func load_colr; + TT_Free_Table_Func free_cpal; + TT_Free_Table_Func free_colr; + TT_Set_Palette_Func set_palette; + TT_Get_Colr_Layer_Func get_colr_layer; + TT_Get_Color_Glyph_Paint_Func get_colr_glyph_paint; + TT_Get_Color_Glyph_ClipBox_Func get_color_glyph_clipbox; + TT_Get_Paint_Layers_Func get_paint_layers; + TT_Get_Colorline_Stops_Func 
get_colorline_stops; + TT_Get_Paint_Func get_paint; + TT_Blend_Colr_Func colr_blend; + + TT_Get_Metrics_Func get_metrics; + + TT_Get_Name_Func get_name; + TT_Get_Name_ID_Func get_name_id; + + /* OpenType SVG Support */ + TT_Load_Table_Func load_svg; + TT_Free_Table_Func free_svg; + TT_Load_Svg_Doc_Func load_svg_doc; + + } SFNT_Interface; + + + /* transitional */ + typedef SFNT_Interface* SFNT_Service; + + +#define FT_DEFINE_SFNT_INTERFACE( \ + class_, \ + goto_table_, \ + init_face_, \ + load_face_, \ + done_face_, \ + get_interface_, \ + load_any_, \ + load_head_, \ + load_hhea_, \ + load_cmap_, \ + load_maxp_, \ + load_os2_, \ + load_post_, \ + load_name_, \ + free_name_, \ + load_kern_, \ + load_gasp_, \ + load_pclt_, \ + load_bhed_, \ + load_sbit_image_, \ + get_psname_, \ + free_psnames_, \ + get_kerning_, \ + load_font_dir_, \ + load_hmtx_, \ + load_eblc_, \ + free_eblc_, \ + set_sbit_strike_, \ + load_strike_metrics_, \ + load_cpal_, \ + load_colr_, \ + free_cpal_, \ + free_colr_, \ + set_palette_, \ + get_colr_layer_, \ + get_colr_glyph_paint_, \ + get_color_glyph_clipbox, \ + get_paint_layers_, \ + get_colorline_stops_, \ + get_paint_, \ + colr_blend_, \ + get_metrics_, \ + get_name_, \ + get_name_id_, \ + load_svg_, \ + free_svg_, \ + load_svg_doc_ ) \ + static const SFNT_Interface class_ = \ + { \ + goto_table_, \ + init_face_, \ + load_face_, \ + done_face_, \ + get_interface_, \ + load_any_, \ + load_head_, \ + load_hhea_, \ + load_cmap_, \ + load_maxp_, \ + load_os2_, \ + load_post_, \ + load_name_, \ + free_name_, \ + load_kern_, \ + load_gasp_, \ + load_pclt_, \ + load_bhed_, \ + load_sbit_image_, \ + get_psname_, \ + free_psnames_, \ + get_kerning_, \ + load_font_dir_, \ + load_hmtx_, \ + load_eblc_, \ + free_eblc_, \ + set_sbit_strike_, \ + load_strike_metrics_, \ + load_cpal_, \ + load_colr_, \ + free_cpal_, \ + free_colr_, \ + set_palette_, \ + get_colr_layer_, \ + get_colr_glyph_paint_, \ + get_color_glyph_clipbox, \ + get_paint_layers_, \ + get_colorline_stops_, \ + get_paint_, \ + colr_blend_, \ + get_metrics_, \ + get_name_, \ + get_name_id_, \ + load_svg_, \ + free_svg_, \ + load_svg_doc_ \ + }; + + +FT_END_HEADER + +#endif /* SFNT_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/svginterface.h b/Example/include/freetype/internal/svginterface.h new file mode 100644 index 0000000..f464b2c --- /dev/null +++ b/Example/include/freetype/internal/svginterface.h @@ -0,0 +1,46 @@ +/**************************************************************************** + * + * svginterface.h + * + * Interface of ot-svg module (specification only). + * + * Copyright (C) 2022-2023 by + * David Turner, Robert Wilhelm, Werner Lemberg, and Moazin Khatti. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. 
+ * + */ + + +#ifndef SVGINTERFACE_H_ +#define SVGINTERFACE_H_ + +#include +#include + + +FT_BEGIN_HEADER + + typedef FT_Error + (*Preset_Bitmap_Func)( FT_Module module, + FT_GlyphSlot slot, + FT_Bool cache ); + + typedef struct SVG_Interface_ + { + Preset_Bitmap_Func preset_slot; + + } SVG_Interface; + + typedef SVG_Interface* SVG_Service; + +FT_END_HEADER + +#endif /* SVGINTERFACE_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/t1types.h b/Example/include/freetype/internal/t1types.h new file mode 100644 index 0000000..b9c9439 --- /dev/null +++ b/Example/include/freetype/internal/t1types.h @@ -0,0 +1,259 @@ +/**************************************************************************** + * + * t1types.h + * + * Basic Type1/Type2 type definitions and interface (specification + * only). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef T1TYPES_H_ +#define T1TYPES_H_ + + +#include +#include +#include +#include +#include + + +FT_BEGIN_HEADER + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /*** ***/ + /*** ***/ + /*** REQUIRED TYPE1/TYPE2 TABLES DEFINITIONS ***/ + /*** ***/ + /*** ***/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * @struct: + * T1_EncodingRec + * + * @description: + * A structure modeling a custom encoding. + * + * @fields: + * num_chars :: + * The number of character codes in the encoding. Usually 256. + * + * code_first :: + * The lowest valid character code in the encoding. + * + * code_last :: + * The highest valid character code in the encoding + 1. When equal to + * code_first there are no valid character codes. + * + * char_index :: + * An array of corresponding glyph indices. + * + * char_name :: + * An array of corresponding glyph names. + */ + typedef struct T1_EncodingRecRec_ + { + FT_Int num_chars; + FT_Int code_first; + FT_Int code_last; + + FT_UShort* char_index; + const FT_String** char_name; + + } T1_EncodingRec, *T1_Encoding; + + + /* used to hold extra data of PS_FontInfoRec that + * cannot be stored in the publicly defined structure. + * + * Note these can't be blended with multiple-masters. 
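+   *
+   * The `fs_type' value kept here is the /FSType embedding-permissions
+   * entry from the font's /FontInfo dictionary; for such faces it is
+   * the value a client would normally obtain through
+   * @FT_Get_FSType_Flags, e.g. (a minimal illustration, with `face'
+   * being an opened FT_Face)
+   *
+   *   FT_UShort  embedding = FT_Get_FSType_Flags( face );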
+ */ + typedef struct PS_FontExtraRec_ + { + FT_UShort fs_type; + + } PS_FontExtraRec; + + + typedef struct T1_FontRec_ + { + PS_FontInfoRec font_info; /* font info dictionary */ + PS_FontExtraRec font_extra; /* font info extra fields */ + PS_PrivateRec private_dict; /* private dictionary */ + FT_String* font_name; /* top-level dictionary */ + + T1_EncodingType encoding_type; + T1_EncodingRec encoding; + + FT_Byte* subrs_block; + FT_Byte* charstrings_block; + FT_Byte* glyph_names_block; + + FT_Int num_subrs; + FT_Byte** subrs; + FT_UInt* subrs_len; + FT_Hash subrs_hash; + + FT_Int num_glyphs; + FT_String** glyph_names; /* array of glyph names */ + FT_Byte** charstrings; /* array of glyph charstrings */ + FT_UInt* charstrings_len; + + FT_Byte paint_type; + FT_Byte font_type; + FT_Matrix font_matrix; + FT_Vector font_offset; + FT_BBox font_bbox; + FT_Long font_id; + + FT_Fixed stroke_width; + + } T1_FontRec, *T1_Font; + + + typedef struct CID_SubrsRec_ + { + FT_Int num_subrs; + FT_Byte** code; + + } CID_SubrsRec, *CID_Subrs; + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /*** ***/ + /*** ***/ + /*** AFM FONT INFORMATION STRUCTURES ***/ + /*** ***/ + /*** ***/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + typedef struct AFM_TrackKernRec_ + { + FT_Int degree; + FT_Fixed min_ptsize; + FT_Fixed min_kern; + FT_Fixed max_ptsize; + FT_Fixed max_kern; + + } AFM_TrackKernRec, *AFM_TrackKern; + + typedef struct AFM_KernPairRec_ + { + FT_UInt index1; + FT_UInt index2; + FT_Int x; + FT_Int y; + + } AFM_KernPairRec, *AFM_KernPair; + + typedef struct AFM_FontInfoRec_ + { + FT_Bool IsCIDFont; + FT_BBox FontBBox; + FT_Fixed Ascender; /* optional, mind the zero */ + FT_Fixed Descender; /* optional, mind the zero */ + AFM_TrackKern TrackKerns; /* free if non-NULL */ + FT_UInt NumTrackKern; + AFM_KernPair KernPairs; /* free if non-NULL */ + FT_UInt NumKernPair; + + } AFM_FontInfoRec, *AFM_FontInfo; + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /*** ***/ + /*** ***/ + /*** ORIGINAL T1_FACE CLASS DEFINITION ***/ + /*** ***/ + /*** ***/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + + typedef struct T1_FaceRec_* T1_Face; + typedef struct CID_FaceRec_* CID_Face; + + + typedef struct T1_FaceRec_ + { + FT_FaceRec root; + T1_FontRec type1; + const void* psnames; + const void* psaux; + const void* afm_data; + FT_CharMapRec charmaprecs[2]; + FT_CharMap charmaps[2]; + + /* support for Multiple Masters fonts */ + PS_Blend blend; + + /* undocumented, optional: indices of subroutines that express */ + /* the NormalizeDesignVector and the ConvertDesignVector procedure, */ + /* respectively, as Type 2 charstrings; -1 if keywords not present */ + FT_Int ndv_idx; + FT_Int cdv_idx; + + /* undocumented, optional: has the same meaning as len_buildchar */ + /* for Type 2 
fonts; manipulated by othersubrs 19, 24, and 25 */ + FT_UInt len_buildchar; + FT_Long* buildchar; + + /* since version 2.1 - interface to PostScript hinter */ + const void* pshinter; + + } T1_FaceRec; + + + typedef struct CID_FaceRec_ + { + FT_FaceRec root; + void* psnames; + void* psaux; + CID_FaceInfoRec cid; + PS_FontExtraRec font_extra; +#if 0 + void* afm_data; +#endif + CID_Subrs subrs; + + /* since version 2.1 - interface to PostScript hinter */ + void* pshinter; + + /* since version 2.1.8, but was originally positioned after `afm_data' */ + FT_Byte* binary_data; /* used if hex data has been converted */ + FT_Stream cid_stream; + + } CID_FaceRec; + + +FT_END_HEADER + +#endif /* T1TYPES_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/tttypes.h b/Example/include/freetype/internal/tttypes.h new file mode 100644 index 0000000..984121a --- /dev/null +++ b/Example/include/freetype/internal/tttypes.h @@ -0,0 +1,1756 @@ +/**************************************************************************** + * + * tttypes.h + * + * Basic SFNT/TrueType type definitions and interface (specification + * only). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef TTTYPES_H_ +#define TTTYPES_H_ + + +#include +#include +#include + +#ifdef TT_CONFIG_OPTION_GX_VAR_SUPPORT +#include +#endif + + +FT_BEGIN_HEADER + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /*** ***/ + /*** ***/ + /*** REQUIRED TRUETYPE/OPENTYPE TABLES DEFINITIONS ***/ + /*** ***/ + /*** ***/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * @struct: + * TTC_HeaderRec + * + * @description: + * TrueType collection header. This table contains the offsets of the + * font headers of each distinct TrueType face in the file. + * + * @fields: + * tag :: + * Must be 'ttc~' to indicate a TrueType collection. + * + * version :: + * The version number. + * + * count :: + * The number of faces in the collection. The specification says this + * should be an unsigned long, but we use a signed long since we need + * the value -1 for specific purposes. + * + * offsets :: + * The offsets of the font headers, one per face. + */ + typedef struct TTC_HeaderRec_ + { + FT_ULong tag; + FT_Fixed version; + FT_Long count; + FT_ULong* offsets; + + } TTC_HeaderRec; + + + /************************************************************************** + * + * @struct: + * SFNT_HeaderRec + * + * @description: + * SFNT file format header. + * + * @fields: + * format_tag :: + * The font format tag. + * + * num_tables :: + * The number of tables in file. + * + * search_range :: + * Must be '16 * (max power of 2 <= num_tables)'. + * + * entry_selector :: + * Must be log2 of 'search_range / 16'. 
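+   *     For example, a font with 13 tables has 8 as the largest power
+   *     of~2 not exceeding `num_tables`, hence `search_range` is
+   *     16 * 8 = 128 and `entry_selector` is log2( 128 / 16 ) = 3 (and,
+   *     as defined next, `range_shift` is 13 * 16 - 128 = 80).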
+ * + * range_shift :: + * Must be 'num_tables * 16 - search_range'. + */ + typedef struct SFNT_HeaderRec_ + { + FT_ULong format_tag; + FT_UShort num_tables; + FT_UShort search_range; + FT_UShort entry_selector; + FT_UShort range_shift; + + FT_ULong offset; /* not in file */ + + } SFNT_HeaderRec, *SFNT_Header; + + + /************************************************************************** + * + * @struct: + * TT_TableRec + * + * @description: + * This structure describes a given table of a TrueType font. + * + * @fields: + * Tag :: + * A four-bytes tag describing the table. + * + * CheckSum :: + * The table checksum. This value can be ignored. + * + * Offset :: + * The offset of the table from the start of the TrueType font in its + * resource. + * + * Length :: + * The table length (in bytes). + */ + typedef struct TT_TableRec_ + { + FT_ULong Tag; /* table type */ + FT_ULong CheckSum; /* table checksum */ + FT_ULong Offset; /* table file offset */ + FT_ULong Length; /* table length */ + + } TT_TableRec, *TT_Table; + + + /************************************************************************** + * + * @struct: + * TT_LongMetricsRec + * + * @description: + * A structure modeling the long metrics of the 'hmtx' and 'vmtx' + * TrueType tables. The values are expressed in font units. + * + * @fields: + * advance :: + * The advance width or height for the glyph. + * + * bearing :: + * The left-side or top-side bearing for the glyph. + */ + typedef struct TT_LongMetricsRec_ + { + FT_UShort advance; + FT_Short bearing; + + } TT_LongMetricsRec, *TT_LongMetrics; + + + /************************************************************************** + * + * @type: + * TT_ShortMetrics + * + * @description: + * A simple type to model the short metrics of the 'hmtx' and 'vmtx' + * tables. + */ + typedef FT_Short TT_ShortMetrics; + + + /************************************************************************** + * + * @struct: + * TT_NameRec + * + * @description: + * A structure modeling TrueType name records. Name records are used to + * store important strings like family name, style name, copyright, + * etc. in _localized_ versions (i.e., language, encoding, etc). + * + * @fields: + * platformID :: + * The ID of the name's encoding platform. + * + * encodingID :: + * The platform-specific ID for the name's encoding. + * + * languageID :: + * The platform-specific ID for the name's language. + * + * nameID :: + * The ID specifying what kind of name this is. + * + * stringLength :: + * The length of the string in bytes. + * + * stringOffset :: + * The offset to the string in the 'name' table. + * + * string :: + * A pointer to the string's bytes. Note that these are usually UTF-16 + * encoded characters. + */ + typedef struct TT_NameRec_ + { + FT_UShort platformID; + FT_UShort encodingID; + FT_UShort languageID; + FT_UShort nameID; + FT_UShort stringLength; + FT_ULong stringOffset; + + /* this last field is not defined in the spec */ + /* but used by the FreeType engine */ + + FT_Byte* string; + + } TT_NameRec, *TT_Name; + + + /************************************************************************** + * + * @struct: + * TT_LangTagRec + * + * @description: + * A structure modeling language tag records in SFNT 'name' tables, + * introduced in OpenType version 1.6. + * + * @fields: + * stringLength :: + * The length of the string in bytes. + * + * stringOffset :: + * The offset to the string in the 'name' table. + * + * string :: + * A pointer to the string's bytes. 
Note that these are UTF-16BE + * encoded characters. + */ + typedef struct TT_LangTagRec_ + { + FT_UShort stringLength; + FT_ULong stringOffset; + + /* this last field is not defined in the spec */ + /* but used by the FreeType engine */ + + FT_Byte* string; + + } TT_LangTagRec, *TT_LangTag; + + + /************************************************************************** + * + * @struct: + * TT_NameTableRec + * + * @description: + * A structure modeling the TrueType name table. + * + * @fields: + * format :: + * The format of the name table. + * + * numNameRecords :: + * The number of names in table. + * + * storageOffset :: + * The offset of the name table in the 'name' TrueType table. + * + * names :: + * An array of name records. + * + * numLangTagRecords :: + * The number of language tags in table. + * + * langTags :: + * An array of language tag records. + * + * stream :: + * The file's input stream. + */ + typedef struct TT_NameTableRec_ + { + FT_UShort format; + FT_UInt numNameRecords; + FT_UInt storageOffset; + TT_NameRec* names; + FT_UInt numLangTagRecords; + TT_LangTagRec* langTags; + FT_Stream stream; + + } TT_NameTableRec, *TT_NameTable; + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /*** ***/ + /*** ***/ + /*** OPTIONAL TRUETYPE/OPENTYPE TABLES DEFINITIONS ***/ + /*** ***/ + /*** ***/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * @struct: + * TT_GaspRangeRec + * + * @description: + * A tiny structure used to model a gasp range according to the TrueType + * specification. + * + * @fields: + * maxPPEM :: + * The maximum ppem value to which `gaspFlag` applies. + * + * gaspFlag :: + * A flag describing the grid-fitting and anti-aliasing modes to be + * used. + */ + typedef struct TT_GaspRangeRec_ + { + FT_UShort maxPPEM; + FT_UShort gaspFlag; + + } TT_GaspRangeRec, *TT_GaspRange; + + +#define TT_GASP_GRIDFIT 0x01 +#define TT_GASP_DOGRAY 0x02 + + + /************************************************************************** + * + * @struct: + * TT_GaspRec + * + * @description: + * A structure modeling the TrueType 'gasp' table used to specify + * grid-fitting and anti-aliasing behaviour. + * + * @fields: + * version :: + * The version number. + * + * numRanges :: + * The number of gasp ranges in table. + * + * gaspRanges :: + * An array of gasp ranges. 
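+   *
+   * @note:
+   *   A minimal lookup sketch, assuming a filled `TT_GaspRec gasp` and
+   *   the current `ppem`; ranges are stored in ascending `maxPPEM`
+   *   order, so the first matching range applies:
+   *
+   *   ```
+   *     FT_UShort  flags = 0;
+   *     FT_Bool    grid_fit, do_gray;
+   *     FT_UInt    i;
+   *
+   *
+   *     for ( i = 0; i < gasp.numRanges; i++ )
+   *       if ( ppem <= gasp.gaspRanges[i].maxPPEM )
+   *       {
+   *         flags = gasp.gaspRanges[i].gaspFlag;
+   *         break;
+   *       }
+   *
+   *     grid_fit = ( flags & TT_GASP_GRIDFIT ) != 0;
+   *     do_gray  = ( flags & TT_GASP_DOGRAY  ) != 0;
+   *   ```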
+ */ + typedef struct TT_Gasp_ + { + FT_UShort version; + FT_UShort numRanges; + TT_GaspRange gaspRanges; + + } TT_GaspRec; + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /*** ***/ + /*** ***/ + /*** EMBEDDED BITMAPS SUPPORT ***/ + /*** ***/ + /*** ***/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * @struct: + * TT_SBit_MetricsRec + * + * @description: + * A structure used to hold the big metrics of a given glyph bitmap in a + * TrueType or OpenType font. These are usually found in the 'EBDT' + * (Microsoft) or 'bloc' (Apple) table. + * + * @fields: + * height :: + * The glyph height in pixels. + * + * width :: + * The glyph width in pixels. + * + * horiBearingX :: + * The horizontal left bearing. + * + * horiBearingY :: + * The horizontal top bearing. + * + * horiAdvance :: + * The horizontal advance. + * + * vertBearingX :: + * The vertical left bearing. + * + * vertBearingY :: + * The vertical top bearing. + * + * vertAdvance :: + * The vertical advance. + */ + typedef struct TT_SBit_MetricsRec_ + { + FT_UShort height; + FT_UShort width; + + FT_Short horiBearingX; + FT_Short horiBearingY; + FT_UShort horiAdvance; + + FT_Short vertBearingX; + FT_Short vertBearingY; + FT_UShort vertAdvance; + + } TT_SBit_MetricsRec, *TT_SBit_Metrics; + + + /************************************************************************** + * + * @struct: + * TT_SBit_SmallMetricsRec + * + * @description: + * A structure used to hold the small metrics of a given glyph bitmap in + * a TrueType or OpenType font. These are usually found in the 'EBDT' + * (Microsoft) or the 'bdat' (Apple) table. + * + * @fields: + * height :: + * The glyph height in pixels. + * + * width :: + * The glyph width in pixels. + * + * bearingX :: + * The left-side bearing. + * + * bearingY :: + * The top-side bearing. + * + * advance :: + * The advance width or height. + */ + typedef struct TT_SBit_Small_Metrics_ + { + FT_Byte height; + FT_Byte width; + + FT_Char bearingX; + FT_Char bearingY; + FT_Byte advance; + + } TT_SBit_SmallMetricsRec, *TT_SBit_SmallMetrics; + + + /************************************************************************** + * + * @struct: + * TT_SBit_LineMetricsRec + * + * @description: + * A structure used to describe the text line metrics of a given bitmap + * strike, for either a horizontal or vertical layout. + * + * @fields: + * ascender :: + * The ascender in pixels. + * + * descender :: + * The descender in pixels. + * + * max_width :: + * The maximum glyph width in pixels. + * + * caret_slope_enumerator :: + * Rise of the caret slope, typically set to 1 for non-italic fonts. + * + * caret_slope_denominator :: + * Rise of the caret slope, typically set to 0 for non-italic fonts. + * + * caret_offset :: + * Offset in pixels to move the caret for proper positioning. + * + * min_origin_SB :: + * Minimum of horiBearingX (resp. vertBearingY). + * min_advance_SB :: + * Minimum of + * + * horizontal advance - ( horiBearingX + width ) + * + * resp. 
+ * + * vertical advance - ( vertBearingY + height ) + * + * max_before_BL :: + * Maximum of horiBearingY (resp. vertBearingY). + * + * min_after_BL :: + * Minimum of + * + * horiBearingY - height + * + * resp. + * + * vertBearingX - width + * + * pads :: + * Unused (to make the size of the record a multiple of 32 bits. + */ + typedef struct TT_SBit_LineMetricsRec_ + { + FT_Char ascender; + FT_Char descender; + FT_Byte max_width; + FT_Char caret_slope_numerator; + FT_Char caret_slope_denominator; + FT_Char caret_offset; + FT_Char min_origin_SB; + FT_Char min_advance_SB; + FT_Char max_before_BL; + FT_Char min_after_BL; + FT_Char pads[2]; + + } TT_SBit_LineMetricsRec, *TT_SBit_LineMetrics; + + + /************************************************************************** + * + * @struct: + * TT_SBit_RangeRec + * + * @description: + * A TrueType/OpenType subIndexTable as defined in the 'EBLC' (Microsoft) + * or 'bloc' (Apple) tables. + * + * @fields: + * first_glyph :: + * The first glyph index in the range. + * + * last_glyph :: + * The last glyph index in the range. + * + * index_format :: + * The format of index table. Valid values are 1 to 5. + * + * image_format :: + * The format of 'EBDT' image data. + * + * image_offset :: + * The offset to image data in 'EBDT'. + * + * image_size :: + * For index formats 2 and 5. This is the size in bytes of each glyph + * bitmap. + * + * big_metrics :: + * For index formats 2 and 5. This is the big metrics for each glyph + * bitmap. + * + * num_glyphs :: + * For index formats 4 and 5. This is the number of glyphs in the code + * array. + * + * glyph_offsets :: + * For index formats 1 and 3. + * + * glyph_codes :: + * For index formats 4 and 5. + * + * table_offset :: + * The offset of the index table in the 'EBLC' table. Only used during + * strike loading. + */ + typedef struct TT_SBit_RangeRec_ + { + FT_UShort first_glyph; + FT_UShort last_glyph; + + FT_UShort index_format; + FT_UShort image_format; + FT_ULong image_offset; + + FT_ULong image_size; + TT_SBit_MetricsRec metrics; + FT_ULong num_glyphs; + + FT_ULong* glyph_offsets; + FT_UShort* glyph_codes; + + FT_ULong table_offset; + + } TT_SBit_RangeRec, *TT_SBit_Range; + + + /************************************************************************** + * + * @struct: + * TT_SBit_StrikeRec + * + * @description: + * A structure used describe a given bitmap strike in the 'EBLC' + * (Microsoft) or 'bloc' (Apple) tables. + * + * @fields: + * num_index_ranges :: + * The number of index ranges. + * + * index_ranges :: + * An array of glyph index ranges. + * + * color_ref :: + * Unused. `color_ref` is put in for future enhancements, but these + * fields are already in use by other platforms (e.g. Newton). For + * details, please see + * + * https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6bloc.html + * + * hori :: + * The line metrics for horizontal layouts. + * + * vert :: + * The line metrics for vertical layouts. + * + * start_glyph :: + * The lowest glyph index for this strike. + * + * end_glyph :: + * The highest glyph index for this strike. + * + * x_ppem :: + * The number of horizontal pixels per EM. + * + * y_ppem :: + * The number of vertical pixels per EM. + * + * bit_depth :: + * The bit depth. Valid values are 1, 2, 4, and 8. + * + * flags :: + * Is this a vertical or horizontal strike? 
For details, please see + * + * https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6bloc.html + */ + typedef struct TT_SBit_StrikeRec_ + { + FT_Int num_ranges; + TT_SBit_Range sbit_ranges; + FT_ULong ranges_offset; + + FT_ULong color_ref; + + TT_SBit_LineMetricsRec hori; + TT_SBit_LineMetricsRec vert; + + FT_UShort start_glyph; + FT_UShort end_glyph; + + FT_Byte x_ppem; + FT_Byte y_ppem; + + FT_Byte bit_depth; + FT_Char flags; + + } TT_SBit_StrikeRec, *TT_SBit_Strike; + + + /************************************************************************** + * + * @struct: + * TT_SBit_ComponentRec + * + * @description: + * A simple structure to describe a compound sbit element. + * + * @fields: + * glyph_code :: + * The element's glyph index. + * + * x_offset :: + * The element's left bearing. + * + * y_offset :: + * The element's top bearing. + */ + typedef struct TT_SBit_ComponentRec_ + { + FT_UShort glyph_code; + FT_Char x_offset; + FT_Char y_offset; + + } TT_SBit_ComponentRec, *TT_SBit_Component; + + + /************************************************************************** + * + * @struct: + * TT_SBit_ScaleRec + * + * @description: + * A structure used describe a given bitmap scaling table, as defined in + * the 'EBSC' table. + * + * @fields: + * hori :: + * The horizontal line metrics. + * + * vert :: + * The vertical line metrics. + * + * x_ppem :: + * The number of horizontal pixels per EM. + * + * y_ppem :: + * The number of vertical pixels per EM. + * + * x_ppem_substitute :: + * Substitution x_ppem value. + * + * y_ppem_substitute :: + * Substitution y_ppem value. + */ + typedef struct TT_SBit_ScaleRec_ + { + TT_SBit_LineMetricsRec hori; + TT_SBit_LineMetricsRec vert; + + FT_Byte x_ppem; + FT_Byte y_ppem; + + FT_Byte x_ppem_substitute; + FT_Byte y_ppem_substitute; + + } TT_SBit_ScaleRec, *TT_SBit_Scale; + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /*** ***/ + /*** ***/ + /*** POSTSCRIPT GLYPH NAMES SUPPORT ***/ + /*** ***/ + /*** ***/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * @struct: + * TT_Post_NamesRec + * + * @description: + * Postscript names table, either format 2.0 or 2.5. + * + * @fields: + * loaded :: + * A flag to indicate whether the PS names are loaded. + * + * num_glyphs :: + * The number of named glyphs in the table. + * + * num_names :: + * The number of PS names stored in the table. + * + * glyph_indices :: + * The indices of the glyphs in the names arrays. + * + * glyph_names :: + * The PS names not in Mac Encoding. 
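+   *
+   * @note:
+   *   A rough sketch of the format~2.0 lookup, assuming `names` points
+   *   to a loaded @TT_Post_NamesRec, `gindex` is a glyph index, and
+   *   `mac_standard_names` stands for the table of 258 standard
+   *   Macintosh glyph names (not a FreeType object):
+   *
+   *   ```
+   *     const char*  name;
+   *     FT_UShort    idx = names->glyph_indices[gindex];
+   *
+   *
+   *     if ( idx >= 258 )
+   *       name = (const char*)names->glyph_names[idx - 258];
+   *     else
+   *       name = mac_standard_names[idx];
+   *   ```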
+ */ + typedef struct TT_Post_NamesRec_ + { + FT_Bool loaded; + FT_UShort num_glyphs; + FT_UShort num_names; + FT_UShort* glyph_indices; + FT_Byte** glyph_names; + + } TT_Post_NamesRec, *TT_Post_Names; + + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /*** ***/ + /*** ***/ + /*** GX VARIATION TABLE SUPPORT ***/ + /*** ***/ + /*** ***/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + +#ifdef TT_CONFIG_OPTION_GX_VAR_SUPPORT + typedef struct GX_BlendRec_ *GX_Blend; +#endif + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /*** ***/ + /*** ***/ + /*** EMBEDDED BDF PROPERTIES TABLE SUPPORT ***/ + /*** ***/ + /*** ***/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + /* + * These types are used to support a `BDF ' table that isn't part of the + * official TrueType specification. It is mainly used in SFNT-based bitmap + * fonts that were generated from a set of BDF fonts. + * + * The format of the table is as follows. + * + * USHORT version `BDF ' table version number, should be 0x0001. USHORT + * strikeCount Number of strikes (bitmap sizes) in this table. ULONG + * stringTable Offset (from start of BDF table) to string + * table. + * + * This is followed by an array of `strikeCount' descriptors, having the + * following format. + * + * USHORT ppem Vertical pixels per EM for this strike. USHORT numItems + * Number of items for this strike (properties and + * atoms). Maximum is 255. + * + * This array in turn is followed by `strikeCount' value sets. Each `value + * set' is an array of `numItems' items with the following format. + * + * ULONG item_name Offset in string table to item name. + * USHORT item_type The item type. Possible values are + * 0 => string (e.g., COMMENT) + * 1 => atom (e.g., FONT or even SIZE) + * 2 => int32 + * 3 => uint32 + * 0x10 => A flag to indicate a properties. This + * is ORed with the above values. + * ULONG item_value For strings => Offset into string table without + * the corresponding double quotes. + * For atoms => Offset into string table. + * For integers => Direct value. + * + * All strings in the string table consist of bytes and are + * zero-terminated. 
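+   *
+   * A minimal sketch of reading that header, assuming `table' points at
+   * the start of the `BDF ' table and using the big-endian accessors
+   * from `ftstream.h':
+   *
+   *   FT_Byte*   p = table;
+   *   FT_UShort  version;
+   *   FT_UShort  num_strikes;
+   *   FT_ULong   strings_offset;
+   *
+   *
+   *   version        = FT_NEXT_USHORT( p );
+   *   num_strikes    = FT_NEXT_USHORT( p );
+   *   strings_offset = FT_NEXT_ULONG( p );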
+ * + */ + +#ifdef TT_CONFIG_OPTION_BDF + + typedef struct TT_BDFRec_ + { + FT_Byte* table; + FT_Byte* table_end; + FT_Byte* strings; + FT_ULong strings_size; + FT_UInt num_strikes; + FT_Bool loaded; + + } TT_BDFRec, *TT_BDF; + +#endif /* TT_CONFIG_OPTION_BDF */ + + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + /*** ***/ + /*** ***/ + /*** ORIGINAL TT_FACE CLASS DEFINITION ***/ + /*** ***/ + /*** ***/ + /*************************************************************************/ + /*************************************************************************/ + /*************************************************************************/ + + + /************************************************************************** + * + * This structure/class is defined here because it is common to the + * following formats: TTF, OpenType-TT, and OpenType-CFF. + * + * Note, however, that the classes TT_Size and TT_GlyphSlot are not shared + * between font drivers, and are thus defined in `ttobjs.h`. + * + */ + + + /************************************************************************** + * + * @type: + * TT_Face + * + * @description: + * A handle to a TrueType face/font object. A TT_Face encapsulates the + * resolution and scaling independent parts of a TrueType font resource. + * + * @note: + * The TT_Face structure is also used as a 'parent class' for the + * OpenType-CFF class (T2_Face). + */ + typedef struct TT_FaceRec_* TT_Face; + + + /* a function type used for the truetype bytecode interpreter hooks */ + typedef FT_Error + (*TT_Interpreter)( void* exec_context ); + + /* forward declaration */ + typedef struct TT_LoaderRec_* TT_Loader; + + + /************************************************************************** + * + * @functype: + * TT_Loader_GotoTableFunc + * + * @description: + * Seeks a stream to the start of a given TrueType table. + * + * @input: + * face :: + * A handle to the target face object. + * + * tag :: + * A 4-byte tag used to name the table. + * + * stream :: + * The input stream. + * + * @output: + * length :: + * The length of the table in bytes. Set to 0 if not needed. + * + * @return: + * FreeType error code. 0 means success. + * + * @note: + * The stream cursor must be at the font file's origin. + */ + typedef FT_Error + (*TT_Loader_GotoTableFunc)( TT_Face face, + FT_ULong tag, + FT_Stream stream, + FT_ULong* length ); + + + /************************************************************************** + * + * @functype: + * TT_Loader_StartGlyphFunc + * + * @description: + * Seeks a stream to the start of a given glyph element, and opens a + * frame for it. + * + * @input: + * loader :: + * The current TrueType glyph loader object. + * + * glyph index :: The index of the glyph to access. + * + * offset :: + * The offset of the glyph according to the 'locations' table. + * + * byte_count :: + * The size of the frame in bytes. + * + * @return: + * FreeType error code. 0 means success. + * + * @note: + * This function is normally equivalent to FT_STREAM_SEEK(offset) + * followed by FT_FRAME_ENTER(byte_count) with the loader's stream, but + * alternative formats (e.g. compressed ones) might use something + * different. 
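+   *
+   *   In code, the default behaviour sketched above is the usual stream
+   *   idiom (with `stream` and `error` being the customary locals bound
+   *   to the loader's stream, and `Exit` the surrounding error label):
+   *
+   *   ```
+   *     if ( FT_STREAM_SEEK( offset )     ||
+   *          FT_FRAME_ENTER( byte_count ) )
+   *       goto Exit;
+   *   ```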
+ */ + typedef FT_Error + (*TT_Loader_StartGlyphFunc)( TT_Loader loader, + FT_UInt glyph_index, + FT_ULong offset, + FT_UInt byte_count ); + + + /************************************************************************** + * + * @functype: + * TT_Loader_ReadGlyphFunc + * + * @description: + * Reads one glyph element (its header, a simple glyph, or a composite) + * from the loader's current stream frame. + * + * @input: + * loader :: + * The current TrueType glyph loader object. + * + * @return: + * FreeType error code. 0 means success. + */ + typedef FT_Error + (*TT_Loader_ReadGlyphFunc)( TT_Loader loader ); + + + /************************************************************************** + * + * @functype: + * TT_Loader_EndGlyphFunc + * + * @description: + * Closes the current loader stream frame for the glyph. + * + * @input: + * loader :: + * The current TrueType glyph loader object. + */ + typedef void + (*TT_Loader_EndGlyphFunc)( TT_Loader loader ); + + + typedef enum TT_SbitTableType_ + { + TT_SBIT_TABLE_TYPE_NONE = 0, + TT_SBIT_TABLE_TYPE_EBLC, /* `EBLC' (Microsoft), */ + /* `bloc' (Apple) */ + TT_SBIT_TABLE_TYPE_CBLC, /* `CBLC' (Google) */ + TT_SBIT_TABLE_TYPE_SBIX, /* `sbix' (Apple) */ + + /* do not remove */ + TT_SBIT_TABLE_TYPE_MAX + + } TT_SbitTableType; + + + /* OpenType 1.8 brings new tables for variation font support; */ + /* to make the old MM and GX fonts still work we need to check */ + /* the presence (and validity) of the functionality provided */ + /* by those tables. The following flag macros are for the */ + /* field `variation_support'. */ + /* */ + /* Note that `fvar' gets checked immediately at font loading, */ + /* while the other features are only loaded if MM support is */ + /* actually requested. */ + + /* FVAR */ +#define TT_FACE_FLAG_VAR_FVAR ( 1 << 0 ) + + /* HVAR */ +#define TT_FACE_FLAG_VAR_HADVANCE ( 1 << 1 ) +#define TT_FACE_FLAG_VAR_LSB ( 1 << 2 ) +#define TT_FACE_FLAG_VAR_RSB ( 1 << 3 ) + + /* VVAR */ +#define TT_FACE_FLAG_VAR_VADVANCE ( 1 << 4 ) +#define TT_FACE_FLAG_VAR_TSB ( 1 << 5 ) +#define TT_FACE_FLAG_VAR_BSB ( 1 << 6 ) +#define TT_FACE_FLAG_VAR_VORG ( 1 << 7 ) + + /* MVAR */ +#define TT_FACE_FLAG_VAR_MVAR ( 1 << 8 ) + + + /************************************************************************** + * + * TrueType Face Type + * + * @struct: + * TT_Face + * + * @description: + * The TrueType face class. These objects model the resolution and + * point-size independent data found in a TrueType font file. + * + * @fields: + * root :: + * The base FT_Face structure, managed by the base layer. + * + * ttc_header :: + * The TrueType collection header, used when the file is a 'ttc' rather + * than a 'ttf'. For ordinary font files, the field `ttc_header.count` + * is set to 0. + * + * format_tag :: + * The font format tag. + * + * num_tables :: + * The number of TrueType tables in this font file. + * + * dir_tables :: + * The directory of TrueType tables for this font file. + * + * header :: + * The font's font header ('head' table). Read on font opening. + * + * horizontal :: + * The font's horizontal header ('hhea' table). This field also + * contains the associated horizontal metrics table ('hmtx'). + * + * max_profile :: + * The font's maximum profile table. Read on font opening. Note that + * some maximum values cannot be taken directly from this table. We + * thus define additional fields below to hold the computed maxima. + * + * vertical_info :: + * A boolean which is set when the font file contains vertical metrics. 
+ * If not, the value of the 'vertical' field is undefined. + * + * vertical :: + * The font's vertical header ('vhea' table). This field also contains + * the associated vertical metrics table ('vmtx'), if found. + * IMPORTANT: The contents of this field is undefined if the + * `vertical_info` field is unset. + * + * num_names :: + * The number of name records within this TrueType font. + * + * name_table :: + * The table of name records ('name'). + * + * os2 :: + * The font's OS/2 table ('OS/2'). + * + * postscript :: + * The font's PostScript table ('post' table). The PostScript glyph + * names are not loaded by the driver on face opening. See the + * 'ttpost' module for more details. + * + * cmap_table :: + * Address of the face's 'cmap' SFNT table in memory (it's an extracted + * frame). + * + * cmap_size :: + * The size in bytes of the `cmap_table` described above. + * + * goto_table :: + * A function called by each TrueType table loader to position a + * stream's cursor to the start of a given table according to its tag. + * It defaults to TT_Goto_Face but can be different for strange formats + * (e.g. Type 42). + * + * access_glyph_frame :: + * A function used to access the frame of a given glyph within the + * face's font file. + * + * forget_glyph_frame :: + * A function used to forget the frame of a given glyph when all data + * has been loaded. + * + * read_glyph_header :: + * A function used to read a glyph header. It must be called between + * an 'access' and 'forget'. + * + * read_simple_glyph :: + * A function used to read a simple glyph. It must be called after the + * header was read, and before the 'forget'. + * + * read_composite_glyph :: + * A function used to read a composite glyph. It must be called after + * the header was read, and before the 'forget'. + * + * sfnt :: + * A pointer to the SFNT service. + * + * psnames :: + * A pointer to the PostScript names service. + * + * mm :: + * A pointer to the Multiple Masters service. + * + * tt_var :: + * A pointer to the Metrics Variations service for the "truetype" + * driver. + * + * face_var :: + * A pointer to the Metrics Variations service for this `TT_Face`'s + * driver. + * + * psaux :: + * A pointer to the PostScript Auxiliary service. + * + * gasp :: + * The grid-fitting and scaling properties table ('gasp'). This table + * is optional in TrueType/OpenType fonts. + * + * pclt :: + * The 'pclt' SFNT table. + * + * num_sbit_scales :: + * The number of sbit scales for this font. + * + * sbit_scales :: + * Array of sbit scales embedded in this font. This table is optional + * in a TrueType/OpenType font. + * + * postscript_names :: + * A table used to store the Postscript names of the glyphs for this + * font. See the file `ttconfig.h` for comments on the + * TT_CONFIG_OPTION_POSTSCRIPT_NAMES option. + * + * palette_data :: + * Some fields from the 'CPAL' table that are directly indexed. + * + * palette_index :: + * The current palette index, as set by @FT_Palette_Select. + * + * palette :: + * An array containing the current palette's colors. + * + * have_foreground_color :: + * There was a call to @FT_Palette_Set_Foreground_Color. + * + * foreground_color :: + * The current foreground color corresponding to 'CPAL' color index + * 0xFFFF. Only valid if `have_foreground_color` is set. + * + * font_program_size :: + * Size in bytecodes of the face's font program. 0 if none defined. + * Ignored for Type 2 fonts. 
+ * + * font_program :: + * The face's font program (bytecode stream) executed at load time, + * also used during glyph rendering. Comes from the 'fpgm' table. + * Ignored for Type 2 font fonts. + * + * cvt_program_size :: + * The size in bytecodes of the face's cvt program. Ignored for Type 2 + * fonts. + * + * cvt_program :: + * The face's cvt program (bytecode stream) executed each time an + * instance/size is changed/reset. Comes from the 'prep' table. + * Ignored for Type 2 fonts. + * + * cvt_size :: + * Size of the control value table (in entries). Ignored for Type 2 + * fonts. + * + * cvt :: + * The face's original control value table. Coordinates are expressed + * in unscaled font units (in 26.6 format). Comes from the 'cvt~' + * table. Ignored for Type 2 fonts. + * + * If varied by the `CVAR' table, non-integer values are possible. + * + * interpreter :: + * A pointer to the TrueType bytecode interpreters field is also used + * to hook the debugger in 'ttdebug'. + * + * extra :: + * Reserved for third-party font drivers. + * + * postscript_name :: + * The PS name of the font. Used by the postscript name service. + * + * glyf_len :: + * The length of the 'glyf' table. Needed for malformed 'loca' tables. + * + * glyf_offset :: + * The file offset of the 'glyf' table. + * + * is_cff2 :: + * Set if the font format is CFF2. + * + * doblend :: + * A boolean which is set if the font should be blended (this is for GX + * var). + * + * blend :: + * Contains the data needed to control GX variation tables (rather like + * Multiple Master data). + * + * variation_support :: + * Flags that indicate which OpenType functionality related to font + * variation support is present, valid, and usable. For example, + * TT_FACE_FLAG_VAR_FVAR is only set if we have at least one design + * axis. + * + * var_postscript_prefix :: + * The PostScript name prefix needed for constructing a variation font + * instance's PS name . + * + * var_postscript_prefix_len :: + * The length of the `var_postscript_prefix` string. + * + * var_default_named_instance :: + * The index of the default named instance. + * + * non_var_style_name :: + * The non-variation style name, used as a backup. + * + * horz_metrics_size :: + * The size of the 'hmtx' table. + * + * vert_metrics_size :: + * The size of the 'vmtx' table. + * + * num_locations :: + * The number of glyph locations in this TrueType file. This should be + * one more than the number of glyphs. Ignored for Type 2 fonts. + * + * glyph_locations :: + * An array of longs. These are offsets to glyph data within the + * 'glyf' table. Ignored for Type 2 font faces. + * + * hdmx_table :: + * A pointer to the 'hdmx' table. + * + * hdmx_table_size :: + * The size of the 'hdmx' table. + * + * hdmx_record_count :: + * The number of hdmx records. + * + * hdmx_record_size :: + * The size of a single hdmx record. + * + * hdmx_records :: + * A array of pointers to the 'hdmx' table records sorted by ppem. + * + * sbit_table :: + * A pointer to the font's embedded bitmap location table. + * + * sbit_table_size :: + * The size of `sbit_table`. + * + * sbit_table_type :: + * The sbit table type (CBLC, sbix, etc.). + * + * sbit_num_strikes :: + * The number of sbit strikes exposed by FreeType's API, omitting + * invalid strikes. + * + * sbit_strike_map :: + * A mapping between the strike indices exposed by the API and the + * indices used in the font's sbit table. + * + * kern_table :: + * A pointer to the 'kern' table. + * + * kern_table_size :: + * The size of the 'kern' table. 
+ * + * num_kern_tables :: + * The number of supported kern subtables (up to 32; FreeType + * recognizes only horizontal ones with format 0). + * + * kern_avail_bits :: + * The availability status of kern subtables; if bit n is set, table n + * is available. + * + * kern_order_bits :: + * The sortedness status of kern subtables; if bit n is set, table n is + * sorted. + * + * bdf :: + * Data related to an SFNT font's 'bdf' table; see `tttypes.h`. + * + * horz_metrics_offset :: + * The file offset of the 'hmtx' table. + * + * vert_metrics_offset :: + * The file offset of the 'vmtx' table. + * + * sph_found_func_flags :: + * Flags identifying special bytecode functions (used by the v38 + * implementation of the bytecode interpreter). + * + * sph_compatibility_mode :: + * This flag is set if we are in ClearType backward compatibility mode + * (used by the v38 implementation of the bytecode interpreter). + * + * ebdt_start :: + * The file offset of the sbit data table (CBDT, bdat, etc.). + * + * ebdt_size :: + * The size of the sbit data table. + * + * cpal :: + * A pointer to data related to the 'CPAL' table. `NULL` if the table + * is not available. + * + * colr :: + * A pointer to data related to the 'COLR' table. `NULL` if the table + * is not available. + * + * svg :: + * A pointer to data related to the 'SVG' table. `NULL` if the table + * is not available. + */ + typedef struct TT_FaceRec_ + { + FT_FaceRec root; + + TTC_HeaderRec ttc_header; + + FT_ULong format_tag; + FT_UShort num_tables; + TT_Table dir_tables; + + TT_Header header; /* TrueType header table */ + TT_HoriHeader horizontal; /* TrueType horizontal header */ + + TT_MaxProfile max_profile; + + FT_Bool vertical_info; + TT_VertHeader vertical; /* TT Vertical header, if present */ + + FT_UShort num_names; /* number of name records */ + TT_NameTableRec name_table; /* name table */ + + TT_OS2 os2; /* TrueType OS/2 table */ + TT_Postscript postscript; /* TrueType Postscript table */ + + FT_Byte* cmap_table; /* extracted `cmap' table */ + FT_ULong cmap_size; + + TT_Loader_GotoTableFunc goto_table; + + TT_Loader_StartGlyphFunc access_glyph_frame; + TT_Loader_EndGlyphFunc forget_glyph_frame; + TT_Loader_ReadGlyphFunc read_glyph_header; + TT_Loader_ReadGlyphFunc read_simple_glyph; + TT_Loader_ReadGlyphFunc read_composite_glyph; + + /* a typeless pointer to the SFNT_Interface table used to load */ + /* the basic TrueType tables in the face object */ + void* sfnt; + + /* a typeless pointer to the FT_Service_PsCMapsRec table used to */ + /* handle glyph names <-> unicode & Mac values */ + void* psnames; + +#ifdef TT_CONFIG_OPTION_GX_VAR_SUPPORT + /* a typeless pointer to the FT_Service_MultiMasters table used to */ + /* handle variation fonts */ + void* mm; + + /* a typeless pointer to the FT_Service_MetricsVariationsRec table */ + /* used to handle the HVAR, VVAR, and MVAR OpenType tables by the */ + /* "truetype" driver */ + void* tt_var; + + /* a typeless pointer to the FT_Service_MetricsVariationsRec table */ + /* used to handle the HVAR, VVAR, and MVAR OpenType tables by this */ + /* TT_Face's driver */ + void* face_var; /* since 2.13.1 */ +#endif + + /* a typeless pointer to the PostScript Aux service */ + void* psaux; + + + /************************************************************************ + * + * Optional TrueType/OpenType tables + * + */ + + /* grid-fitting and scaling table */ + TT_GaspRec gasp; /* the `gasp' table */ + + /* PCL 5 table */ + TT_PCLT pclt; + + /* embedded bitmaps support */ + FT_ULong num_sbit_scales; + 
TT_SBit_Scale sbit_scales; + + /* postscript names table */ + TT_Post_NamesRec postscript_names; + + /* glyph colors */ + FT_Palette_Data palette_data; /* since 2.10 */ + FT_UShort palette_index; + FT_Color* palette; + FT_Bool have_foreground_color; + FT_Color foreground_color; + + + /************************************************************************ + * + * TrueType-specific fields (ignored by the CFF driver) + * + */ + + /* the font program, if any */ + FT_ULong font_program_size; + FT_Byte* font_program; + + /* the cvt program, if any */ + FT_ULong cvt_program_size; + FT_Byte* cvt_program; + + /* the original, unscaled, control value table */ + FT_ULong cvt_size; + FT_Int32* cvt; + + /* A pointer to the bytecode interpreter to use. This is also */ + /* used to hook the debugger for the `ttdebug' utility. */ + TT_Interpreter interpreter; + + + /************************************************************************ + * + * Other tables or fields. This is used by derivative formats like + * OpenType. + * + */ + + FT_Generic extra; + + const char* postscript_name; + + FT_ULong glyf_len; + FT_ULong glyf_offset; /* since 2.7.1 */ + + FT_Bool is_cff2; /* since 2.7.1 */ + +#ifdef TT_CONFIG_OPTION_GX_VAR_SUPPORT + FT_Bool doblend; + GX_Blend blend; + + FT_UInt32 variation_support; /* since 2.7.1 */ + + const char* var_postscript_prefix; /* since 2.7.2 */ + FT_UInt var_postscript_prefix_len; /* since 2.7.2 */ + + FT_UInt var_default_named_instance; /* since 2.13.1 */ + + const char* non_var_style_name; /* since 2.13.1 */ +#endif + + /* since version 2.2 */ + + FT_ULong horz_metrics_size; + FT_ULong vert_metrics_size; + + FT_ULong num_locations; /* up to 0xFFFF + 1 */ + FT_Byte* glyph_locations; + + FT_Byte* hdmx_table; + FT_ULong hdmx_table_size; + FT_UInt hdmx_record_count; + FT_ULong hdmx_record_size; + FT_Byte** hdmx_records; + + FT_Byte* sbit_table; + FT_ULong sbit_table_size; + TT_SbitTableType sbit_table_type; + FT_UInt sbit_num_strikes; + FT_UInt* sbit_strike_map; + + FT_Byte* kern_table; + FT_ULong kern_table_size; + FT_UInt num_kern_tables; + FT_UInt32 kern_avail_bits; + FT_UInt32 kern_order_bits; + +#ifdef TT_CONFIG_OPTION_BDF + TT_BDFRec bdf; +#endif /* TT_CONFIG_OPTION_BDF */ + + /* since 2.3.0 */ + FT_ULong horz_metrics_offset; + FT_ULong vert_metrics_offset; + +#ifdef TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY + /* since 2.4.12 */ + FT_ULong sph_found_func_flags; /* special functions found */ + /* for this face */ + FT_Bool sph_compatibility_mode; +#endif /* TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY */ + +#ifdef TT_CONFIG_OPTION_EMBEDDED_BITMAPS + /* since 2.7 */ + FT_ULong ebdt_start; /* either `CBDT', `EBDT', or `bdat' */ + FT_ULong ebdt_size; +#endif + + /* since 2.10 */ + void* cpal; + void* colr; + + /* since 2.12 */ + void* svg; + + } TT_FaceRec; + + + /************************************************************************** + * + * @struct: + * TT_GlyphZoneRec + * + * @description: + * A glyph zone is used to load, scale and hint glyph outline + * coordinates. + * + * @fields: + * memory :: + * A handle to the memory manager. + * + * max_points :: + * The maximum size in points of the zone. + * + * max_contours :: + * Max size in links contours of the zone. + * + * n_points :: + * The current number of points in the zone. + * + * n_contours :: + * The current number of contours in the zone. + * + * org :: + * The original glyph coordinates (font units/scaled). + * + * cur :: + * The current glyph coordinates (scaled/hinted). + * + * tags :: + * The point control tags. 
+ * + * contours :: + * The contours end points. + * + * first_point :: + * Offset of the current subglyph's first point. + */ + typedef struct TT_GlyphZoneRec_ + { + FT_Memory memory; + FT_UShort max_points; + FT_Short max_contours; + FT_UShort n_points; /* number of points in zone */ + FT_Short n_contours; /* number of contours */ + + FT_Vector* org; /* original point coordinates */ + FT_Vector* cur; /* current point coordinates */ + FT_Vector* orus; /* original (unscaled) point coordinates */ + + FT_Byte* tags; /* current touch flags */ + FT_UShort* contours; /* contour end points */ + + FT_UShort first_point; /* offset of first (#0) point */ + + } TT_GlyphZoneRec, *TT_GlyphZone; + + + /* handle to execution context */ + typedef struct TT_ExecContextRec_* TT_ExecContext; + + + /************************************************************************** + * + * @type: + * TT_Size + * + * @description: + * A handle to a TrueType size object. + */ + typedef struct TT_SizeRec_* TT_Size; + + + /* glyph loader structure */ + typedef struct TT_LoaderRec_ + { + TT_Face face; + TT_Size size; + FT_GlyphSlot glyph; + FT_GlyphLoader gloader; + + FT_ULong load_flags; + FT_UInt glyph_index; + + FT_Stream stream; + FT_UInt byte_len; + + FT_Short n_contours; + FT_BBox bbox; + FT_Int left_bearing; + FT_Int advance; + FT_Int linear; + FT_Bool linear_def; + FT_Vector pp1; + FT_Vector pp2; + + /* the zone where we load our glyphs */ + TT_GlyphZoneRec base; + TT_GlyphZoneRec zone; + + TT_ExecContext exec; + FT_Byte* instructions; + FT_ULong ins_pos; + + /* for possible extensibility in other formats */ + void* other; + + /* since version 2.1.8 */ + FT_Int top_bearing; + FT_Int vadvance; + FT_Vector pp3; + FT_Vector pp4; + + /* since version 2.2.1 */ + FT_Byte* cursor; + FT_Byte* limit; + + /* since version 2.6.2 */ + FT_ListRec composites; + + /* since version 2.11.2 */ + FT_Byte* widthp; + + } TT_LoaderRec; + + +FT_END_HEADER + +#endif /* TTTYPES_H_ */ + + +/* END */ diff --git a/Example/include/freetype/internal/wofftypes.h b/Example/include/freetype/internal/wofftypes.h new file mode 100644 index 0000000..0c1d8ee --- /dev/null +++ b/Example/include/freetype/internal/wofftypes.h @@ -0,0 +1,312 @@ +/**************************************************************************** + * + * wofftypes.h + * + * Basic WOFF/WOFF2 type definitions and interface (specification + * only). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef WOFFTYPES_H_ +#define WOFFTYPES_H_ + + +#include +#include + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @struct: + * WOFF_HeaderRec + * + * @description: + * WOFF file format header. 
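+   *   The `signature` field must contain the tag `wOFF`; a loader
+   *   typically rejects the file early otherwise, for example (with
+   *   `woff` being a local `WOFF_HeaderRec`):
+   *
+   *   ```
+   *     if ( woff.signature != TTAG_wOFF )
+   *       return FT_THROW( Invalid_Table );
+   *   ```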
+ * + * @fields: + * See + * + * https://www.w3.org/TR/WOFF/#WOFFHeader + */ + typedef struct WOFF_HeaderRec_ + { + FT_ULong signature; + FT_ULong flavor; + FT_ULong length; + FT_UShort num_tables; + FT_UShort reserved; + FT_ULong totalSfntSize; + FT_UShort majorVersion; + FT_UShort minorVersion; + FT_ULong metaOffset; + FT_ULong metaLength; + FT_ULong metaOrigLength; + FT_ULong privOffset; + FT_ULong privLength; + + } WOFF_HeaderRec, *WOFF_Header; + + + /************************************************************************** + * + * @struct: + * WOFF_TableRec + * + * @description: + * This structure describes a given table of a WOFF font. + * + * @fields: + * Tag :: + * A four-bytes tag describing the table. + * + * Offset :: + * The offset of the table from the start of the WOFF font in its + * resource. + * + * CompLength :: + * Compressed table length (in bytes). + * + * OrigLength :: + * Uncompressed table length (in bytes). + * + * CheckSum :: + * The table checksum. This value can be ignored. + * + * OrigOffset :: + * The uncompressed table file offset. This value gets computed while + * constructing the (uncompressed) SFNT header. It is not contained in + * the WOFF file. + */ + typedef struct WOFF_TableRec_ + { + FT_Tag Tag; /* table ID */ + FT_ULong Offset; /* table file offset */ + FT_ULong CompLength; /* compressed table length */ + FT_ULong OrigLength; /* uncompressed table length */ + FT_ULong CheckSum; /* uncompressed checksum */ + + FT_ULong OrigOffset; /* uncompressed table file offset */ + /* (not in the WOFF file) */ + } WOFF_TableRec, *WOFF_Table; + + + /************************************************************************** + * + * @struct: + * WOFF2_TtcFontRec + * + * @description: + * Metadata for a TTC font entry in WOFF2. + * + * @fields: + * flavor :: + * TTC font flavor. + * + * num_tables :: + * Number of tables in TTC, indicating number of elements in + * `table_indices`. + * + * table_indices :: + * Array of table indices for each TTC font. + */ + typedef struct WOFF2_TtcFontRec_ + { + FT_ULong flavor; + FT_UShort num_tables; + FT_UShort* table_indices; + + } WOFF2_TtcFontRec, *WOFF2_TtcFont; + + + /************************************************************************** + * + * @struct: + * WOFF2_HeaderRec + * + * @description: + * WOFF2 file format header. + * + * @fields: + * See + * + * https://www.w3.org/TR/WOFF2/#woff20Header + * + * @note: + * We don't care about the fields `reserved`, `majorVersion` and + * `minorVersion`, so they are not included. The `totalSfntSize` field + * does not necessarily represent the actual size of the uncompressed + * SFNT font stream, so that is used as a reference value instead. 
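   *
   *   As an illustration only, a header of this kind, once filled in from
   *   the input stream, might be sanity-checked as sketched below.  Here
   *   `woff2` is assumed to be an already parsed `WOFF2_HeaderRec`,
   *   `error` an `FT_Error` variable, `FT_MAKE_TAG` the public tag macro,
   *   and `use_ttc_font` a hypothetical helper.
   *
   *   ```
   *     if ( woff2.signature != FT_MAKE_TAG( 'w', 'O', 'F', '2' ) )
   *       error = FT_Err_Invalid_Table;           // not a WOFF2 stream
   *     else if ( woff2.flavor == FT_MAKE_TAG( 't', 't', 'c', 'f' ) )
   *     {
   *       // font collection: `ttc_fonts` holds one entry per face
   *       FT_UShort  i;
   *
   *
   *       for ( i = 0; i < woff2.num_fonts; i++ )
   *         use_ttc_font( &woff2.ttc_fonts[i] );  // hypothetical consumer
   *     }
   *   ```
   *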
+ */ + typedef struct WOFF2_HeaderRec_ + { + FT_ULong signature; + FT_ULong flavor; + FT_ULong length; + FT_UShort num_tables; + FT_ULong totalSfntSize; + FT_ULong totalCompressedSize; + FT_ULong metaOffset; + FT_ULong metaLength; + FT_ULong metaOrigLength; + FT_ULong privOffset; + FT_ULong privLength; + + FT_ULong uncompressed_size; /* uncompressed brotli stream size */ + FT_ULong compressed_offset; /* compressed stream offset */ + FT_ULong header_version; /* version of original TTC Header */ + FT_UShort num_fonts; /* number of fonts in TTC */ + FT_ULong actual_sfnt_size; /* actual size of sfnt stream */ + + WOFF2_TtcFont ttc_fonts; /* metadata for fonts in a TTC */ + + } WOFF2_HeaderRec, *WOFF2_Header; + + + /************************************************************************** + * + * @struct: + * WOFF2_TableRec + * + * @description: + * This structure describes a given table of a WOFF2 font. + * + * @fields: + * See + * + * https://www.w3.org/TR/WOFF2/#table_dir_format + */ + typedef struct WOFF2_TableRec_ + { + FT_Byte FlagByte; /* table type and flags */ + FT_Tag Tag; /* table file offset */ + FT_ULong dst_length; /* uncompressed table length */ + FT_ULong TransformLength; /* transformed length */ + + FT_ULong flags; /* calculated flags */ + FT_ULong src_offset; /* compressed table offset */ + FT_ULong src_length; /* compressed table length */ + FT_ULong dst_offset; /* uncompressed table offset */ + + } WOFF2_TableRec, *WOFF2_Table; + + + /************************************************************************** + * + * @struct: + * WOFF2_InfoRec + * + * @description: + * Metadata for WOFF2 font that may be required for reconstruction of + * sfnt tables. + * + * @fields: + * header_checksum :: + * Checksum of SFNT offset table. + * + * num_glyphs :: + * Number of glyphs in the font. + * + * num_hmetrics :: + * `numberOfHMetrics` field in the 'hhea' table. + * + * x_mins :: + * `xMin` values of glyph bounding box. + * + * glyf_table :: + * A pointer to the `glyf' table record. + * + * loca_table :: + * A pointer to the `loca' table record. + * + * head_table :: + * A pointer to the `head' table record. + */ + typedef struct WOFF2_InfoRec_ + { + FT_ULong header_checksum; + FT_UShort num_glyphs; + FT_UShort num_hmetrics; + FT_Short* x_mins; + + WOFF2_Table glyf_table; + WOFF2_Table loca_table; + WOFF2_Table head_table; + + } WOFF2_InfoRec, *WOFF2_Info; + + + /************************************************************************** + * + * @struct: + * WOFF2_SubstreamRec + * + * @description: + * This structure stores information about a substream in the transformed + * 'glyf' table in a WOFF2 stream. + * + * @fields: + * start :: + * Beginning of the substream relative to uncompressed table stream. + * + * offset :: + * Offset of the substream relative to uncompressed table stream. + * + * size :: + * Size of the substream. + */ + typedef struct WOFF2_SubstreamRec_ + { + FT_ULong start; + FT_ULong offset; + FT_ULong size; + + } WOFF2_SubstreamRec, *WOFF2_Substream; + + + /************************************************************************** + * + * @struct: + * WOFF2_PointRec + * + * @description: + * This structure stores information about a point in the transformed + * 'glyf' table in a WOFF2 stream. + * + * @fields: + * x :: + * x-coordinate of point. + * + * y :: + * y-coordinate of point. + * + * on_curve :: + * Set if point is on-curve. 
+ */ + typedef struct WOFF2_PointRec_ + { + FT_Int x; + FT_Int y; + FT_Bool on_curve; + + } WOFF2_PointRec, *WOFF2_Point; + + +FT_END_HEADER + +#endif /* WOFFTYPES_H_ */ + + +/* END */ diff --git a/Example/include/freetype/otsvg.h b/Example/include/freetype/otsvg.h new file mode 100644 index 0000000..bfe9a6a --- /dev/null +++ b/Example/include/freetype/otsvg.h @@ -0,0 +1,336 @@ +/**************************************************************************** + * + * otsvg.h + * + * Interface for OT-SVG support related things (specification). + * + * Copyright (C) 2022-2023 by + * David Turner, Robert Wilhelm, Werner Lemberg, and Moazin Khatti. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef OTSVG_H_ +#define OTSVG_H_ + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * svg_fonts + * + * @title: + * OpenType SVG Fonts + * + * @abstract: + * OT-SVG API between FreeType and an external SVG rendering library. + * + * @description: + * This section describes the four hooks necessary to render SVG + * 'documents' that are contained in an OpenType font's 'SVG~' table. + * + * For more information on the implementation, see our standard hooks + * based on 'librsvg' in the [FreeType Demo + * Programs](https://gitlab.freedesktop.org/freetype/freetype-demos) + * repository. + * + */ + + + /************************************************************************** + * + * @functype: + * SVG_Lib_Init_Func + * + * @description: + * A callback that is called when the first OT-SVG glyph is rendered in + * the lifetime of an @FT_Library object. In a typical implementation, + * one would want to allocate a structure and point the `data_pointer` + * to it and perform any library initializations that might be needed. + * + * @inout: + * data_pointer :: + * The SVG rendering module stores a pointer variable that can be used + * by clients to store any data that needs to be shared across + * different hooks. `data_pointer` is essentially a pointer to that + * pointer such that it can be written to as well as read from. + * + * @return: + * FreeType error code. 0 means success. + * + * @since: + * 2.12 + */ + typedef FT_Error + (*SVG_Lib_Init_Func)( FT_Pointer *data_pointer ); + + + /************************************************************************** + * + * @functype: + * SVG_Lib_Free_Func + * + * @description: + * A callback that is called when the `ot-svg` module is being freed. + * It is only called if the init hook was called earlier. This means + * that neither the init nor the free hook is called if no OT-SVG glyph + * is rendered. + * + * In a typical implementation, one would want to free any state + * structure that was allocated in the init hook and perform any + * library-related closure that might be needed. + * + * @inout: + * data_pointer :: + * The SVG rendering module stores a pointer variable that can be used + * by clients to store any data that needs to be shared across + * different hooks. 
`data_pointer` is essentially a pointer to that + * pointer such that it can be written to as well as read from. + * + * @since: + * 2.12 + */ + typedef void + (*SVG_Lib_Free_Func)( FT_Pointer *data_pointer ); + + + /************************************************************************** + * + * @functype: + * SVG_Lib_Render_Func + * + * @description: + * A callback that is called to render an OT-SVG glyph. This callback + * hook is called right after the preset hook @SVG_Lib_Preset_Slot_Func + * has been called with `cache` set to `TRUE`. The data necessary to + * render is available through the handle @FT_SVG_Document, which is set + * in the `other` field of @FT_GlyphSlotRec. + * + * The render hook is expected to render the SVG glyph to the bitmap + * buffer that is allocated already at `slot->bitmap.buffer`. It also + * sets the `num_grays` value as well as `slot->format`. + * + * @input: + * slot :: + * The slot to render. + * + * @inout: + * data_pointer :: + * The SVG rendering module stores a pointer variable that can be used + * by clients to store any data that needs to be shared across + * different hooks. `data_pointer` is essentially a pointer to that + * pointer such that it can be written to as well as read from. + * + * @return: + * FreeType error code. 0 means success. + * + * @since: + * 2.12 + */ + typedef FT_Error + (*SVG_Lib_Render_Func)( FT_GlyphSlot slot, + FT_Pointer *data_pointer ); + + + /************************************************************************** + * + * @functype: + * SVG_Lib_Preset_Slot_Func + * + * @description: + * A callback that is called to preset the glyph slot. It is called from + * two places. + * + * 1. When `FT_Load_Glyph` needs to preset the glyph slot. + * + * 2. Right before the `svg` module calls the render callback hook. + * + * When it is the former, the argument `cache` is set to `FALSE`. When + * it is the latter, the argument `cache` is set to `TRUE`. This + * distinction has been made because many calculations that are necessary + * for presetting a glyph slot are the same needed later for the render + * callback hook. Thus, if `cache` is `TRUE`, the hook can _cache_ those + * calculations in a memory block referenced by the state pointer. + * + * This hook is expected to preset the slot by setting parameters such as + * `bitmap_left`, `bitmap_top`, `width`, `rows`, `pitch`, and + * `pixel_mode`. It is also expected to set all the metrics for the slot + * including the vertical advance if it is not already set. Typically, + * fonts have horizontal advances but not vertical ones. If those are + * available, they had already been set, otherwise they have to be + * estimated and set manually. The hook must take into account the + * transformations that have been set, and translate the transformation + * matrices into the SVG coordinate system, as the original matrix is + * intended for the TTF/CFF coordinate system. + * + * @input: + * slot :: + * The glyph slot that has the SVG document loaded. + * + * cache :: + * See description. + * + * @inout: + * data_pointer :: + * The SVG rendering module stores a pointer variable that can be used + * by clients to store any data that needs to be shared across + * different hooks. `data_pointer` is essentially a pointer to that + * pointer such that it can be written to as well as read from. + * + * @return: + * FreeType error code. 0 means success. 
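   *
   *   A minimal sketch of the `data_pointer` contract that the init and
   *   free hooks share with this one (illustrative only; `MySVGStateRec`
   *   is a hypothetical state type, and `calloc` and `free` come from the
   *   standard `stdlib.h` header).
   *
   *   ```
   *     typedef struct  MySVGStateRec_
   *     {
   *       int  prepared;   // hypothetical cached data lives here
   *
   *     } MySVGStateRec;
   *
   *
   *     static FT_Error
   *     my_svg_init( FT_Pointer  *data_pointer )
   *     {
   *       MySVGStateRec*  state =
   *         (MySVGStateRec*)calloc( 1, sizeof ( MySVGStateRec ) );
   *
   *
   *       if ( !state )
   *         return FT_Err_Out_Of_Memory;
   *
   *       *data_pointer = state;    // shared with render and preset hooks
   *       return FT_Err_Ok;
   *     }
   *
   *
   *     static void
   *     my_svg_free( FT_Pointer  *data_pointer )
   *     {
   *       free( *data_pointer );
   *       *data_pointer = NULL;
   *     }
   *   ```
   *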
+ * + * @since: + * 2.12 + */ + typedef FT_Error + (*SVG_Lib_Preset_Slot_Func)( FT_GlyphSlot slot, + FT_Bool cache, + FT_Pointer *state ); + + + /************************************************************************** + * + * @struct: + * SVG_RendererHooks + * + * @description: + * A structure that stores the four hooks needed to render OT-SVG glyphs + * properly. The structure is publicly used to set the hooks via the + * @svg-hooks driver property. + * + * The behavior of each hook is described in its documentation. One + * thing to note is that the preset hook and the render hook often need + * to do the same operations; therefore, it's better to cache the + * intermediate data in a state structure to avoid calculating it twice. + * For example, in the preset hook one can draw the glyph on a recorder + * surface and later create a bitmap surface from it in the render hook. + * + * All four hooks must be non-NULL. + * + * @fields: + * init_svg :: + * The initialization hook. + * + * free_svg :: + * The cleanup hook. + * + * render_hook :: + * The render hook. + * + * preset_slot :: + * The preset hook. + * + * @since: + * 2.12 + */ + typedef struct SVG_RendererHooks_ + { + SVG_Lib_Init_Func init_svg; + SVG_Lib_Free_Func free_svg; + SVG_Lib_Render_Func render_svg; + + SVG_Lib_Preset_Slot_Func preset_slot; + + } SVG_RendererHooks; + + + /************************************************************************** + * + * @struct: + * FT_SVG_DocumentRec + * + * @description: + * A structure that models one SVG document. + * + * @fields: + * svg_document :: + * A pointer to the SVG document. + * + * svg_document_length :: + * The length of `svg_document`. + * + * metrics :: + * A metrics object storing the size information. + * + * units_per_EM :: + * The size of the EM square. + * + * start_glyph_id :: + * The first glyph ID in the glyph range covered by this document. + * + * end_glyph_id :: + * The last glyph ID in the glyph range covered by this document. + * + * transform :: + * A 2x2 transformation matrix to apply to the glyph while rendering + * it. + * + * delta :: + * The translation to apply to the glyph while rendering. + * + * @note: + * When an @FT_GlyphSlot object `slot` is passed down to a renderer, the + * renderer can only access the `metrics` and `units_per_EM` fields via + * `slot->face`. However, when @FT_Glyph_To_Bitmap sets up a dummy + * object, it has no way to set a `face` object. Thus, metrics + * information and `units_per_EM` (which is necessary for OT-SVG) has to + * be stored separately. + * + * @since: + * 2.12 + */ + typedef struct FT_SVG_DocumentRec_ + { + FT_Byte* svg_document; + FT_ULong svg_document_length; + + FT_Size_Metrics metrics; + FT_UShort units_per_EM; + + FT_UShort start_glyph_id; + FT_UShort end_glyph_id; + + FT_Matrix transform; + FT_Vector delta; + + } FT_SVG_DocumentRec; + + + /************************************************************************** + * + * @type: + * FT_SVG_Document + * + * @description: + * A handle to an @FT_SVG_DocumentRec object. 
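   *
   *   A possible registration sketch: once all four hooks exist, they are
   *   handed to the `ot-svg` module through its `svg-hooks` property.
   *   Here `library` is assumed to be an initialized `FT_Library`,
   *   `error` an `FT_Error`, `FT_Property_Set` comes from the module
   *   interface header `ftmodapi.h`, and the `my_svg_*` functions are
   *   hypothetical hook implementations.
   *
   *   ```
   *     SVG_RendererHooks  hooks;
   *
   *
   *     hooks.init_svg    = my_svg_init;
   *     hooks.free_svg    = my_svg_free;
   *     hooks.render_svg  = my_svg_render;
   *     hooks.preset_slot = my_svg_preset_slot;
   *
   *     error = FT_Property_Set( library, "ot-svg",
   *                              "svg-hooks", &hooks );
   *   ```
   *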
+ * + * @since: + * 2.12 + */ + typedef struct FT_SVG_DocumentRec_* FT_SVG_Document; + + +FT_END_HEADER + +#endif /* OTSVG_H_ */ + + +/* END */ diff --git a/Example/include/freetype/t1tables.h b/Example/include/freetype/t1tables.h new file mode 100644 index 0000000..1aecfbb --- /dev/null +++ b/Example/include/freetype/t1tables.h @@ -0,0 +1,793 @@ +/**************************************************************************** + * + * t1tables.h + * + * Basic Type 1/Type 2 tables definitions and interface (specification + * only). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef T1TABLES_H_ +#define T1TABLES_H_ + + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * type1_tables + * + * @title: + * Type 1 Tables + * + * @abstract: + * Type~1-specific font tables. + * + * @description: + * This section contains the definition of Type~1-specific tables, + * including structures related to other PostScript font formats. + * + * @order: + * PS_FontInfoRec + * PS_FontInfo + * PS_PrivateRec + * PS_Private + * + * CID_FaceDictRec + * CID_FaceDict + * CID_FaceInfoRec + * CID_FaceInfo + * + * FT_Has_PS_Glyph_Names + * FT_Get_PS_Font_Info + * FT_Get_PS_Font_Private + * FT_Get_PS_Font_Value + * + * T1_Blend_Flags + * T1_EncodingType + * PS_Dict_Keys + * + */ + + + /* Note that we separate font data in PS_FontInfoRec and PS_PrivateRec */ + /* structures in order to support Multiple Master fonts. */ + + + /************************************************************************** + * + * @struct: + * PS_FontInfoRec + * + * @description: + * A structure used to model a Type~1 or Type~2 FontInfo dictionary. + * Note that for Multiple Master fonts, each instance has its own + * FontInfo dictionary. + */ + typedef struct PS_FontInfoRec_ + { + FT_String* version; + FT_String* notice; + FT_String* full_name; + FT_String* family_name; + FT_String* weight; + FT_Long italic_angle; + FT_Bool is_fixed_pitch; + FT_Short underline_position; + FT_UShort underline_thickness; + + } PS_FontInfoRec; + + + /************************************************************************** + * + * @struct: + * PS_FontInfo + * + * @description: + * A handle to a @PS_FontInfoRec structure. + */ + typedef struct PS_FontInfoRec_* PS_FontInfo; + + + /************************************************************************** + * + * @struct: + * T1_FontInfo + * + * @description: + * This type is equivalent to @PS_FontInfoRec. It is deprecated but kept + * to maintain source compatibility between various versions of FreeType. + */ + typedef PS_FontInfoRec T1_FontInfo; + + + /************************************************************************** + * + * @struct: + * PS_PrivateRec + * + * @description: + * A structure used to model a Type~1 or Type~2 private dictionary. Note + * that for Multiple Master fonts, each instance has its own Private + * dictionary. 
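   *
   *   A short usage sketch (assuming `face` is an already opened Type~1
   *   face, `error` an `FT_Error`, and `printf` from `stdio.h`); the
   *   retrieval function @FT_Get_PS_Font_Private is declared further
   *   below in this header.
   *
   *   ```
   *     PS_PrivateRec  ps_priv;
   *     FT_Int         i;
   *
   *
   *     error = FT_Get_PS_Font_Private( face, &ps_priv );
   *     if ( !error )
   *     {
   *       // only the first `num_blue_values` entries of the fixed-size
   *       // `blue_values` array are meaningful
   *       for ( i = 0; i < ps_priv.num_blue_values; i++ )
   *         printf( "BlueValues[%d] = %d\n", i, ps_priv.blue_values[i] );
   *     }
   *   ```
   *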
+ */ + typedef struct PS_PrivateRec_ + { + FT_Int unique_id; + FT_Int lenIV; + + FT_Byte num_blue_values; + FT_Byte num_other_blues; + FT_Byte num_family_blues; + FT_Byte num_family_other_blues; + + FT_Short blue_values[14]; + FT_Short other_blues[10]; + + FT_Short family_blues [14]; + FT_Short family_other_blues[10]; + + FT_Fixed blue_scale; + FT_Int blue_shift; + FT_Int blue_fuzz; + + FT_UShort standard_width[1]; + FT_UShort standard_height[1]; + + FT_Byte num_snap_widths; + FT_Byte num_snap_heights; + FT_Bool force_bold; + FT_Bool round_stem_up; + + FT_Short snap_widths [13]; /* including std width */ + FT_Short snap_heights[13]; /* including std height */ + + FT_Fixed expansion_factor; + + FT_Long language_group; + FT_Long password; + + FT_Short min_feature[2]; + + } PS_PrivateRec; + + + /************************************************************************** + * + * @struct: + * PS_Private + * + * @description: + * A handle to a @PS_PrivateRec structure. + */ + typedef struct PS_PrivateRec_* PS_Private; + + + /************************************************************************** + * + * @struct: + * T1_Private + * + * @description: + * This type is equivalent to @PS_PrivateRec. It is deprecated but kept + * to maintain source compatibility between various versions of FreeType. + */ + typedef PS_PrivateRec T1_Private; + + + /************************************************************************** + * + * @enum: + * T1_Blend_Flags + * + * @description: + * A set of flags used to indicate which fields are present in a given + * blend dictionary (font info or private). Used to support Multiple + * Masters fonts. + * + * @values: + * T1_BLEND_UNDERLINE_POSITION :: + * T1_BLEND_UNDERLINE_THICKNESS :: + * T1_BLEND_ITALIC_ANGLE :: + * T1_BLEND_BLUE_VALUES :: + * T1_BLEND_OTHER_BLUES :: + * T1_BLEND_STANDARD_WIDTH :: + * T1_BLEND_STANDARD_HEIGHT :: + * T1_BLEND_STEM_SNAP_WIDTHS :: + * T1_BLEND_STEM_SNAP_HEIGHTS :: + * T1_BLEND_BLUE_SCALE :: + * T1_BLEND_BLUE_SHIFT :: + * T1_BLEND_FAMILY_BLUES :: + * T1_BLEND_FAMILY_OTHER_BLUES :: + * T1_BLEND_FORCE_BOLD :: + */ + typedef enum T1_Blend_Flags_ + { + /* required fields in a FontInfo blend dictionary */ + T1_BLEND_UNDERLINE_POSITION = 0, + T1_BLEND_UNDERLINE_THICKNESS, + T1_BLEND_ITALIC_ANGLE, + + /* required fields in a Private blend dictionary */ + T1_BLEND_BLUE_VALUES, + T1_BLEND_OTHER_BLUES, + T1_BLEND_STANDARD_WIDTH, + T1_BLEND_STANDARD_HEIGHT, + T1_BLEND_STEM_SNAP_WIDTHS, + T1_BLEND_STEM_SNAP_HEIGHTS, + T1_BLEND_BLUE_SCALE, + T1_BLEND_BLUE_SHIFT, + T1_BLEND_FAMILY_BLUES, + T1_BLEND_FAMILY_OTHER_BLUES, + T1_BLEND_FORCE_BOLD, + + T1_BLEND_MAX /* do not remove */ + + } T1_Blend_Flags; + + + /* these constants are deprecated; use the corresponding */ + /* `T1_Blend_Flags` values instead */ +#define t1_blend_underline_position T1_BLEND_UNDERLINE_POSITION +#define t1_blend_underline_thickness T1_BLEND_UNDERLINE_THICKNESS +#define t1_blend_italic_angle T1_BLEND_ITALIC_ANGLE +#define t1_blend_blue_values T1_BLEND_BLUE_VALUES +#define t1_blend_other_blues T1_BLEND_OTHER_BLUES +#define t1_blend_standard_widths T1_BLEND_STANDARD_WIDTH +#define t1_blend_standard_height T1_BLEND_STANDARD_HEIGHT +#define t1_blend_stem_snap_widths T1_BLEND_STEM_SNAP_WIDTHS +#define t1_blend_stem_snap_heights T1_BLEND_STEM_SNAP_HEIGHTS +#define t1_blend_blue_scale T1_BLEND_BLUE_SCALE +#define t1_blend_blue_shift T1_BLEND_BLUE_SHIFT +#define t1_blend_family_blues T1_BLEND_FAMILY_BLUES +#define t1_blend_family_other_blues T1_BLEND_FAMILY_OTHER_BLUES +#define 
t1_blend_force_bold T1_BLEND_FORCE_BOLD +#define t1_blend_max T1_BLEND_MAX + + /* */ + + + /* maximum number of Multiple Masters designs, as defined in the spec */ +#define T1_MAX_MM_DESIGNS 16 + + /* maximum number of Multiple Masters axes, as defined in the spec */ +#define T1_MAX_MM_AXIS 4 + + /* maximum number of elements in a design map */ +#define T1_MAX_MM_MAP_POINTS 20 + + + /* this structure is used to store the BlendDesignMap entry for an axis */ + typedef struct PS_DesignMap_ + { + FT_Byte num_points; + FT_Long* design_points; + FT_Fixed* blend_points; + + } PS_DesignMapRec, *PS_DesignMap; + + /* backward compatible definition */ + typedef PS_DesignMapRec T1_DesignMap; + + + typedef struct PS_BlendRec_ + { + FT_UInt num_designs; + FT_UInt num_axis; + + FT_String* axis_names[T1_MAX_MM_AXIS]; + FT_Fixed* design_pos[T1_MAX_MM_DESIGNS]; + PS_DesignMapRec design_map[T1_MAX_MM_AXIS]; + + FT_Fixed* weight_vector; + FT_Fixed* default_weight_vector; + + PS_FontInfo font_infos[T1_MAX_MM_DESIGNS + 1]; + PS_Private privates [T1_MAX_MM_DESIGNS + 1]; + + FT_ULong blend_bitflags; + + FT_BBox* bboxes [T1_MAX_MM_DESIGNS + 1]; + + /* since 2.3.0 */ + + /* undocumented, optional: the default design instance; */ + /* corresponds to default_weight_vector -- */ + /* num_default_design_vector == 0 means it is not present */ + /* in the font and associated metrics files */ + FT_UInt default_design_vector[T1_MAX_MM_DESIGNS]; + FT_UInt num_default_design_vector; + + } PS_BlendRec, *PS_Blend; + + + /* backward compatible definition */ + typedef PS_BlendRec T1_Blend; + + + /************************************************************************** + * + * @struct: + * CID_FaceDictRec + * + * @description: + * A structure used to represent data in a CID top-level dictionary. In + * most cases, they are part of the font's '/FDArray' array. Within a + * CID font file, such (internal) subfont dictionaries are enclosed by + * '%ADOBeginFontDict' and '%ADOEndFontDict' comments. + * + * Note that `CID_FaceDictRec` misses a field for the '/FontName' + * keyword, specifying the subfont's name (the top-level font name is + * given by the '/CIDFontName' keyword). This is an oversight, but it + * doesn't limit the 'cid' font module's functionality because FreeType + * neither needs this entry nor gives access to CID subfonts. + */ + typedef struct CID_FaceDictRec_ + { + PS_PrivateRec private_dict; + + FT_UInt len_buildchar; + FT_Fixed forcebold_threshold; + FT_Pos stroke_width; + FT_Fixed expansion_factor; /* this is a duplicate of */ + /* `private_dict->expansion_factor' */ + FT_Byte paint_type; + FT_Byte font_type; + FT_Matrix font_matrix; + FT_Vector font_offset; + + FT_UInt num_subrs; + FT_ULong subrmap_offset; + FT_UInt sd_bytes; + + } CID_FaceDictRec; + + + /************************************************************************** + * + * @struct: + * CID_FaceDict + * + * @description: + * A handle to a @CID_FaceDictRec structure. + */ + typedef struct CID_FaceDictRec_* CID_FaceDict; + + + /************************************************************************** + * + * @struct: + * CID_FontDict + * + * @description: + * This type is equivalent to @CID_FaceDictRec. It is deprecated but + * kept to maintain source compatibility between various versions of + * FreeType. + */ + typedef CID_FaceDictRec CID_FontDict; + + + /************************************************************************** + * + * @struct: + * CID_FaceInfoRec + * + * @description: + * A structure used to represent CID Face information. 
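   *
   *   The registry, ordering, and supplement stored here can also be
   *   reached through the public CID API; a sketch, assuming `face` is an
   *   already opened CID-keyed face, `error` an `FT_Error`, `printf` from
   *   `stdio.h`, and `FT_Get_CID_Registry_Ordering_Supplement` from the
   *   public `ftcid.h` header.
   *
   *   ```
   *     const char*  registry;
   *     const char*  ordering;
   *     FT_Int       supplement;
   *
   *
   *     error = FT_Get_CID_Registry_Ordering_Supplement( face,
   *                                                      &registry,
   *                                                      &ordering,
   *                                                      &supplement );
   *     if ( !error )
   *       printf( "ROS: %s-%s-%d\n", registry, ordering, supplement );
   *   ```
   *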
+ */ + typedef struct CID_FaceInfoRec_ + { + FT_String* cid_font_name; + FT_Fixed cid_version; + FT_Int cid_font_type; + + FT_String* registry; + FT_String* ordering; + FT_Int supplement; + + PS_FontInfoRec font_info; + FT_BBox font_bbox; + FT_ULong uid_base; + + FT_Int num_xuid; + FT_ULong xuid[16]; + + FT_ULong cidmap_offset; + FT_UInt fd_bytes; + FT_UInt gd_bytes; + FT_ULong cid_count; + + FT_UInt num_dicts; + CID_FaceDict font_dicts; + + FT_ULong data_offset; + + } CID_FaceInfoRec; + + + /************************************************************************** + * + * @struct: + * CID_FaceInfo + * + * @description: + * A handle to a @CID_FaceInfoRec structure. + */ + typedef struct CID_FaceInfoRec_* CID_FaceInfo; + + + /************************************************************************** + * + * @struct: + * CID_Info + * + * @description: + * This type is equivalent to @CID_FaceInfoRec. It is deprecated but kept + * to maintain source compatibility between various versions of FreeType. + */ + typedef CID_FaceInfoRec CID_Info; + + + /************************************************************************** + * + * @function: + * FT_Has_PS_Glyph_Names + * + * @description: + * Return true if a given face provides reliable PostScript glyph names. + * This is similar to using the @FT_HAS_GLYPH_NAMES macro, except that + * certain fonts (mostly TrueType) contain incorrect glyph name tables. + * + * When this function returns true, the caller is sure that the glyph + * names returned by @FT_Get_Glyph_Name are reliable. + * + * @input: + * face :: + * face handle + * + * @return: + * Boolean. True if glyph names are reliable. + * + */ + FT_EXPORT( FT_Int ) + FT_Has_PS_Glyph_Names( FT_Face face ); + + + /************************************************************************** + * + * @function: + * FT_Get_PS_Font_Info + * + * @description: + * Retrieve the @PS_FontInfoRec structure corresponding to a given + * PostScript font. + * + * @input: + * face :: + * PostScript face handle. + * + * @output: + * afont_info :: + * A pointer to a @PS_FontInfoRec object. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * String pointers within the @PS_FontInfoRec structure are owned by the + * face and don't need to be freed by the caller. Missing entries in the + * font's FontInfo dictionary are represented by `NULL` pointers. + * + * The following font formats support this feature: 'Type~1', 'Type~42', + * 'CFF', 'CID~Type~1'. For other font formats this function returns the + * `FT_Err_Invalid_Argument` error code. + * + * @example: + * ``` + * PS_FontInfoRec font_info; + * + * + * error = FT_Get_PS_Font_Info( face, &font_info ); + * ... + * ``` + * + */ + FT_EXPORT( FT_Error ) + FT_Get_PS_Font_Info( FT_Face face, + PS_FontInfo afont_info ); + + + /************************************************************************** + * + * @function: + * FT_Get_PS_Font_Private + * + * @description: + * Retrieve the @PS_PrivateRec structure corresponding to a given + * PostScript font. + * + * @input: + * face :: + * PostScript face handle. + * + * @output: + * afont_private :: + * A pointer to a @PS_PrivateRec object. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * The string pointers within the @PS_PrivateRec structure are owned by + * the face and don't need to be freed by the caller. + * + * Only the 'Type~1' font format supports this feature. For other font + * formats this function returns the `FT_Err_Invalid_Argument` error + * code. 
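   *
   *   A related sketch for @FT_Has_PS_Glyph_Names above (assuming `face`
   *   is an already opened face, `error` an `FT_Error`, `printf` from
   *   `stdio.h`, and @FT_Get_Glyph_Name from the core API):
   *
   *   ```
   *     char  gname[128];
   *
   *
   *     if ( FT_Has_PS_Glyph_Names( face ) )
   *     {
   *       // names are reliable; fetch the name of glyph index 3
   *       error = FT_Get_Glyph_Name( face, 3, gname, sizeof ( gname ) );
   *       if ( !error )
   *         printf( "glyph 3 is '%s'\n", gname );
   *     }
   *   ```
   *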
+ * + * @example: + * ``` + * PS_PrivateRec font_private; + * + * + * error = FT_Get_PS_Font_Private( face, &font_private ); + * ... + * ``` + * + */ + FT_EXPORT( FT_Error ) + FT_Get_PS_Font_Private( FT_Face face, + PS_Private afont_private ); + + + /************************************************************************** + * + * @enum: + * T1_EncodingType + * + * @description: + * An enumeration describing the 'Encoding' entry in a Type 1 dictionary. + * + * @values: + * T1_ENCODING_TYPE_NONE :: + * T1_ENCODING_TYPE_ARRAY :: + * T1_ENCODING_TYPE_STANDARD :: + * T1_ENCODING_TYPE_ISOLATIN1 :: + * T1_ENCODING_TYPE_EXPERT :: + * + * @since: + * 2.4.8 + */ + typedef enum T1_EncodingType_ + { + T1_ENCODING_TYPE_NONE = 0, + T1_ENCODING_TYPE_ARRAY, + T1_ENCODING_TYPE_STANDARD, + T1_ENCODING_TYPE_ISOLATIN1, + T1_ENCODING_TYPE_EXPERT + + } T1_EncodingType; + + + /************************************************************************** + * + * @enum: + * PS_Dict_Keys + * + * @description: + * An enumeration used in calls to @FT_Get_PS_Font_Value to identify the + * Type~1 dictionary entry to retrieve. + * + * @values: + * PS_DICT_FONT_TYPE :: + * PS_DICT_FONT_MATRIX :: + * PS_DICT_FONT_BBOX :: + * PS_DICT_PAINT_TYPE :: + * PS_DICT_FONT_NAME :: + * PS_DICT_UNIQUE_ID :: + * PS_DICT_NUM_CHAR_STRINGS :: + * PS_DICT_CHAR_STRING_KEY :: + * PS_DICT_CHAR_STRING :: + * PS_DICT_ENCODING_TYPE :: + * PS_DICT_ENCODING_ENTRY :: + * PS_DICT_NUM_SUBRS :: + * PS_DICT_SUBR :: + * PS_DICT_STD_HW :: + * PS_DICT_STD_VW :: + * PS_DICT_NUM_BLUE_VALUES :: + * PS_DICT_BLUE_VALUE :: + * PS_DICT_BLUE_FUZZ :: + * PS_DICT_NUM_OTHER_BLUES :: + * PS_DICT_OTHER_BLUE :: + * PS_DICT_NUM_FAMILY_BLUES :: + * PS_DICT_FAMILY_BLUE :: + * PS_DICT_NUM_FAMILY_OTHER_BLUES :: + * PS_DICT_FAMILY_OTHER_BLUE :: + * PS_DICT_BLUE_SCALE :: + * PS_DICT_BLUE_SHIFT :: + * PS_DICT_NUM_STEM_SNAP_H :: + * PS_DICT_STEM_SNAP_H :: + * PS_DICT_NUM_STEM_SNAP_V :: + * PS_DICT_STEM_SNAP_V :: + * PS_DICT_FORCE_BOLD :: + * PS_DICT_RND_STEM_UP :: + * PS_DICT_MIN_FEATURE :: + * PS_DICT_LEN_IV :: + * PS_DICT_PASSWORD :: + * PS_DICT_LANGUAGE_GROUP :: + * PS_DICT_VERSION :: + * PS_DICT_NOTICE :: + * PS_DICT_FULL_NAME :: + * PS_DICT_FAMILY_NAME :: + * PS_DICT_WEIGHT :: + * PS_DICT_IS_FIXED_PITCH :: + * PS_DICT_UNDERLINE_POSITION :: + * PS_DICT_UNDERLINE_THICKNESS :: + * PS_DICT_FS_TYPE :: + * PS_DICT_ITALIC_ANGLE :: + * + * @since: + * 2.4.8 + */ + typedef enum PS_Dict_Keys_ + { + /* conventionally in the font dictionary */ + PS_DICT_FONT_TYPE, /* FT_Byte */ + PS_DICT_FONT_MATRIX, /* FT_Fixed */ + PS_DICT_FONT_BBOX, /* FT_Fixed */ + PS_DICT_PAINT_TYPE, /* FT_Byte */ + PS_DICT_FONT_NAME, /* FT_String* */ + PS_DICT_UNIQUE_ID, /* FT_Int */ + PS_DICT_NUM_CHAR_STRINGS, /* FT_Int */ + PS_DICT_CHAR_STRING_KEY, /* FT_String* */ + PS_DICT_CHAR_STRING, /* FT_String* */ + PS_DICT_ENCODING_TYPE, /* T1_EncodingType */ + PS_DICT_ENCODING_ENTRY, /* FT_String* */ + + /* conventionally in the font Private dictionary */ + PS_DICT_NUM_SUBRS, /* FT_Int */ + PS_DICT_SUBR, /* FT_String* */ + PS_DICT_STD_HW, /* FT_UShort */ + PS_DICT_STD_VW, /* FT_UShort */ + PS_DICT_NUM_BLUE_VALUES, /* FT_Byte */ + PS_DICT_BLUE_VALUE, /* FT_Short */ + PS_DICT_BLUE_FUZZ, /* FT_Int */ + PS_DICT_NUM_OTHER_BLUES, /* FT_Byte */ + PS_DICT_OTHER_BLUE, /* FT_Short */ + PS_DICT_NUM_FAMILY_BLUES, /* FT_Byte */ + PS_DICT_FAMILY_BLUE, /* FT_Short */ + PS_DICT_NUM_FAMILY_OTHER_BLUES, /* FT_Byte */ + PS_DICT_FAMILY_OTHER_BLUE, /* FT_Short */ + PS_DICT_BLUE_SCALE, /* FT_Fixed */ + PS_DICT_BLUE_SHIFT, /* FT_Int */ + 
PS_DICT_NUM_STEM_SNAP_H, /* FT_Byte */ + PS_DICT_STEM_SNAP_H, /* FT_Short */ + PS_DICT_NUM_STEM_SNAP_V, /* FT_Byte */ + PS_DICT_STEM_SNAP_V, /* FT_Short */ + PS_DICT_FORCE_BOLD, /* FT_Bool */ + PS_DICT_RND_STEM_UP, /* FT_Bool */ + PS_DICT_MIN_FEATURE, /* FT_Short */ + PS_DICT_LEN_IV, /* FT_Int */ + PS_DICT_PASSWORD, /* FT_Long */ + PS_DICT_LANGUAGE_GROUP, /* FT_Long */ + + /* conventionally in the font FontInfo dictionary */ + PS_DICT_VERSION, /* FT_String* */ + PS_DICT_NOTICE, /* FT_String* */ + PS_DICT_FULL_NAME, /* FT_String* */ + PS_DICT_FAMILY_NAME, /* FT_String* */ + PS_DICT_WEIGHT, /* FT_String* */ + PS_DICT_IS_FIXED_PITCH, /* FT_Bool */ + PS_DICT_UNDERLINE_POSITION, /* FT_Short */ + PS_DICT_UNDERLINE_THICKNESS, /* FT_UShort */ + PS_DICT_FS_TYPE, /* FT_UShort */ + PS_DICT_ITALIC_ANGLE, /* FT_Long */ + + PS_DICT_MAX = PS_DICT_ITALIC_ANGLE + + } PS_Dict_Keys; + + + /************************************************************************** + * + * @function: + * FT_Get_PS_Font_Value + * + * @description: + * Retrieve the value for the supplied key from a PostScript font. + * + * @input: + * face :: + * PostScript face handle. + * + * key :: + * An enumeration value representing the dictionary key to retrieve. + * + * idx :: + * For array values, this specifies the index to be returned. + * + * value :: + * A pointer to memory into which to write the value. + * + * valen_len :: + * The size, in bytes, of the memory supplied for the value. + * + * @output: + * value :: + * The value matching the above key, if it exists. + * + * @return: + * The amount of memory (in bytes) required to hold the requested value + * (if it exists, -1 otherwise). + * + * @note: + * The values returned are not pointers into the internal structures of + * the face, but are 'fresh' copies, so that the memory containing them + * belongs to the calling application. This also enforces the + * 'read-only' nature of these values, i.e., this function cannot be + * used to manipulate the face. + * + * `value` is a void pointer because the values returned can be of + * various types. + * + * If either `value` is `NULL` or `value_len` is too small, just the + * required memory size for the requested entry is returned. + * + * The `idx` parameter is used, not only to retrieve elements of, for + * example, the FontMatrix or FontBBox, but also to retrieve name keys + * from the CharStrings dictionary, and the charstrings themselves. It + * is ignored for atomic values. + * + * `PS_DICT_BLUE_SCALE` returns a value that is scaled up by 1000. To + * get the value as in the font stream, you need to divide by 65536000.0 + * (to remove the FT_Fixed scale, and the x1000 scale). + * + * IMPORTANT: Only key/value pairs read by the FreeType interpreter can + * be retrieved. So, for example, PostScript procedures such as NP, ND, + * and RD are not available. Arbitrary keys are, obviously, not be + * available either. + * + * If the font's format is not PostScript-based, this function returns + * the `FT_Err_Invalid_Argument` error code. 
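   *
   *   A sketch of the two-call pattern described above (assuming `face`
   *   is an already opened Type~1 face, and `malloc` and `free` from the
   *   standard `stdlib.h` header):
   *
   *   ```
   *     FT_Long  size;
   *     char*    fontname;
   *
   *
   *     // a NULL buffer only queries the required size
   *     size = FT_Get_PS_Font_Value( face, PS_DICT_FONT_NAME, 0,
   *                                  NULL, 0 );
   *     if ( size > 0 )
   *     {
   *       fontname = (char*)malloc( (size_t)size );
   *       if ( fontname )
   *       {
   *         FT_Get_PS_Font_Value( face, PS_DICT_FONT_NAME, 0,
   *                               fontname, size );
   *         ...
   *         free( fontname );
   *       }
   *     }
   *   ```
   *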
+ * + * @since: + * 2.4.8 + * + */ + FT_EXPORT( FT_Long ) + FT_Get_PS_Font_Value( FT_Face face, + PS_Dict_Keys key, + FT_UInt idx, + void *value, + FT_Long value_len ); + + /* */ + +FT_END_HEADER + +#endif /* T1TABLES_H_ */ + + +/* END */ diff --git a/Example/include/freetype/ttnameid.h b/Example/include/freetype/ttnameid.h new file mode 100644 index 0000000..e31c68b --- /dev/null +++ b/Example/include/freetype/ttnameid.h @@ -0,0 +1,1235 @@ +/**************************************************************************** + * + * ttnameid.h + * + * TrueType name ID definitions (specification only). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef TTNAMEID_H_ +#define TTNAMEID_H_ + + + + +FT_BEGIN_HEADER + + + /************************************************************************** + * + * @section: + * truetype_tables + */ + + + /************************************************************************** + * + * Possible values for the 'platform' identifier code in the name records + * of an SFNT 'name' table. + * + */ + + + /************************************************************************** + * + * @enum: + * TT_PLATFORM_XXX + * + * @description: + * A list of valid values for the `platform_id` identifier code in + * @FT_CharMapRec and @FT_SfntName structures. + * + * @values: + * TT_PLATFORM_APPLE_UNICODE :: + * Used by Apple to indicate a Unicode character map and/or name entry. + * See @TT_APPLE_ID_XXX for corresponding `encoding_id` values. Note + * that name entries in this format are coded as big-endian UCS-2 + * character codes _only_. + * + * TT_PLATFORM_MACINTOSH :: + * Used by Apple to indicate a MacOS-specific charmap and/or name + * entry. See @TT_MAC_ID_XXX for corresponding `encoding_id` values. + * Note that most TrueType fonts contain an Apple roman charmap to be + * usable on MacOS systems (even if they contain a Microsoft charmap as + * well). + * + * TT_PLATFORM_ISO :: + * This value was used to specify ISO/IEC 10646 charmaps. It is + * however now deprecated. See @TT_ISO_ID_XXX for a list of + * corresponding `encoding_id` values. + * + * TT_PLATFORM_MICROSOFT :: + * Used by Microsoft to indicate Windows-specific charmaps. See + * @TT_MS_ID_XXX for a list of corresponding `encoding_id` values. + * Note that most fonts contain a Unicode charmap using + * (`TT_PLATFORM_MICROSOFT`, @TT_MS_ID_UNICODE_CS). + * + * TT_PLATFORM_CUSTOM :: + * Used to indicate application-specific charmaps. + * + * TT_PLATFORM_ADOBE :: + * This value isn't part of any font format specification, but is used + * by FreeType to report Adobe-specific charmaps in an @FT_CharMapRec + * structure. See @TT_ADOBE_ID_XXX. + */ + +#define TT_PLATFORM_APPLE_UNICODE 0 +#define TT_PLATFORM_MACINTOSH 1 +#define TT_PLATFORM_ISO 2 /* deprecated */ +#define TT_PLATFORM_MICROSOFT 3 +#define TT_PLATFORM_CUSTOM 4 +#define TT_PLATFORM_ADOBE 7 /* artificial */ + + + /************************************************************************** + * + * @enum: + * TT_APPLE_ID_XXX + * + * @description: + * A list of valid values for the `encoding_id` for + * @TT_PLATFORM_APPLE_UNICODE charmaps and name entries. 
+ * + * @values: + * TT_APPLE_ID_DEFAULT :: + * Unicode version 1.0. + * + * TT_APPLE_ID_UNICODE_1_1 :: + * Unicode 1.1; specifies Hangul characters starting at U+34xx. + * + * TT_APPLE_ID_ISO_10646 :: + * Deprecated (identical to preceding). + * + * TT_APPLE_ID_UNICODE_2_0 :: + * Unicode 2.0 and beyond (UTF-16 BMP only). + * + * TT_APPLE_ID_UNICODE_32 :: + * Unicode 3.1 and beyond, using UTF-32. + * + * TT_APPLE_ID_VARIANT_SELECTOR :: + * From Adobe, not Apple. Not a normal cmap. Specifies variations on + * a real cmap. + * + * TT_APPLE_ID_FULL_UNICODE :: + * Used for fallback fonts that provide complete Unicode coverage with + * a type~13 cmap. + */ + +#define TT_APPLE_ID_DEFAULT 0 /* Unicode 1.0 */ +#define TT_APPLE_ID_UNICODE_1_1 1 /* specify Hangul at U+34xx */ +#define TT_APPLE_ID_ISO_10646 2 /* deprecated */ +#define TT_APPLE_ID_UNICODE_2_0 3 /* or later */ +#define TT_APPLE_ID_UNICODE_32 4 /* 2.0 or later, full repertoire */ +#define TT_APPLE_ID_VARIANT_SELECTOR 5 /* variation selector data */ +#define TT_APPLE_ID_FULL_UNICODE 6 /* used with type 13 cmaps */ + + + /************************************************************************** + * + * @enum: + * TT_MAC_ID_XXX + * + * @description: + * A list of valid values for the `encoding_id` for + * @TT_PLATFORM_MACINTOSH charmaps and name entries. + */ + +#define TT_MAC_ID_ROMAN 0 +#define TT_MAC_ID_JAPANESE 1 +#define TT_MAC_ID_TRADITIONAL_CHINESE 2 +#define TT_MAC_ID_KOREAN 3 +#define TT_MAC_ID_ARABIC 4 +#define TT_MAC_ID_HEBREW 5 +#define TT_MAC_ID_GREEK 6 +#define TT_MAC_ID_RUSSIAN 7 +#define TT_MAC_ID_RSYMBOL 8 +#define TT_MAC_ID_DEVANAGARI 9 +#define TT_MAC_ID_GURMUKHI 10 +#define TT_MAC_ID_GUJARATI 11 +#define TT_MAC_ID_ORIYA 12 +#define TT_MAC_ID_BENGALI 13 +#define TT_MAC_ID_TAMIL 14 +#define TT_MAC_ID_TELUGU 15 +#define TT_MAC_ID_KANNADA 16 +#define TT_MAC_ID_MALAYALAM 17 +#define TT_MAC_ID_SINHALESE 18 +#define TT_MAC_ID_BURMESE 19 +#define TT_MAC_ID_KHMER 20 +#define TT_MAC_ID_THAI 21 +#define TT_MAC_ID_LAOTIAN 22 +#define TT_MAC_ID_GEORGIAN 23 +#define TT_MAC_ID_ARMENIAN 24 +#define TT_MAC_ID_MALDIVIAN 25 +#define TT_MAC_ID_SIMPLIFIED_CHINESE 25 +#define TT_MAC_ID_TIBETAN 26 +#define TT_MAC_ID_MONGOLIAN 27 +#define TT_MAC_ID_GEEZ 28 +#define TT_MAC_ID_SLAVIC 29 +#define TT_MAC_ID_VIETNAMESE 30 +#define TT_MAC_ID_SINDHI 31 +#define TT_MAC_ID_UNINTERP 32 + + + /************************************************************************** + * + * @enum: + * TT_ISO_ID_XXX + * + * @description: + * A list of valid values for the `encoding_id` for @TT_PLATFORM_ISO + * charmaps and name entries. + * + * Their use is now deprecated. + * + * @values: + * TT_ISO_ID_7BIT_ASCII :: + * ASCII. + * TT_ISO_ID_10646 :: + * ISO/10646. + * TT_ISO_ID_8859_1 :: + * Also known as Latin-1. + */ + +#define TT_ISO_ID_7BIT_ASCII 0 +#define TT_ISO_ID_10646 1 +#define TT_ISO_ID_8859_1 2 + + + /************************************************************************** + * + * @enum: + * TT_MS_ID_XXX + * + * @description: + * A list of valid values for the `encoding_id` for + * @TT_PLATFORM_MICROSOFT charmaps and name entries. + * + * @values: + * TT_MS_ID_SYMBOL_CS :: + * Microsoft symbol encoding. See @FT_ENCODING_MS_SYMBOL. + * + * TT_MS_ID_UNICODE_CS :: + * Microsoft WGL4 charmap, matching Unicode. See @FT_ENCODING_UNICODE. + * + * TT_MS_ID_SJIS :: + * Shift JIS Japanese encoding. See @FT_ENCODING_SJIS. + * + * TT_MS_ID_PRC :: + * Chinese encodings as used in the People's Republic of China (PRC). 
+ * This means the encodings GB~2312 and its supersets GBK and GB~18030. + * See @FT_ENCODING_PRC. + * + * TT_MS_ID_BIG_5 :: + * Traditional Chinese as used in Taiwan and Hong Kong. See + * @FT_ENCODING_BIG5. + * + * TT_MS_ID_WANSUNG :: + * Korean Extended Wansung encoding. See @FT_ENCODING_WANSUNG. + * + * TT_MS_ID_JOHAB :: + * Korean Johab encoding. See @FT_ENCODING_JOHAB. + * + * TT_MS_ID_UCS_4 :: + * UCS-4 or UTF-32 charmaps. This has been added to the OpenType + * specification version 1.4 (mid-2001). + */ + +#define TT_MS_ID_SYMBOL_CS 0 +#define TT_MS_ID_UNICODE_CS 1 +#define TT_MS_ID_SJIS 2 +#define TT_MS_ID_PRC 3 +#define TT_MS_ID_BIG_5 4 +#define TT_MS_ID_WANSUNG 5 +#define TT_MS_ID_JOHAB 6 +#define TT_MS_ID_UCS_4 10 + + /* this value is deprecated */ +#define TT_MS_ID_GB2312 TT_MS_ID_PRC + + + /************************************************************************** + * + * @enum: + * TT_ADOBE_ID_XXX + * + * @description: + * A list of valid values for the `encoding_id` for @TT_PLATFORM_ADOBE + * charmaps. This is a FreeType-specific extension! + * + * @values: + * TT_ADOBE_ID_STANDARD :: + * Adobe standard encoding. + * TT_ADOBE_ID_EXPERT :: + * Adobe expert encoding. + * TT_ADOBE_ID_CUSTOM :: + * Adobe custom encoding. + * TT_ADOBE_ID_LATIN_1 :: + * Adobe Latin~1 encoding. + */ + +#define TT_ADOBE_ID_STANDARD 0 +#define TT_ADOBE_ID_EXPERT 1 +#define TT_ADOBE_ID_CUSTOM 2 +#define TT_ADOBE_ID_LATIN_1 3 + + + /************************************************************************** + * + * @enum: + * TT_MAC_LANGID_XXX + * + * @description: + * Possible values of the language identifier field in the name records + * of the SFNT 'name' table if the 'platform' identifier code is + * @TT_PLATFORM_MACINTOSH. These values are also used as return values + * for function @FT_Get_CMap_Language_ID. 
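   *
   *   These IDs complement the `platform_id` and `encoding_id` fields of
   *   @FT_CharMapRec.  A charmap selection sketch using the platform and
   *   encoding macros defined above (assuming `face` is an already opened
   *   face and `error` an `FT_Error`):
   *
   *   ```
   *     FT_Int      n;
   *     FT_CharMap  cmap;
   *
   *
   *     for ( n = 0; n < face->num_charmaps; n++ )
   *     {
   *       cmap = face->charmaps[n];
   *
   *       if ( cmap->platform_id == TT_PLATFORM_MICROSOFT &&
   *            cmap->encoding_id == TT_MS_ID_UNICODE_CS   )
   *       {
   *         error = FT_Set_Charmap( face, cmap );
   *         break;
   *       }
   *     }
   *   ```
   *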
+ * + * The canonical source for Apple's IDs is + * + * https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html + */ + +#define TT_MAC_LANGID_ENGLISH 0 +#define TT_MAC_LANGID_FRENCH 1 +#define TT_MAC_LANGID_GERMAN 2 +#define TT_MAC_LANGID_ITALIAN 3 +#define TT_MAC_LANGID_DUTCH 4 +#define TT_MAC_LANGID_SWEDISH 5 +#define TT_MAC_LANGID_SPANISH 6 +#define TT_MAC_LANGID_DANISH 7 +#define TT_MAC_LANGID_PORTUGUESE 8 +#define TT_MAC_LANGID_NORWEGIAN 9 +#define TT_MAC_LANGID_HEBREW 10 +#define TT_MAC_LANGID_JAPANESE 11 +#define TT_MAC_LANGID_ARABIC 12 +#define TT_MAC_LANGID_FINNISH 13 +#define TT_MAC_LANGID_GREEK 14 +#define TT_MAC_LANGID_ICELANDIC 15 +#define TT_MAC_LANGID_MALTESE 16 +#define TT_MAC_LANGID_TURKISH 17 +#define TT_MAC_LANGID_CROATIAN 18 +#define TT_MAC_LANGID_CHINESE_TRADITIONAL 19 +#define TT_MAC_LANGID_URDU 20 +#define TT_MAC_LANGID_HINDI 21 +#define TT_MAC_LANGID_THAI 22 +#define TT_MAC_LANGID_KOREAN 23 +#define TT_MAC_LANGID_LITHUANIAN 24 +#define TT_MAC_LANGID_POLISH 25 +#define TT_MAC_LANGID_HUNGARIAN 26 +#define TT_MAC_LANGID_ESTONIAN 27 +#define TT_MAC_LANGID_LETTISH 28 +#define TT_MAC_LANGID_SAAMISK 29 +#define TT_MAC_LANGID_FAEROESE 30 +#define TT_MAC_LANGID_FARSI 31 +#define TT_MAC_LANGID_RUSSIAN 32 +#define TT_MAC_LANGID_CHINESE_SIMPLIFIED 33 +#define TT_MAC_LANGID_FLEMISH 34 +#define TT_MAC_LANGID_IRISH 35 +#define TT_MAC_LANGID_ALBANIAN 36 +#define TT_MAC_LANGID_ROMANIAN 37 +#define TT_MAC_LANGID_CZECH 38 +#define TT_MAC_LANGID_SLOVAK 39 +#define TT_MAC_LANGID_SLOVENIAN 40 +#define TT_MAC_LANGID_YIDDISH 41 +#define TT_MAC_LANGID_SERBIAN 42 +#define TT_MAC_LANGID_MACEDONIAN 43 +#define TT_MAC_LANGID_BULGARIAN 44 +#define TT_MAC_LANGID_UKRAINIAN 45 +#define TT_MAC_LANGID_BYELORUSSIAN 46 +#define TT_MAC_LANGID_UZBEK 47 +#define TT_MAC_LANGID_KAZAKH 48 +#define TT_MAC_LANGID_AZERBAIJANI 49 +#define TT_MAC_LANGID_AZERBAIJANI_CYRILLIC_SCRIPT 49 +#define TT_MAC_LANGID_AZERBAIJANI_ARABIC_SCRIPT 50 +#define TT_MAC_LANGID_ARMENIAN 51 +#define TT_MAC_LANGID_GEORGIAN 52 +#define TT_MAC_LANGID_MOLDAVIAN 53 +#define TT_MAC_LANGID_KIRGHIZ 54 +#define TT_MAC_LANGID_TAJIKI 55 +#define TT_MAC_LANGID_TURKMEN 56 +#define TT_MAC_LANGID_MONGOLIAN 57 +#define TT_MAC_LANGID_MONGOLIAN_MONGOLIAN_SCRIPT 57 +#define TT_MAC_LANGID_MONGOLIAN_CYRILLIC_SCRIPT 58 +#define TT_MAC_LANGID_PASHTO 59 +#define TT_MAC_LANGID_KURDISH 60 +#define TT_MAC_LANGID_KASHMIRI 61 +#define TT_MAC_LANGID_SINDHI 62 +#define TT_MAC_LANGID_TIBETAN 63 +#define TT_MAC_LANGID_NEPALI 64 +#define TT_MAC_LANGID_SANSKRIT 65 +#define TT_MAC_LANGID_MARATHI 66 +#define TT_MAC_LANGID_BENGALI 67 +#define TT_MAC_LANGID_ASSAMESE 68 +#define TT_MAC_LANGID_GUJARATI 69 +#define TT_MAC_LANGID_PUNJABI 70 +#define TT_MAC_LANGID_ORIYA 71 +#define TT_MAC_LANGID_MALAYALAM 72 +#define TT_MAC_LANGID_KANNADA 73 +#define TT_MAC_LANGID_TAMIL 74 +#define TT_MAC_LANGID_TELUGU 75 +#define TT_MAC_LANGID_SINHALESE 76 +#define TT_MAC_LANGID_BURMESE 77 +#define TT_MAC_LANGID_KHMER 78 +#define TT_MAC_LANGID_LAO 79 +#define TT_MAC_LANGID_VIETNAMESE 80 +#define TT_MAC_LANGID_INDONESIAN 81 +#define TT_MAC_LANGID_TAGALOG 82 +#define TT_MAC_LANGID_MALAY_ROMAN_SCRIPT 83 +#define TT_MAC_LANGID_MALAY_ARABIC_SCRIPT 84 +#define TT_MAC_LANGID_AMHARIC 85 +#define TT_MAC_LANGID_TIGRINYA 86 +#define TT_MAC_LANGID_GALLA 87 +#define TT_MAC_LANGID_SOMALI 88 +#define TT_MAC_LANGID_SWAHILI 89 +#define TT_MAC_LANGID_RUANDA 90 +#define TT_MAC_LANGID_RUNDI 91 +#define TT_MAC_LANGID_CHEWA 92 +#define TT_MAC_LANGID_MALAGASY 93 +#define 
TT_MAC_LANGID_ESPERANTO 94 +#define TT_MAC_LANGID_WELSH 128 +#define TT_MAC_LANGID_BASQUE 129 +#define TT_MAC_LANGID_CATALAN 130 +#define TT_MAC_LANGID_LATIN 131 +#define TT_MAC_LANGID_QUECHUA 132 +#define TT_MAC_LANGID_GUARANI 133 +#define TT_MAC_LANGID_AYMARA 134 +#define TT_MAC_LANGID_TATAR 135 +#define TT_MAC_LANGID_UIGHUR 136 +#define TT_MAC_LANGID_DZONGKHA 137 +#define TT_MAC_LANGID_JAVANESE 138 +#define TT_MAC_LANGID_SUNDANESE 139 + + /* The following codes are new as of 2000-03-10 */ +#define TT_MAC_LANGID_GALICIAN 140 +#define TT_MAC_LANGID_AFRIKAANS 141 +#define TT_MAC_LANGID_BRETON 142 +#define TT_MAC_LANGID_INUKTITUT 143 +#define TT_MAC_LANGID_SCOTTISH_GAELIC 144 +#define TT_MAC_LANGID_MANX_GAELIC 145 +#define TT_MAC_LANGID_IRISH_GAELIC 146 +#define TT_MAC_LANGID_TONGAN 147 +#define TT_MAC_LANGID_GREEK_POLYTONIC 148 +#define TT_MAC_LANGID_GREELANDIC 149 +#define TT_MAC_LANGID_AZERBAIJANI_ROMAN_SCRIPT 150 + + + /************************************************************************** + * + * @enum: + * TT_MS_LANGID_XXX + * + * @description: + * Possible values of the language identifier field in the name records + * of the SFNT 'name' table if the 'platform' identifier code is + * @TT_PLATFORM_MICROSOFT. These values are also used as return values + * for function @FT_Get_CMap_Language_ID. + * + * The canonical source for Microsoft's IDs is + * + * https://docs.microsoft.com/en-us/windows/desktop/Intl/language-identifier-constants-and-strings , + * + * however, we only provide macros for language identifiers present in + * the OpenType specification: Microsoft has abandoned the concept of + * LCIDs (language code identifiers), and format~1 of the 'name' table + * provides a better mechanism for languages not covered here. + * + * More legacy values not listed in the reference can be found in the + * @FT_TRUETYPE_IDS_H header file. 
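   *
   *   A name-table sketch using these IDs (assuming `face` is an already
   *   opened face, @FT_Get_Sfnt_Name and @FT_SfntName from the public
   *   `ftsnames.h` header, and `handle_name_record` a hypothetical
   *   consumer):
   *
   *   ```
   *     FT_UInt      count = FT_Get_Sfnt_Name_Count( face );
   *     FT_UInt      i;
   *     FT_SfntName  name;
   *
   *
   *     for ( i = 0; i < count; i++ )
   *     {
   *       if ( FT_Get_Sfnt_Name( face, i, &name ) )
   *         continue;
   *
   *       if ( name.platform_id == TT_PLATFORM_MICROSOFT              &&
   *            name.language_id == TT_MS_LANGID_ENGLISH_UNITED_STATES )
   *       {
   *         // for this platform `name.string` holds `name.string_len`
   *         // bytes of UTF-16BE text and is not NUL-terminated
   *         handle_name_record( &name );
   *       }
   *     }
   *   ```
   *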
+ */ + +#define TT_MS_LANGID_ARABIC_SAUDI_ARABIA 0x0401 +#define TT_MS_LANGID_ARABIC_IRAQ 0x0801 +#define TT_MS_LANGID_ARABIC_EGYPT 0x0C01 +#define TT_MS_LANGID_ARABIC_LIBYA 0x1001 +#define TT_MS_LANGID_ARABIC_ALGERIA 0x1401 +#define TT_MS_LANGID_ARABIC_MOROCCO 0x1801 +#define TT_MS_LANGID_ARABIC_TUNISIA 0x1C01 +#define TT_MS_LANGID_ARABIC_OMAN 0x2001 +#define TT_MS_LANGID_ARABIC_YEMEN 0x2401 +#define TT_MS_LANGID_ARABIC_SYRIA 0x2801 +#define TT_MS_LANGID_ARABIC_JORDAN 0x2C01 +#define TT_MS_LANGID_ARABIC_LEBANON 0x3001 +#define TT_MS_LANGID_ARABIC_KUWAIT 0x3401 +#define TT_MS_LANGID_ARABIC_UAE 0x3801 +#define TT_MS_LANGID_ARABIC_BAHRAIN 0x3C01 +#define TT_MS_LANGID_ARABIC_QATAR 0x4001 +#define TT_MS_LANGID_BULGARIAN_BULGARIA 0x0402 +#define TT_MS_LANGID_CATALAN_CATALAN 0x0403 +#define TT_MS_LANGID_CHINESE_TAIWAN 0x0404 +#define TT_MS_LANGID_CHINESE_PRC 0x0804 +#define TT_MS_LANGID_CHINESE_HONG_KONG 0x0C04 +#define TT_MS_LANGID_CHINESE_SINGAPORE 0x1004 +#define TT_MS_LANGID_CHINESE_MACAO 0x1404 +#define TT_MS_LANGID_CZECH_CZECH_REPUBLIC 0x0405 +#define TT_MS_LANGID_DANISH_DENMARK 0x0406 +#define TT_MS_LANGID_GERMAN_GERMANY 0x0407 +#define TT_MS_LANGID_GERMAN_SWITZERLAND 0x0807 +#define TT_MS_LANGID_GERMAN_AUSTRIA 0x0C07 +#define TT_MS_LANGID_GERMAN_LUXEMBOURG 0x1007 +#define TT_MS_LANGID_GERMAN_LIECHTENSTEIN 0x1407 +#define TT_MS_LANGID_GREEK_GREECE 0x0408 +#define TT_MS_LANGID_ENGLISH_UNITED_STATES 0x0409 +#define TT_MS_LANGID_ENGLISH_UNITED_KINGDOM 0x0809 +#define TT_MS_LANGID_ENGLISH_AUSTRALIA 0x0C09 +#define TT_MS_LANGID_ENGLISH_CANADA 0x1009 +#define TT_MS_LANGID_ENGLISH_NEW_ZEALAND 0x1409 +#define TT_MS_LANGID_ENGLISH_IRELAND 0x1809 +#define TT_MS_LANGID_ENGLISH_SOUTH_AFRICA 0x1C09 +#define TT_MS_LANGID_ENGLISH_JAMAICA 0x2009 +#define TT_MS_LANGID_ENGLISH_CARIBBEAN 0x2409 +#define TT_MS_LANGID_ENGLISH_BELIZE 0x2809 +#define TT_MS_LANGID_ENGLISH_TRINIDAD 0x2C09 +#define TT_MS_LANGID_ENGLISH_ZIMBABWE 0x3009 +#define TT_MS_LANGID_ENGLISH_PHILIPPINES 0x3409 +#define TT_MS_LANGID_ENGLISH_INDIA 0x4009 +#define TT_MS_LANGID_ENGLISH_MALAYSIA 0x4409 +#define TT_MS_LANGID_ENGLISH_SINGAPORE 0x4809 +#define TT_MS_LANGID_SPANISH_SPAIN_TRADITIONAL_SORT 0x040A +#define TT_MS_LANGID_SPANISH_MEXICO 0x080A +#define TT_MS_LANGID_SPANISH_SPAIN_MODERN_SORT 0x0C0A +#define TT_MS_LANGID_SPANISH_GUATEMALA 0x100A +#define TT_MS_LANGID_SPANISH_COSTA_RICA 0x140A +#define TT_MS_LANGID_SPANISH_PANAMA 0x180A +#define TT_MS_LANGID_SPANISH_DOMINICAN_REPUBLIC 0x1C0A +#define TT_MS_LANGID_SPANISH_VENEZUELA 0x200A +#define TT_MS_LANGID_SPANISH_COLOMBIA 0x240A +#define TT_MS_LANGID_SPANISH_PERU 0x280A +#define TT_MS_LANGID_SPANISH_ARGENTINA 0x2C0A +#define TT_MS_LANGID_SPANISH_ECUADOR 0x300A +#define TT_MS_LANGID_SPANISH_CHILE 0x340A +#define TT_MS_LANGID_SPANISH_URUGUAY 0x380A +#define TT_MS_LANGID_SPANISH_PARAGUAY 0x3C0A +#define TT_MS_LANGID_SPANISH_BOLIVIA 0x400A +#define TT_MS_LANGID_SPANISH_EL_SALVADOR 0x440A +#define TT_MS_LANGID_SPANISH_HONDURAS 0x480A +#define TT_MS_LANGID_SPANISH_NICARAGUA 0x4C0A +#define TT_MS_LANGID_SPANISH_PUERTO_RICO 0x500A +#define TT_MS_LANGID_SPANISH_UNITED_STATES 0x540A +#define TT_MS_LANGID_FINNISH_FINLAND 0x040B +#define TT_MS_LANGID_FRENCH_FRANCE 0x040C +#define TT_MS_LANGID_FRENCH_BELGIUM 0x080C +#define TT_MS_LANGID_FRENCH_CANADA 0x0C0C +#define TT_MS_LANGID_FRENCH_SWITZERLAND 0x100C +#define TT_MS_LANGID_FRENCH_LUXEMBOURG 0x140C +#define TT_MS_LANGID_FRENCH_MONACO 0x180C +#define TT_MS_LANGID_HEBREW_ISRAEL 0x040D +#define TT_MS_LANGID_HUNGARIAN_HUNGARY 0x040E +#define 
TT_MS_LANGID_ICELANDIC_ICELAND 0x040F +#define TT_MS_LANGID_ITALIAN_ITALY 0x0410 +#define TT_MS_LANGID_ITALIAN_SWITZERLAND 0x0810 +#define TT_MS_LANGID_JAPANESE_JAPAN 0x0411 +#define TT_MS_LANGID_KOREAN_KOREA 0x0412 +#define TT_MS_LANGID_DUTCH_NETHERLANDS 0x0413 +#define TT_MS_LANGID_DUTCH_BELGIUM 0x0813 +#define TT_MS_LANGID_NORWEGIAN_NORWAY_BOKMAL 0x0414 +#define TT_MS_LANGID_NORWEGIAN_NORWAY_NYNORSK 0x0814 +#define TT_MS_LANGID_POLISH_POLAND 0x0415 +#define TT_MS_LANGID_PORTUGUESE_BRAZIL 0x0416 +#define TT_MS_LANGID_PORTUGUESE_PORTUGAL 0x0816 +#define TT_MS_LANGID_ROMANSH_SWITZERLAND 0x0417 +#define TT_MS_LANGID_ROMANIAN_ROMANIA 0x0418 +#define TT_MS_LANGID_RUSSIAN_RUSSIA 0x0419 +#define TT_MS_LANGID_CROATIAN_CROATIA 0x041A +#define TT_MS_LANGID_SERBIAN_SERBIA_LATIN 0x081A +#define TT_MS_LANGID_SERBIAN_SERBIA_CYRILLIC 0x0C1A +#define TT_MS_LANGID_CROATIAN_BOSNIA_HERZEGOVINA 0x101A +#define TT_MS_LANGID_BOSNIAN_BOSNIA_HERZEGOVINA 0x141A +#define TT_MS_LANGID_SERBIAN_BOSNIA_HERZ_LATIN 0x181A +#define TT_MS_LANGID_SERBIAN_BOSNIA_HERZ_CYRILLIC 0x1C1A +#define TT_MS_LANGID_BOSNIAN_BOSNIA_HERZ_CYRILLIC 0x201A +#define TT_MS_LANGID_SLOVAK_SLOVAKIA 0x041B +#define TT_MS_LANGID_ALBANIAN_ALBANIA 0x041C +#define TT_MS_LANGID_SWEDISH_SWEDEN 0x041D +#define TT_MS_LANGID_SWEDISH_FINLAND 0x081D +#define TT_MS_LANGID_THAI_THAILAND 0x041E +#define TT_MS_LANGID_TURKISH_TURKEY 0x041F +#define TT_MS_LANGID_URDU_PAKISTAN 0x0420 +#define TT_MS_LANGID_INDONESIAN_INDONESIA 0x0421 +#define TT_MS_LANGID_UKRAINIAN_UKRAINE 0x0422 +#define TT_MS_LANGID_BELARUSIAN_BELARUS 0x0423 +#define TT_MS_LANGID_SLOVENIAN_SLOVENIA 0x0424 +#define TT_MS_LANGID_ESTONIAN_ESTONIA 0x0425 +#define TT_MS_LANGID_LATVIAN_LATVIA 0x0426 +#define TT_MS_LANGID_LITHUANIAN_LITHUANIA 0x0427 +#define TT_MS_LANGID_TAJIK_TAJIKISTAN 0x0428 +#define TT_MS_LANGID_VIETNAMESE_VIET_NAM 0x042A +#define TT_MS_LANGID_ARMENIAN_ARMENIA 0x042B +#define TT_MS_LANGID_AZERI_AZERBAIJAN_LATIN 0x042C +#define TT_MS_LANGID_AZERI_AZERBAIJAN_CYRILLIC 0x082C +#define TT_MS_LANGID_BASQUE_BASQUE 0x042D +#define TT_MS_LANGID_UPPER_SORBIAN_GERMANY 0x042E +#define TT_MS_LANGID_LOWER_SORBIAN_GERMANY 0x082E +#define TT_MS_LANGID_MACEDONIAN_MACEDONIA 0x042F +#define TT_MS_LANGID_SETSWANA_SOUTH_AFRICA 0x0432 +#define TT_MS_LANGID_ISIXHOSA_SOUTH_AFRICA 0x0434 +#define TT_MS_LANGID_ISIZULU_SOUTH_AFRICA 0x0435 +#define TT_MS_LANGID_AFRIKAANS_SOUTH_AFRICA 0x0436 +#define TT_MS_LANGID_GEORGIAN_GEORGIA 0x0437 +#define TT_MS_LANGID_FAEROESE_FAEROE_ISLANDS 0x0438 +#define TT_MS_LANGID_HINDI_INDIA 0x0439 +#define TT_MS_LANGID_MALTESE_MALTA 0x043A +#define TT_MS_LANGID_SAMI_NORTHERN_NORWAY 0x043B +#define TT_MS_LANGID_SAMI_NORTHERN_SWEDEN 0x083B +#define TT_MS_LANGID_SAMI_NORTHERN_FINLAND 0x0C3B +#define TT_MS_LANGID_SAMI_LULE_NORWAY 0x103B +#define TT_MS_LANGID_SAMI_LULE_SWEDEN 0x143B +#define TT_MS_LANGID_SAMI_SOUTHERN_NORWAY 0x183B +#define TT_MS_LANGID_SAMI_SOUTHERN_SWEDEN 0x1C3B +#define TT_MS_LANGID_SAMI_SKOLT_FINLAND 0x203B +#define TT_MS_LANGID_SAMI_INARI_FINLAND 0x243B +#define TT_MS_LANGID_IRISH_IRELAND 0x083C +#define TT_MS_LANGID_MALAY_MALAYSIA 0x043E +#define TT_MS_LANGID_MALAY_BRUNEI_DARUSSALAM 0x083E +#define TT_MS_LANGID_KAZAKH_KAZAKHSTAN 0x043F +#define TT_MS_LANGID_KYRGYZ_KYRGYZSTAN /* Cyrillic */ 0x0440 +#define TT_MS_LANGID_KISWAHILI_KENYA 0x0441 +#define TT_MS_LANGID_TURKMEN_TURKMENISTAN 0x0442 +#define TT_MS_LANGID_UZBEK_UZBEKISTAN_LATIN 0x0443 +#define TT_MS_LANGID_UZBEK_UZBEKISTAN_CYRILLIC 0x0843 +#define TT_MS_LANGID_TATAR_RUSSIA 0x0444 +#define 
TT_MS_LANGID_BENGALI_INDIA 0x0445 +#define TT_MS_LANGID_BENGALI_BANGLADESH 0x0845 +#define TT_MS_LANGID_PUNJABI_INDIA 0x0446 +#define TT_MS_LANGID_GUJARATI_INDIA 0x0447 +#define TT_MS_LANGID_ODIA_INDIA 0x0448 +#define TT_MS_LANGID_TAMIL_INDIA 0x0449 +#define TT_MS_LANGID_TELUGU_INDIA 0x044A +#define TT_MS_LANGID_KANNADA_INDIA 0x044B +#define TT_MS_LANGID_MALAYALAM_INDIA 0x044C +#define TT_MS_LANGID_ASSAMESE_INDIA 0x044D +#define TT_MS_LANGID_MARATHI_INDIA 0x044E +#define TT_MS_LANGID_SANSKRIT_INDIA 0x044F +#define TT_MS_LANGID_MONGOLIAN_MONGOLIA /* Cyrillic */ 0x0450 +#define TT_MS_LANGID_MONGOLIAN_PRC 0x0850 +#define TT_MS_LANGID_TIBETAN_PRC 0x0451 +#define TT_MS_LANGID_WELSH_UNITED_KINGDOM 0x0452 +#define TT_MS_LANGID_KHMER_CAMBODIA 0x0453 +#define TT_MS_LANGID_LAO_LAOS 0x0454 +#define TT_MS_LANGID_GALICIAN_GALICIAN 0x0456 +#define TT_MS_LANGID_KONKANI_INDIA 0x0457 +#define TT_MS_LANGID_SYRIAC_SYRIA 0x045A +#define TT_MS_LANGID_SINHALA_SRI_LANKA 0x045B +#define TT_MS_LANGID_INUKTITUT_CANADA 0x045D +#define TT_MS_LANGID_INUKTITUT_CANADA_LATIN 0x085D +#define TT_MS_LANGID_AMHARIC_ETHIOPIA 0x045E +#define TT_MS_LANGID_TAMAZIGHT_ALGERIA 0x085F +#define TT_MS_LANGID_NEPALI_NEPAL 0x0461 +#define TT_MS_LANGID_FRISIAN_NETHERLANDS 0x0462 +#define TT_MS_LANGID_PASHTO_AFGHANISTAN 0x0463 +#define TT_MS_LANGID_FILIPINO_PHILIPPINES 0x0464 +#define TT_MS_LANGID_DHIVEHI_MALDIVES 0x0465 +#define TT_MS_LANGID_HAUSA_NIGERIA 0x0468 +#define TT_MS_LANGID_YORUBA_NIGERIA 0x046A +#define TT_MS_LANGID_QUECHUA_BOLIVIA 0x046B +#define TT_MS_LANGID_QUECHUA_ECUADOR 0x086B +#define TT_MS_LANGID_QUECHUA_PERU 0x0C6B +#define TT_MS_LANGID_SESOTHO_SA_LEBOA_SOUTH_AFRICA 0x046C +#define TT_MS_LANGID_BASHKIR_RUSSIA 0x046D +#define TT_MS_LANGID_LUXEMBOURGISH_LUXEMBOURG 0x046E +#define TT_MS_LANGID_GREENLANDIC_GREENLAND 0x046F +#define TT_MS_LANGID_IGBO_NIGERIA 0x0470 +#define TT_MS_LANGID_YI_PRC 0x0478 +#define TT_MS_LANGID_MAPUDUNGUN_CHILE 0x047A +#define TT_MS_LANGID_MOHAWK_MOHAWK 0x047C +#define TT_MS_LANGID_BRETON_FRANCE 0x047E +#define TT_MS_LANGID_UIGHUR_PRC 0x0480 +#define TT_MS_LANGID_MAORI_NEW_ZEALAND 0x0481 +#define TT_MS_LANGID_OCCITAN_FRANCE 0x0482 +#define TT_MS_LANGID_CORSICAN_FRANCE 0x0483 +#define TT_MS_LANGID_ALSATIAN_FRANCE 0x0484 +#define TT_MS_LANGID_YAKUT_RUSSIA 0x0485 +#define TT_MS_LANGID_KICHE_GUATEMALA 0x0486 +#define TT_MS_LANGID_KINYARWANDA_RWANDA 0x0487 +#define TT_MS_LANGID_WOLOF_SENEGAL 0x0488 +#define TT_MS_LANGID_DARI_AFGHANISTAN 0x048C + + /* */ + + + /* legacy macro definitions not present in OpenType 1.8.1 */ +#define TT_MS_LANGID_ARABIC_GENERAL 0x0001 +#define TT_MS_LANGID_CATALAN_SPAIN \ + TT_MS_LANGID_CATALAN_CATALAN +#define TT_MS_LANGID_CHINESE_GENERAL 0x0004 +#define TT_MS_LANGID_CHINESE_MACAU \ + TT_MS_LANGID_CHINESE_MACAO +#define TT_MS_LANGID_GERMAN_LIECHTENSTEI \ + TT_MS_LANGID_GERMAN_LIECHTENSTEIN +#define TT_MS_LANGID_ENGLISH_GENERAL 0x0009 +#define TT_MS_LANGID_ENGLISH_INDONESIA 0x3809 +#define TT_MS_LANGID_ENGLISH_HONG_KONG 0x3C09 +#define TT_MS_LANGID_SPANISH_SPAIN_INTERNATIONAL_SORT \ + TT_MS_LANGID_SPANISH_SPAIN_MODERN_SORT +#define TT_MS_LANGID_SPANISH_LATIN_AMERICA 0xE40AU +#define TT_MS_LANGID_FRENCH_WEST_INDIES 0x1C0C +#define TT_MS_LANGID_FRENCH_REUNION 0x200C +#define TT_MS_LANGID_FRENCH_CONGO 0x240C + /* which was formerly: */ +#define TT_MS_LANGID_FRENCH_ZAIRE \ + TT_MS_LANGID_FRENCH_CONGO +#define TT_MS_LANGID_FRENCH_SENEGAL 0x280C +#define TT_MS_LANGID_FRENCH_CAMEROON 0x2C0C +#define TT_MS_LANGID_FRENCH_COTE_D_IVOIRE 0x300C +#define TT_MS_LANGID_FRENCH_MALI 0x340C 
+#define TT_MS_LANGID_FRENCH_MOROCCO 0x380C +#define TT_MS_LANGID_FRENCH_HAITI 0x3C0C +#define TT_MS_LANGID_FRENCH_NORTH_AFRICA 0xE40CU +#define TT_MS_LANGID_KOREAN_EXTENDED_WANSUNG_KOREA \ + TT_MS_LANGID_KOREAN_KOREA +#define TT_MS_LANGID_KOREAN_JOHAB_KOREA 0x0812 +#define TT_MS_LANGID_RHAETO_ROMANIC_SWITZERLAND \ + TT_MS_LANGID_ROMANSH_SWITZERLAND +#define TT_MS_LANGID_MOLDAVIAN_MOLDAVIA 0x0818 +#define TT_MS_LANGID_RUSSIAN_MOLDAVIA 0x0819 +#define TT_MS_LANGID_URDU_INDIA 0x0820 +#define TT_MS_LANGID_CLASSIC_LITHUANIAN_LITHUANIA 0x0827 +#define TT_MS_LANGID_SLOVENE_SLOVENIA \ + TT_MS_LANGID_SLOVENIAN_SLOVENIA +#define TT_MS_LANGID_FARSI_IRAN 0x0429 +#define TT_MS_LANGID_BASQUE_SPAIN \ + TT_MS_LANGID_BASQUE_BASQUE +#define TT_MS_LANGID_SORBIAN_GERMANY \ + TT_MS_LANGID_UPPER_SORBIAN_GERMANY +#define TT_MS_LANGID_SUTU_SOUTH_AFRICA 0x0430 +#define TT_MS_LANGID_TSONGA_SOUTH_AFRICA 0x0431 +#define TT_MS_LANGID_TSWANA_SOUTH_AFRICA \ + TT_MS_LANGID_SETSWANA_SOUTH_AFRICA +#define TT_MS_LANGID_VENDA_SOUTH_AFRICA 0x0433 +#define TT_MS_LANGID_XHOSA_SOUTH_AFRICA \ + TT_MS_LANGID_ISIXHOSA_SOUTH_AFRICA +#define TT_MS_LANGID_ZULU_SOUTH_AFRICA \ + TT_MS_LANGID_ISIZULU_SOUTH_AFRICA +#define TT_MS_LANGID_SAAMI_LAPONIA 0x043B + /* the next two values are incorrectly inverted */ +#define TT_MS_LANGID_IRISH_GAELIC_IRELAND 0x043C +#define TT_MS_LANGID_SCOTTISH_GAELIC_UNITED_KINGDOM 0x083C +#define TT_MS_LANGID_YIDDISH_GERMANY 0x043D +#define TT_MS_LANGID_KAZAK_KAZAKSTAN \ + TT_MS_LANGID_KAZAKH_KAZAKHSTAN +#define TT_MS_LANGID_KIRGHIZ_KIRGHIZ_REPUBLIC \ + TT_MS_LANGID_KYRGYZ_KYRGYZSTAN +#define TT_MS_LANGID_KIRGHIZ_KIRGHIZSTAN \ + TT_MS_LANGID_KYRGYZ_KYRGYZSTAN +#define TT_MS_LANGID_SWAHILI_KENYA \ + TT_MS_LANGID_KISWAHILI_KENYA +#define TT_MS_LANGID_TATAR_TATARSTAN \ + TT_MS_LANGID_TATAR_RUSSIA +#define TT_MS_LANGID_PUNJABI_ARABIC_PAKISTAN 0x0846 +#define TT_MS_LANGID_ORIYA_INDIA \ + TT_MS_LANGID_ODIA_INDIA +#define TT_MS_LANGID_MONGOLIAN_MONGOLIA_MONGOLIAN \ + TT_MS_LANGID_MONGOLIAN_PRC +#define TT_MS_LANGID_TIBETAN_CHINA \ + TT_MS_LANGID_TIBETAN_PRC +#define TT_MS_LANGID_DZONGHKA_BHUTAN 0x0851 +#define TT_MS_LANGID_TIBETAN_BHUTAN \ + TT_MS_LANGID_DZONGHKA_BHUTAN +#define TT_MS_LANGID_WELSH_WALES \ + TT_MS_LANGID_WELSH_UNITED_KINGDOM +#define TT_MS_LANGID_BURMESE_MYANMAR 0x0455 +#define TT_MS_LANGID_GALICIAN_SPAIN \ + TT_MS_LANGID_GALICIAN_GALICIAN +#define TT_MS_LANGID_MANIPURI_INDIA /* Bengali */ 0x0458 +#define TT_MS_LANGID_SINDHI_INDIA /* Arabic */ 0x0459 +#define TT_MS_LANGID_SINDHI_PAKISTAN 0x0859 +#define TT_MS_LANGID_SINHALESE_SRI_LANKA \ + TT_MS_LANGID_SINHALA_SRI_LANKA +#define TT_MS_LANGID_CHEROKEE_UNITED_STATES 0x045C +#define TT_MS_LANGID_TAMAZIGHT_MOROCCO /* Arabic */ 0x045F +#define TT_MS_LANGID_TAMAZIGHT_MOROCCO_LATIN \ + TT_MS_LANGID_TAMAZIGHT_ALGERIA +#define TT_MS_LANGID_KASHMIRI_PAKISTAN /* Arabic */ 0x0460 +#define TT_MS_LANGID_KASHMIRI_SASIA 0x0860 +#define TT_MS_LANGID_KASHMIRI_INDIA \ + TT_MS_LANGID_KASHMIRI_SASIA +#define TT_MS_LANGID_NEPALI_INDIA 0x0861 +#define TT_MS_LANGID_DIVEHI_MALDIVES \ + TT_MS_LANGID_DHIVEHI_MALDIVES +#define TT_MS_LANGID_EDO_NIGERIA 0x0466 +#define TT_MS_LANGID_FULFULDE_NIGERIA 0x0467 +#define TT_MS_LANGID_IBIBIO_NIGERIA 0x0469 +#define TT_MS_LANGID_SEPEDI_SOUTH_AFRICA \ + TT_MS_LANGID_SESOTHO_SA_LEBOA_SOUTH_AFRICA +#define TT_MS_LANGID_SOTHO_SOUTHERN_SOUTH_AFRICA \ + TT_MS_LANGID_SESOTHO_SA_LEBOA_SOUTH_AFRICA +#define TT_MS_LANGID_KANURI_NIGERIA 0x0471 +#define TT_MS_LANGID_OROMO_ETHIOPIA 0x0472 +#define TT_MS_LANGID_TIGRIGNA_ETHIOPIA 0x0473 +#define 
TT_MS_LANGID_TIGRIGNA_ERYTHREA 0x0873 +#define TT_MS_LANGID_TIGRIGNA_ERYTREA \ + TT_MS_LANGID_TIGRIGNA_ERYTHREA +#define TT_MS_LANGID_GUARANI_PARAGUAY 0x0474 +#define TT_MS_LANGID_HAWAIIAN_UNITED_STATES 0x0475 +#define TT_MS_LANGID_LATIN 0x0476 +#define TT_MS_LANGID_SOMALI_SOMALIA 0x0477 +#define TT_MS_LANGID_YI_CHINA \ + TT_MS_LANGID_YI_PRC +#define TT_MS_LANGID_PAPIAMENTU_NETHERLANDS_ANTILLES 0x0479 +#define TT_MS_LANGID_UIGHUR_CHINA \ + TT_MS_LANGID_UIGHUR_PRC + + + /************************************************************************** + * + * @enum: + * TT_NAME_ID_XXX + * + * @description: + * Possible values of the 'name' identifier field in the name records of + * an SFNT 'name' table. These values are platform independent. + */ + +#define TT_NAME_ID_COPYRIGHT 0 +#define TT_NAME_ID_FONT_FAMILY 1 +#define TT_NAME_ID_FONT_SUBFAMILY 2 +#define TT_NAME_ID_UNIQUE_ID 3 +#define TT_NAME_ID_FULL_NAME 4 +#define TT_NAME_ID_VERSION_STRING 5 +#define TT_NAME_ID_PS_NAME 6 +#define TT_NAME_ID_TRADEMARK 7 + + /* the following values are from the OpenType spec */ +#define TT_NAME_ID_MANUFACTURER 8 +#define TT_NAME_ID_DESIGNER 9 +#define TT_NAME_ID_DESCRIPTION 10 +#define TT_NAME_ID_VENDOR_URL 11 +#define TT_NAME_ID_DESIGNER_URL 12 +#define TT_NAME_ID_LICENSE 13 +#define TT_NAME_ID_LICENSE_URL 14 + /* number 15 is reserved */ +#define TT_NAME_ID_TYPOGRAPHIC_FAMILY 16 +#define TT_NAME_ID_TYPOGRAPHIC_SUBFAMILY 17 +#define TT_NAME_ID_MAC_FULL_NAME 18 + + /* The following code is new as of 2000-01-21 */ +#define TT_NAME_ID_SAMPLE_TEXT 19 + + /* This is new in OpenType 1.3 */ +#define TT_NAME_ID_CID_FINDFONT_NAME 20 + + /* This is new in OpenType 1.5 */ +#define TT_NAME_ID_WWS_FAMILY 21 +#define TT_NAME_ID_WWS_SUBFAMILY 22 + + /* This is new in OpenType 1.7 */ +#define TT_NAME_ID_LIGHT_BACKGROUND 23 +#define TT_NAME_ID_DARK_BACKGROUND 24 + + /* This is new in OpenType 1.8 */ +#define TT_NAME_ID_VARIATIONS_PREFIX 25 + + /* these two values are deprecated */ +#define TT_NAME_ID_PREFERRED_FAMILY TT_NAME_ID_TYPOGRAPHIC_FAMILY +#define TT_NAME_ID_PREFERRED_SUBFAMILY TT_NAME_ID_TYPOGRAPHIC_SUBFAMILY + + + /************************************************************************** + * + * @enum: + * TT_UCR_XXX + * + * @description: + * Possible bit mask values for the `ulUnicodeRangeX` fields in an SFNT + * 'OS/2' table. 
+ */ + + /* ulUnicodeRange1 */ + /* --------------- */ + + /* Bit 0 Basic Latin */ +#define TT_UCR_BASIC_LATIN (1L << 0) /* U+0020-U+007E */ + /* Bit 1 C1 Controls and Latin-1 Supplement */ +#define TT_UCR_LATIN1_SUPPLEMENT (1L << 1) /* U+0080-U+00FF */ + /* Bit 2 Latin Extended-A */ +#define TT_UCR_LATIN_EXTENDED_A (1L << 2) /* U+0100-U+017F */ + /* Bit 3 Latin Extended-B */ +#define TT_UCR_LATIN_EXTENDED_B (1L << 3) /* U+0180-U+024F */ + /* Bit 4 IPA Extensions */ + /* Phonetic Extensions */ + /* Phonetic Extensions Supplement */ +#define TT_UCR_IPA_EXTENSIONS (1L << 4) /* U+0250-U+02AF */ + /* U+1D00-U+1D7F */ + /* U+1D80-U+1DBF */ + /* Bit 5 Spacing Modifier Letters */ + /* Modifier Tone Letters */ +#define TT_UCR_SPACING_MODIFIER (1L << 5) /* U+02B0-U+02FF */ + /* U+A700-U+A71F */ + /* Bit 6 Combining Diacritical Marks */ + /* Combining Diacritical Marks Supplement */ +#define TT_UCR_COMBINING_DIACRITICAL_MARKS (1L << 6) /* U+0300-U+036F */ + /* U+1DC0-U+1DFF */ + /* Bit 7 Greek and Coptic */ +#define TT_UCR_GREEK (1L << 7) /* U+0370-U+03FF */ + /* Bit 8 Coptic */ +#define TT_UCR_COPTIC (1L << 8) /* U+2C80-U+2CFF */ + /* Bit 9 Cyrillic */ + /* Cyrillic Supplement */ + /* Cyrillic Extended-A */ + /* Cyrillic Extended-B */ +#define TT_UCR_CYRILLIC (1L << 9) /* U+0400-U+04FF */ + /* U+0500-U+052F */ + /* U+2DE0-U+2DFF */ + /* U+A640-U+A69F */ + /* Bit 10 Armenian */ +#define TT_UCR_ARMENIAN (1L << 10) /* U+0530-U+058F */ + /* Bit 11 Hebrew */ +#define TT_UCR_HEBREW (1L << 11) /* U+0590-U+05FF */ + /* Bit 12 Vai */ +#define TT_UCR_VAI (1L << 12) /* U+A500-U+A63F */ + /* Bit 13 Arabic */ + /* Arabic Supplement */ +#define TT_UCR_ARABIC (1L << 13) /* U+0600-U+06FF */ + /* U+0750-U+077F */ + /* Bit 14 NKo */ +#define TT_UCR_NKO (1L << 14) /* U+07C0-U+07FF */ + /* Bit 15 Devanagari */ +#define TT_UCR_DEVANAGARI (1L << 15) /* U+0900-U+097F */ + /* Bit 16 Bengali */ +#define TT_UCR_BENGALI (1L << 16) /* U+0980-U+09FF */ + /* Bit 17 Gurmukhi */ +#define TT_UCR_GURMUKHI (1L << 17) /* U+0A00-U+0A7F */ + /* Bit 18 Gujarati */ +#define TT_UCR_GUJARATI (1L << 18) /* U+0A80-U+0AFF */ + /* Bit 19 Oriya */ +#define TT_UCR_ORIYA (1L << 19) /* U+0B00-U+0B7F */ + /* Bit 20 Tamil */ +#define TT_UCR_TAMIL (1L << 20) /* U+0B80-U+0BFF */ + /* Bit 21 Telugu */ +#define TT_UCR_TELUGU (1L << 21) /* U+0C00-U+0C7F */ + /* Bit 22 Kannada */ +#define TT_UCR_KANNADA (1L << 22) /* U+0C80-U+0CFF */ + /* Bit 23 Malayalam */ +#define TT_UCR_MALAYALAM (1L << 23) /* U+0D00-U+0D7F */ + /* Bit 24 Thai */ +#define TT_UCR_THAI (1L << 24) /* U+0E00-U+0E7F */ + /* Bit 25 Lao */ +#define TT_UCR_LAO (1L << 25) /* U+0E80-U+0EFF */ + /* Bit 26 Georgian */ + /* Georgian Supplement */ +#define TT_UCR_GEORGIAN (1L << 26) /* U+10A0-U+10FF */ + /* U+2D00-U+2D2F */ + /* Bit 27 Balinese */ +#define TT_UCR_BALINESE (1L << 27) /* U+1B00-U+1B7F */ + /* Bit 28 Hangul Jamo */ +#define TT_UCR_HANGUL_JAMO (1L << 28) /* U+1100-U+11FF */ + /* Bit 29 Latin Extended Additional */ + /* Latin Extended-C */ + /* Latin Extended-D */ +#define TT_UCR_LATIN_EXTENDED_ADDITIONAL (1L << 29) /* U+1E00-U+1EFF */ + /* U+2C60-U+2C7F */ + /* U+A720-U+A7FF */ + /* Bit 30 Greek Extended */ +#define TT_UCR_GREEK_EXTENDED (1L << 30) /* U+1F00-U+1FFF */ + /* Bit 31 General Punctuation */ + /* Supplemental Punctuation */ +#define TT_UCR_GENERAL_PUNCTUATION (1L << 31) /* U+2000-U+206F */ + /* U+2E00-U+2E7F */ + + /* ulUnicodeRange2 */ + /* --------------- */ + + /* Bit 32 Superscripts And Subscripts */ +#define TT_UCR_SUPERSCRIPTS_SUBSCRIPTS (1L << 0) /* U+2070-U+209F */ + 
/* Bit 33 Currency Symbols */ +#define TT_UCR_CURRENCY_SYMBOLS (1L << 1) /* U+20A0-U+20CF */ + /* Bit 34 Combining Diacritical Marks For Symbols */ +#define TT_UCR_COMBINING_DIACRITICAL_MARKS_SYMB \ + (1L << 2) /* U+20D0-U+20FF */ + /* Bit 35 Letterlike Symbols */ +#define TT_UCR_LETTERLIKE_SYMBOLS (1L << 3) /* U+2100-U+214F */ + /* Bit 36 Number Forms */ +#define TT_UCR_NUMBER_FORMS (1L << 4) /* U+2150-U+218F */ + /* Bit 37 Arrows */ + /* Supplemental Arrows-A */ + /* Supplemental Arrows-B */ + /* Miscellaneous Symbols and Arrows */ +#define TT_UCR_ARROWS (1L << 5) /* U+2190-U+21FF */ + /* U+27F0-U+27FF */ + /* U+2900-U+297F */ + /* U+2B00-U+2BFF */ + /* Bit 38 Mathematical Operators */ + /* Supplemental Mathematical Operators */ + /* Miscellaneous Mathematical Symbols-A */ + /* Miscellaneous Mathematical Symbols-B */ +#define TT_UCR_MATHEMATICAL_OPERATORS (1L << 6) /* U+2200-U+22FF */ + /* U+2A00-U+2AFF */ + /* U+27C0-U+27EF */ + /* U+2980-U+29FF */ + /* Bit 39 Miscellaneous Technical */ +#define TT_UCR_MISCELLANEOUS_TECHNICAL (1L << 7) /* U+2300-U+23FF */ + /* Bit 40 Control Pictures */ +#define TT_UCR_CONTROL_PICTURES (1L << 8) /* U+2400-U+243F */ + /* Bit 41 Optical Character Recognition */ +#define TT_UCR_OCR (1L << 9) /* U+2440-U+245F */ + /* Bit 42 Enclosed Alphanumerics */ +#define TT_UCR_ENCLOSED_ALPHANUMERICS (1L << 10) /* U+2460-U+24FF */ + /* Bit 43 Box Drawing */ +#define TT_UCR_BOX_DRAWING (1L << 11) /* U+2500-U+257F */ + /* Bit 44 Block Elements */ +#define TT_UCR_BLOCK_ELEMENTS (1L << 12) /* U+2580-U+259F */ + /* Bit 45 Geometric Shapes */ +#define TT_UCR_GEOMETRIC_SHAPES (1L << 13) /* U+25A0-U+25FF */ + /* Bit 46 Miscellaneous Symbols */ +#define TT_UCR_MISCELLANEOUS_SYMBOLS (1L << 14) /* U+2600-U+26FF */ + /* Bit 47 Dingbats */ +#define TT_UCR_DINGBATS (1L << 15) /* U+2700-U+27BF */ + /* Bit 48 CJK Symbols and Punctuation */ +#define TT_UCR_CJK_SYMBOLS (1L << 16) /* U+3000-U+303F */ + /* Bit 49 Hiragana */ +#define TT_UCR_HIRAGANA (1L << 17) /* U+3040-U+309F */ + /* Bit 50 Katakana */ + /* Katakana Phonetic Extensions */ +#define TT_UCR_KATAKANA (1L << 18) /* U+30A0-U+30FF */ + /* U+31F0-U+31FF */ + /* Bit 51 Bopomofo */ + /* Bopomofo Extended */ +#define TT_UCR_BOPOMOFO (1L << 19) /* U+3100-U+312F */ + /* U+31A0-U+31BF */ + /* Bit 52 Hangul Compatibility Jamo */ +#define TT_UCR_HANGUL_COMPATIBILITY_JAMO (1L << 20) /* U+3130-U+318F */ + /* Bit 53 Phags-Pa */ +#define TT_UCR_CJK_MISC (1L << 21) /* U+A840-U+A87F */ +#define TT_UCR_KANBUN TT_UCR_CJK_MISC /* deprecated */ +#define TT_UCR_PHAGSPA + /* Bit 54 Enclosed CJK Letters and Months */ +#define TT_UCR_ENCLOSED_CJK_LETTERS_MONTHS (1L << 22) /* U+3200-U+32FF */ + /* Bit 55 CJK Compatibility */ +#define TT_UCR_CJK_COMPATIBILITY (1L << 23) /* U+3300-U+33FF */ + /* Bit 56 Hangul Syllables */ +#define TT_UCR_HANGUL (1L << 24) /* U+AC00-U+D7A3 */ + /* Bit 57 High Surrogates */ + /* High Private Use Surrogates */ + /* Low Surrogates */ + + /* According to OpenType specs v.1.3+, */ + /* setting bit 57 implies that there is */ + /* at least one codepoint beyond the */ + /* Basic Multilingual Plane that is */ + /* supported by this font. So it really */ + /* means >= U+10000. 
*/ +#define TT_UCR_SURROGATES (1L << 25) /* U+D800-U+DB7F */ + /* U+DB80-U+DBFF */ + /* U+DC00-U+DFFF */ +#define TT_UCR_NON_PLANE_0 TT_UCR_SURROGATES + /* Bit 58 Phoenician */ +#define TT_UCR_PHOENICIAN (1L << 26) /*U+10900-U+1091F*/ + /* Bit 59 CJK Unified Ideographs */ + /* CJK Radicals Supplement */ + /* Kangxi Radicals */ + /* Ideographic Description Characters */ + /* CJK Unified Ideographs Extension A */ + /* CJK Unified Ideographs Extension B */ + /* Kanbun */ +#define TT_UCR_CJK_UNIFIED_IDEOGRAPHS (1L << 27) /* U+4E00-U+9FFF */ + /* U+2E80-U+2EFF */ + /* U+2F00-U+2FDF */ + /* U+2FF0-U+2FFF */ + /* U+3400-U+4DB5 */ + /*U+20000-U+2A6DF*/ + /* U+3190-U+319F */ + /* Bit 60 Private Use */ +#define TT_UCR_PRIVATE_USE (1L << 28) /* U+E000-U+F8FF */ + /* Bit 61 CJK Strokes */ + /* CJK Compatibility Ideographs */ + /* CJK Compatibility Ideographs Supplement */ +#define TT_UCR_CJK_COMPATIBILITY_IDEOGRAPHS (1L << 29) /* U+31C0-U+31EF */ + /* U+F900-U+FAFF */ + /*U+2F800-U+2FA1F*/ + /* Bit 62 Alphabetic Presentation Forms */ +#define TT_UCR_ALPHABETIC_PRESENTATION_FORMS (1L << 30) /* U+FB00-U+FB4F */ + /* Bit 63 Arabic Presentation Forms-A */ +#define TT_UCR_ARABIC_PRESENTATION_FORMS_A (1L << 31) /* U+FB50-U+FDFF */ + + /* ulUnicodeRange3 */ + /* --------------- */ + + /* Bit 64 Combining Half Marks */ +#define TT_UCR_COMBINING_HALF_MARKS (1L << 0) /* U+FE20-U+FE2F */ + /* Bit 65 Vertical forms */ + /* CJK Compatibility Forms */ +#define TT_UCR_CJK_COMPATIBILITY_FORMS (1L << 1) /* U+FE10-U+FE1F */ + /* U+FE30-U+FE4F */ + /* Bit 66 Small Form Variants */ +#define TT_UCR_SMALL_FORM_VARIANTS (1L << 2) /* U+FE50-U+FE6F */ + /* Bit 67 Arabic Presentation Forms-B */ +#define TT_UCR_ARABIC_PRESENTATION_FORMS_B (1L << 3) /* U+FE70-U+FEFE */ + /* Bit 68 Halfwidth and Fullwidth Forms */ +#define TT_UCR_HALFWIDTH_FULLWIDTH_FORMS (1L << 4) /* U+FF00-U+FFEF */ + /* Bit 69 Specials */ +#define TT_UCR_SPECIALS (1L << 5) /* U+FFF0-U+FFFD */ + /* Bit 70 Tibetan */ +#define TT_UCR_TIBETAN (1L << 6) /* U+0F00-U+0FFF */ + /* Bit 71 Syriac */ +#define TT_UCR_SYRIAC (1L << 7) /* U+0700-U+074F */ + /* Bit 72 Thaana */ +#define TT_UCR_THAANA (1L << 8) /* U+0780-U+07BF */ + /* Bit 73 Sinhala */ +#define TT_UCR_SINHALA (1L << 9) /* U+0D80-U+0DFF */ + /* Bit 74 Myanmar */ +#define TT_UCR_MYANMAR (1L << 10) /* U+1000-U+109F */ + /* Bit 75 Ethiopic */ + /* Ethiopic Supplement */ + /* Ethiopic Extended */ +#define TT_UCR_ETHIOPIC (1L << 11) /* U+1200-U+137F */ + /* U+1380-U+139F */ + /* U+2D80-U+2DDF */ + /* Bit 76 Cherokee */ +#define TT_UCR_CHEROKEE (1L << 12) /* U+13A0-U+13FF */ + /* Bit 77 Unified Canadian Aboriginal Syllabics */ +#define TT_UCR_CANADIAN_ABORIGINAL_SYLLABICS (1L << 13) /* U+1400-U+167F */ + /* Bit 78 Ogham */ +#define TT_UCR_OGHAM (1L << 14) /* U+1680-U+169F */ + /* Bit 79 Runic */ +#define TT_UCR_RUNIC (1L << 15) /* U+16A0-U+16FF */ + /* Bit 80 Khmer */ + /* Khmer Symbols */ +#define TT_UCR_KHMER (1L << 16) /* U+1780-U+17FF */ + /* U+19E0-U+19FF */ + /* Bit 81 Mongolian */ +#define TT_UCR_MONGOLIAN (1L << 17) /* U+1800-U+18AF */ + /* Bit 82 Braille Patterns */ +#define TT_UCR_BRAILLE (1L << 18) /* U+2800-U+28FF */ + /* Bit 83 Yi Syllables */ + /* Yi Radicals */ +#define TT_UCR_YI (1L << 19) /* U+A000-U+A48F */ + /* U+A490-U+A4CF */ + /* Bit 84 Tagalog */ + /* Hanunoo */ + /* Buhid */ + /* Tagbanwa */ +#define TT_UCR_PHILIPPINE (1L << 20) /* U+1700-U+171F */ + /* U+1720-U+173F */ + /* U+1740-U+175F */ + /* U+1760-U+177F */ + /* Bit 85 Old Italic */ +#define TT_UCR_OLD_ITALIC (1L << 21) 
/*U+10300-U+1032F*/ + /* Bit 86 Gothic */ +#define TT_UCR_GOTHIC (1L << 22) /*U+10330-U+1034F*/ + /* Bit 87 Deseret */ +#define TT_UCR_DESERET (1L << 23) /*U+10400-U+1044F*/ + /* Bit 88 Byzantine Musical Symbols */ + /* Musical Symbols */ + /* Ancient Greek Musical Notation */ +#define TT_UCR_MUSICAL_SYMBOLS (1L << 24) /*U+1D000-U+1D0FF*/ + /*U+1D100-U+1D1FF*/ + /*U+1D200-U+1D24F*/ + /* Bit 89 Mathematical Alphanumeric Symbols */ +#define TT_UCR_MATH_ALPHANUMERIC_SYMBOLS (1L << 25) /*U+1D400-U+1D7FF*/ + /* Bit 90 Private Use (plane 15) */ + /* Private Use (plane 16) */ +#define TT_UCR_PRIVATE_USE_SUPPLEMENTARY (1L << 26) /*U+F0000-U+FFFFD*/ + /*U+100000-U+10FFFD*/ + /* Bit 91 Variation Selectors */ + /* Variation Selectors Supplement */ +#define TT_UCR_VARIATION_SELECTORS (1L << 27) /* U+FE00-U+FE0F */ + /*U+E0100-U+E01EF*/ + /* Bit 92 Tags */ +#define TT_UCR_TAGS (1L << 28) /*U+E0000-U+E007F*/ + /* Bit 93 Limbu */ +#define TT_UCR_LIMBU (1L << 29) /* U+1900-U+194F */ + /* Bit 94 Tai Le */ +#define TT_UCR_TAI_LE (1L << 30) /* U+1950-U+197F */ + /* Bit 95 New Tai Lue */ +#define TT_UCR_NEW_TAI_LUE (1L << 31) /* U+1980-U+19DF */ + + /* ulUnicodeRange4 */ + /* --------------- */ + + /* Bit 96 Buginese */ +#define TT_UCR_BUGINESE (1L << 0) /* U+1A00-U+1A1F */ + /* Bit 97 Glagolitic */ +#define TT_UCR_GLAGOLITIC (1L << 1) /* U+2C00-U+2C5F */ + /* Bit 98 Tifinagh */ +#define TT_UCR_TIFINAGH (1L << 2) /* U+2D30-U+2D7F */ + /* Bit 99 Yijing Hexagram Symbols */ +#define TT_UCR_YIJING (1L << 3) /* U+4DC0-U+4DFF */ + /* Bit 100 Syloti Nagri */ +#define TT_UCR_SYLOTI_NAGRI (1L << 4) /* U+A800-U+A82F */ + /* Bit 101 Linear B Syllabary */ + /* Linear B Ideograms */ + /* Aegean Numbers */ +#define TT_UCR_LINEAR_B (1L << 5) /*U+10000-U+1007F*/ + /*U+10080-U+100FF*/ + /*U+10100-U+1013F*/ + /* Bit 102 Ancient Greek Numbers */ +#define TT_UCR_ANCIENT_GREEK_NUMBERS (1L << 6) /*U+10140-U+1018F*/ + /* Bit 103 Ugaritic */ +#define TT_UCR_UGARITIC (1L << 7) /*U+10380-U+1039F*/ + /* Bit 104 Old Persian */ +#define TT_UCR_OLD_PERSIAN (1L << 8) /*U+103A0-U+103DF*/ + /* Bit 105 Shavian */ +#define TT_UCR_SHAVIAN (1L << 9) /*U+10450-U+1047F*/ + /* Bit 106 Osmanya */ +#define TT_UCR_OSMANYA (1L << 10) /*U+10480-U+104AF*/ + /* Bit 107 Cypriot Syllabary */ +#define TT_UCR_CYPRIOT_SYLLABARY (1L << 11) /*U+10800-U+1083F*/ + /* Bit 108 Kharoshthi */ +#define TT_UCR_KHAROSHTHI (1L << 12) /*U+10A00-U+10A5F*/ + /* Bit 109 Tai Xuan Jing Symbols */ +#define TT_UCR_TAI_XUAN_JING (1L << 13) /*U+1D300-U+1D35F*/ + /* Bit 110 Cuneiform */ + /* Cuneiform Numbers and Punctuation */ +#define TT_UCR_CUNEIFORM (1L << 14) /*U+12000-U+123FF*/ + /*U+12400-U+1247F*/ + /* Bit 111 Counting Rod Numerals */ +#define TT_UCR_COUNTING_ROD_NUMERALS (1L << 15) /*U+1D360-U+1D37F*/ + /* Bit 112 Sundanese */ +#define TT_UCR_SUNDANESE (1L << 16) /* U+1B80-U+1BBF */ + /* Bit 113 Lepcha */ +#define TT_UCR_LEPCHA (1L << 17) /* U+1C00-U+1C4F */ + /* Bit 114 Ol Chiki */ +#define TT_UCR_OL_CHIKI (1L << 18) /* U+1C50-U+1C7F */ + /* Bit 115 Saurashtra */ +#define TT_UCR_SAURASHTRA (1L << 19) /* U+A880-U+A8DF */ + /* Bit 116 Kayah Li */ +#define TT_UCR_KAYAH_LI (1L << 20) /* U+A900-U+A92F */ + /* Bit 117 Rejang */ +#define TT_UCR_REJANG (1L << 21) /* U+A930-U+A95F */ + /* Bit 118 Cham */ +#define TT_UCR_CHAM (1L << 22) /* U+AA00-U+AA5F */ + /* Bit 119 Ancient Symbols */ +#define TT_UCR_ANCIENT_SYMBOLS (1L << 23) /*U+10190-U+101CF*/ + /* Bit 120 Phaistos Disc */ +#define TT_UCR_PHAISTOS_DISC (1L << 24) /*U+101D0-U+101FF*/ + /* Bit 121 Carian */ + /* Lycian */ + /* 
Lydian */ +#define TT_UCR_OLD_ANATOLIAN (1L << 25) /*U+102A0-U+102DF*/ + /*U+10280-U+1029F*/ + /*U+10920-U+1093F*/ + /* Bit 122 Domino Tiles */ + /* Mahjong Tiles */ +#define TT_UCR_GAME_TILES (1L << 26) /*U+1F030-U+1F09F*/ + /*U+1F000-U+1F02F*/ + /* Bit 123-127 Reserved for process-internal usage */ + + /* */ + + /* for backward compatibility with older FreeType versions */ +#define TT_UCR_ARABIC_PRESENTATION_A \ + TT_UCR_ARABIC_PRESENTATION_FORMS_A +#define TT_UCR_ARABIC_PRESENTATION_B \ + TT_UCR_ARABIC_PRESENTATION_FORMS_B + +#define TT_UCR_COMBINING_DIACRITICS \ + TT_UCR_COMBINING_DIACRITICAL_MARKS +#define TT_UCR_COMBINING_DIACRITICS_SYMB \ + TT_UCR_COMBINING_DIACRITICAL_MARKS_SYMB + + +FT_END_HEADER + +#endif /* TTNAMEID_H_ */ + + +/* END */ diff --git a/Example/include/freetype/tttables.h b/Example/include/freetype/tttables.h new file mode 100644 index 0000000..a9f60e7 --- /dev/null +++ b/Example/include/freetype/tttables.h @@ -0,0 +1,855 @@ +/**************************************************************************** + * + * tttables.h + * + * Basic SFNT/TrueType tables definitions and interface + * (specification only). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef TTTABLES_H_ +#define TTTABLES_H_ + + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." +#endif + + +FT_BEGIN_HEADER + + /************************************************************************** + * + * @section: + * truetype_tables + * + * @title: + * TrueType Tables + * + * @abstract: + * TrueType-specific table types and functions. + * + * @description: + * This section contains definitions of some basic tables specific to + * TrueType and OpenType as well as some routines used to access and + * process them. + * + * @order: + * TT_Header + * TT_HoriHeader + * TT_VertHeader + * TT_OS2 + * TT_Postscript + * TT_PCLT + * TT_MaxProfile + * + * FT_Sfnt_Tag + * FT_Get_Sfnt_Table + * FT_Load_Sfnt_Table + * FT_Sfnt_Table_Info + * + * FT_Get_CMap_Language_ID + * FT_Get_CMap_Format + * + * FT_PARAM_TAG_UNPATENTED_HINTING + * + */ + + + /************************************************************************** + * + * @struct: + * TT_Header + * + * @description: + * A structure to model a TrueType font header table. All fields follow + * the OpenType specification. The 64-bit timestamps are stored in + * two-element arrays `Created` and `Modified`, first the upper then + * the lower 32~bits. 
+ */ + typedef struct TT_Header_ + { + FT_Fixed Table_Version; + FT_Fixed Font_Revision; + + FT_Long CheckSum_Adjust; + FT_Long Magic_Number; + + FT_UShort Flags; + FT_UShort Units_Per_EM; + + FT_ULong Created [2]; + FT_ULong Modified[2]; + + FT_Short xMin; + FT_Short yMin; + FT_Short xMax; + FT_Short yMax; + + FT_UShort Mac_Style; + FT_UShort Lowest_Rec_PPEM; + + FT_Short Font_Direction; + FT_Short Index_To_Loc_Format; + FT_Short Glyph_Data_Format; + + } TT_Header; + + + /************************************************************************** + * + * @struct: + * TT_HoriHeader + * + * @description: + * A structure to model a TrueType horizontal header, the 'hhea' table, + * as well as the corresponding horizontal metrics table, 'hmtx'. + * + * @fields: + * Version :: + * The table version. + * + * Ascender :: + * The font's ascender, i.e., the distance from the baseline to the + * top-most of all glyph points found in the font. + * + * This value is invalid in many fonts, as it is usually set by the + * font designer, and often reflects only a portion of the glyphs found + * in the font (maybe ASCII). + * + * You should use the `sTypoAscender` field of the 'OS/2' table instead + * if you want the correct one. + * + * Descender :: + * The font's descender, i.e., the distance from the baseline to the + * bottom-most of all glyph points found in the font. It is negative. + * + * This value is invalid in many fonts, as it is usually set by the + * font designer, and often reflects only a portion of the glyphs found + * in the font (maybe ASCII). + * + * You should use the `sTypoDescender` field of the 'OS/2' table + * instead if you want the correct one. + * + * Line_Gap :: + * The font's line gap, i.e., the distance to add to the ascender and + * descender to get the BTB, i.e., the baseline-to-baseline distance + * for the font. + * + * advance_Width_Max :: + * This field is the maximum of all advance widths found in the font. + * It can be used to compute the maximum width of an arbitrary string + * of text. + * + * min_Left_Side_Bearing :: + * The minimum left side bearing of all glyphs within the font. + * + * min_Right_Side_Bearing :: + * The minimum right side bearing of all glyphs within the font. + * + * xMax_Extent :: + * The maximum horizontal extent (i.e., the 'width' of a glyph's + * bounding box) for all glyphs in the font. + * + * caret_Slope_Rise :: + * The rise coefficient of the cursor's slope of the cursor + * (slope=rise/run). + * + * caret_Slope_Run :: + * The run coefficient of the cursor's slope. + * + * caret_Offset :: + * The cursor's offset for slanted fonts. + * + * Reserved :: + * 8~reserved bytes. + * + * metric_Data_Format :: + * Always~0. + * + * number_Of_HMetrics :: + * Number of HMetrics entries in the 'hmtx' table -- this value can be + * smaller than the total number of glyphs in the font. + * + * long_metrics :: + * A pointer into the 'hmtx' table. + * + * short_metrics :: + * A pointer into the 'hmtx' table. + * + * @note: + * For an OpenType variation font, the values of the following fields can + * change after a call to @FT_Set_Var_Design_Coordinates (and friends) if + * the font contains an 'MVAR' table: `caret_Slope_Rise`, + * `caret_Slope_Run`, and `caret_Offset`. 
+ */ + typedef struct TT_HoriHeader_ + { + FT_Fixed Version; + FT_Short Ascender; + FT_Short Descender; + FT_Short Line_Gap; + + FT_UShort advance_Width_Max; /* advance width maximum */ + + FT_Short min_Left_Side_Bearing; /* minimum left-sb */ + FT_Short min_Right_Side_Bearing; /* minimum right-sb */ + FT_Short xMax_Extent; /* xmax extents */ + FT_Short caret_Slope_Rise; + FT_Short caret_Slope_Run; + FT_Short caret_Offset; + + FT_Short Reserved[4]; + + FT_Short metric_Data_Format; + FT_UShort number_Of_HMetrics; + + /* The following fields are not defined by the OpenType specification */ + /* but they are used to connect the metrics header to the relevant */ + /* 'hmtx' table. */ + + void* long_metrics; + void* short_metrics; + + } TT_HoriHeader; + + + /************************************************************************** + * + * @struct: + * TT_VertHeader + * + * @description: + * A structure used to model a TrueType vertical header, the 'vhea' + * table, as well as the corresponding vertical metrics table, 'vmtx'. + * + * @fields: + * Version :: + * The table version. + * + * Ascender :: + * The font's ascender, i.e., the distance from the baseline to the + * top-most of all glyph points found in the font. + * + * This value is invalid in many fonts, as it is usually set by the + * font designer, and often reflects only a portion of the glyphs found + * in the font (maybe ASCII). + * + * You should use the `sTypoAscender` field of the 'OS/2' table instead + * if you want the correct one. + * + * Descender :: + * The font's descender, i.e., the distance from the baseline to the + * bottom-most of all glyph points found in the font. It is negative. + * + * This value is invalid in many fonts, as it is usually set by the + * font designer, and often reflects only a portion of the glyphs found + * in the font (maybe ASCII). + * + * You should use the `sTypoDescender` field of the 'OS/2' table + * instead if you want the correct one. + * + * Line_Gap :: + * The font's line gap, i.e., the distance to add to the ascender and + * descender to get the BTB, i.e., the baseline-to-baseline distance + * for the font. + * + * advance_Height_Max :: + * This field is the maximum of all advance heights found in the font. + * It can be used to compute the maximum height of an arbitrary string + * of text. + * + * min_Top_Side_Bearing :: + * The minimum top side bearing of all glyphs within the font. + * + * min_Bottom_Side_Bearing :: + * The minimum bottom side bearing of all glyphs within the font. + * + * yMax_Extent :: + * The maximum vertical extent (i.e., the 'height' of a glyph's + * bounding box) for all glyphs in the font. + * + * caret_Slope_Rise :: + * The rise coefficient of the cursor's slope of the cursor + * (slope=rise/run). + * + * caret_Slope_Run :: + * The run coefficient of the cursor's slope. + * + * caret_Offset :: + * The cursor's offset for slanted fonts. + * + * Reserved :: + * 8~reserved bytes. + * + * metric_Data_Format :: + * Always~0. + * + * number_Of_VMetrics :: + * Number of VMetrics entries in the 'vmtx' table -- this value can be + * smaller than the total number of glyphs in the font. + * + * long_metrics :: + * A pointer into the 'vmtx' table. + * + * short_metrics :: + * A pointer into the 'vmtx' table. 
+ * + * @note: + * For an OpenType variation font, the values of the following fields can + * change after a call to @FT_Set_Var_Design_Coordinates (and friends) if + * the font contains an 'MVAR' table: `Ascender`, `Descender`, + * `Line_Gap`, `caret_Slope_Rise`, `caret_Slope_Run`, and `caret_Offset`. + */ + typedef struct TT_VertHeader_ + { + FT_Fixed Version; + FT_Short Ascender; + FT_Short Descender; + FT_Short Line_Gap; + + FT_UShort advance_Height_Max; /* advance height maximum */ + + FT_Short min_Top_Side_Bearing; /* minimum top-sb */ + FT_Short min_Bottom_Side_Bearing; /* minimum bottom-sb */ + FT_Short yMax_Extent; /* ymax extents */ + FT_Short caret_Slope_Rise; + FT_Short caret_Slope_Run; + FT_Short caret_Offset; + + FT_Short Reserved[4]; + + FT_Short metric_Data_Format; + FT_UShort number_Of_VMetrics; + + /* The following fields are not defined by the OpenType specification */ + /* but they are used to connect the metrics header to the relevant */ + /* 'vmtx' table. */ + + void* long_metrics; + void* short_metrics; + + } TT_VertHeader; + + + /************************************************************************** + * + * @struct: + * TT_OS2 + * + * @description: + * A structure to model a TrueType 'OS/2' table. All fields comply to + * the OpenType specification. + * + * Note that we now support old Mac fonts that do not include an 'OS/2' + * table. In this case, the `version` field is always set to 0xFFFF. + * + * @note: + * For an OpenType variation font, the values of the following fields can + * change after a call to @FT_Set_Var_Design_Coordinates (and friends) if + * the font contains an 'MVAR' table: `sCapHeight`, `sTypoAscender`, + * `sTypoDescender`, `sTypoLineGap`, `sxHeight`, `usWinAscent`, + * `usWinDescent`, `yStrikeoutPosition`, `yStrikeoutSize`, + * `ySubscriptXOffset`, `ySubScriptXSize`, `ySubscriptYOffset`, + * `ySubscriptYSize`, `ySuperscriptXOffset`, `ySuperscriptXSize`, + * `ySuperscriptYOffset`, and `ySuperscriptYSize`. + * + * Possible values for bits in the `ulUnicodeRangeX` fields are given by + * the @TT_UCR_XXX macros. 
+ */ + + typedef struct TT_OS2_ + { + FT_UShort version; /* 0x0001 - more or 0xFFFF */ + FT_Short xAvgCharWidth; + FT_UShort usWeightClass; + FT_UShort usWidthClass; + FT_UShort fsType; + FT_Short ySubscriptXSize; + FT_Short ySubscriptYSize; + FT_Short ySubscriptXOffset; + FT_Short ySubscriptYOffset; + FT_Short ySuperscriptXSize; + FT_Short ySuperscriptYSize; + FT_Short ySuperscriptXOffset; + FT_Short ySuperscriptYOffset; + FT_Short yStrikeoutSize; + FT_Short yStrikeoutPosition; + FT_Short sFamilyClass; + + FT_Byte panose[10]; + + FT_ULong ulUnicodeRange1; /* Bits 0-31 */ + FT_ULong ulUnicodeRange2; /* Bits 32-63 */ + FT_ULong ulUnicodeRange3; /* Bits 64-95 */ + FT_ULong ulUnicodeRange4; /* Bits 96-127 */ + + FT_Char achVendID[4]; + + FT_UShort fsSelection; + FT_UShort usFirstCharIndex; + FT_UShort usLastCharIndex; + FT_Short sTypoAscender; + FT_Short sTypoDescender; + FT_Short sTypoLineGap; + FT_UShort usWinAscent; + FT_UShort usWinDescent; + + /* only version 1 and higher: */ + + FT_ULong ulCodePageRange1; /* Bits 0-31 */ + FT_ULong ulCodePageRange2; /* Bits 32-63 */ + + /* only version 2 and higher: */ + + FT_Short sxHeight; + FT_Short sCapHeight; + FT_UShort usDefaultChar; + FT_UShort usBreakChar; + FT_UShort usMaxContext; + + /* only version 5 and higher: */ + + FT_UShort usLowerOpticalPointSize; /* in twips (1/20 points) */ + FT_UShort usUpperOpticalPointSize; /* in twips (1/20 points) */ + + } TT_OS2; + + + /************************************************************************** + * + * @struct: + * TT_Postscript + * + * @description: + * A structure to model a TrueType 'post' table. All fields comply to + * the OpenType specification. This structure does not reference a + * font's PostScript glyph names; use @FT_Get_Glyph_Name to retrieve + * them. + * + * @note: + * For an OpenType variation font, the values of the following fields can + * change after a call to @FT_Set_Var_Design_Coordinates (and friends) if + * the font contains an 'MVAR' table: `underlinePosition` and + * `underlineThickness`. + */ + typedef struct TT_Postscript_ + { + FT_Fixed FormatType; + FT_Fixed italicAngle; + FT_Short underlinePosition; + FT_Short underlineThickness; + FT_ULong isFixedPitch; + FT_ULong minMemType42; + FT_ULong maxMemType42; + FT_ULong minMemType1; + FT_ULong maxMemType1; + + /* Glyph names follow in the 'post' table, but we don't */ + /* load them by default. */ + + } TT_Postscript; + + + /************************************************************************** + * + * @struct: + * TT_PCLT + * + * @description: + * A structure to model a TrueType 'PCLT' table. All fields comply to + * the OpenType specification. + */ + typedef struct TT_PCLT_ + { + FT_Fixed Version; + FT_ULong FontNumber; + FT_UShort Pitch; + FT_UShort xHeight; + FT_UShort Style; + FT_UShort TypeFamily; + FT_UShort CapHeight; + FT_UShort SymbolSet; + FT_Char TypeFace[16]; + FT_Char CharacterComplement[8]; + FT_Char FileName[6]; + FT_Char StrokeWeight; + FT_Char WidthType; + FT_Byte SerifStyle; + FT_Byte Reserved; + + } TT_PCLT; + + + /************************************************************************** + * + * @struct: + * TT_MaxProfile + * + * @description: + * The maximum profile ('maxp') table contains many max values, which can + * be used to pre-allocate arrays for speeding up glyph loading and + * hinting. + * + * @fields: + * version :: + * The version number. + * + * numGlyphs :: + * The number of glyphs in this TrueType font. 
+ * + * maxPoints :: + * The maximum number of points in a non-composite TrueType glyph. See + * also `maxCompositePoints`. + * + * maxContours :: + * The maximum number of contours in a non-composite TrueType glyph. + * See also `maxCompositeContours`. + * + * maxCompositePoints :: + * The maximum number of points in a composite TrueType glyph. See + * also `maxPoints`. + * + * maxCompositeContours :: + * The maximum number of contours in a composite TrueType glyph. See + * also `maxContours`. + * + * maxZones :: + * The maximum number of zones used for glyph hinting. + * + * maxTwilightPoints :: + * The maximum number of points in the twilight zone used for glyph + * hinting. + * + * maxStorage :: + * The maximum number of elements in the storage area used for glyph + * hinting. + * + * maxFunctionDefs :: + * The maximum number of function definitions in the TrueType bytecode + * for this font. + * + * maxInstructionDefs :: + * The maximum number of instruction definitions in the TrueType + * bytecode for this font. + * + * maxStackElements :: + * The maximum number of stack elements used during bytecode + * interpretation. + * + * maxSizeOfInstructions :: + * The maximum number of TrueType opcodes used for glyph hinting. + * + * maxComponentElements :: + * The maximum number of simple (i.e., non-composite) glyphs in a + * composite glyph. + * + * maxComponentDepth :: + * The maximum nesting depth of composite glyphs. + * + * @note: + * This structure is only used during font loading. + */ + typedef struct TT_MaxProfile_ + { + FT_Fixed version; + FT_UShort numGlyphs; + FT_UShort maxPoints; + FT_UShort maxContours; + FT_UShort maxCompositePoints; + FT_UShort maxCompositeContours; + FT_UShort maxZones; + FT_UShort maxTwilightPoints; + FT_UShort maxStorage; + FT_UShort maxFunctionDefs; + FT_UShort maxInstructionDefs; + FT_UShort maxStackElements; + FT_UShort maxSizeOfInstructions; + FT_UShort maxComponentElements; + FT_UShort maxComponentDepth; + + } TT_MaxProfile; + + + /************************************************************************** + * + * @enum: + * FT_Sfnt_Tag + * + * @description: + * An enumeration to specify indices of SFNT tables loaded and parsed by + * FreeType during initialization of an SFNT font. Used in the + * @FT_Get_Sfnt_Table API function. + * + * @values: + * FT_SFNT_HEAD :: + * To access the font's @TT_Header structure. + * + * FT_SFNT_MAXP :: + * To access the font's @TT_MaxProfile structure. + * + * FT_SFNT_OS2 :: + * To access the font's @TT_OS2 structure. + * + * FT_SFNT_HHEA :: + * To access the font's @TT_HoriHeader structure. + * + * FT_SFNT_VHEA :: + * To access the font's @TT_VertHeader structure. + * + * FT_SFNT_POST :: + * To access the font's @TT_Postscript structure. + * + * FT_SFNT_PCLT :: + * To access the font's @TT_PCLT structure. 
+ */ + typedef enum FT_Sfnt_Tag_ + { + FT_SFNT_HEAD, + FT_SFNT_MAXP, + FT_SFNT_OS2, + FT_SFNT_HHEA, + FT_SFNT_VHEA, + FT_SFNT_POST, + FT_SFNT_PCLT, + + FT_SFNT_MAX + + } FT_Sfnt_Tag; + + /* these constants are deprecated; use the corresponding `FT_Sfnt_Tag` */ + /* values instead */ +#define ft_sfnt_head FT_SFNT_HEAD +#define ft_sfnt_maxp FT_SFNT_MAXP +#define ft_sfnt_os2 FT_SFNT_OS2 +#define ft_sfnt_hhea FT_SFNT_HHEA +#define ft_sfnt_vhea FT_SFNT_VHEA +#define ft_sfnt_post FT_SFNT_POST +#define ft_sfnt_pclt FT_SFNT_PCLT + + + /************************************************************************** + * + * @function: + * FT_Get_Sfnt_Table + * + * @description: + * Return a pointer to a given SFNT table stored within a face. + * + * @input: + * face :: + * A handle to the source. + * + * tag :: + * The index of the SFNT table. + * + * @return: + * A type-less pointer to the table. This will be `NULL` in case of + * error, or if the corresponding table was not found **OR** loaded from + * the file. + * + * Use a typecast according to `tag` to access the structure elements. + * + * @note: + * The table is owned by the face object and disappears with it. + * + * This function is only useful to access SFNT tables that are loaded by + * the sfnt, truetype, and opentype drivers. See @FT_Sfnt_Tag for a + * list. + * + * @example: + * Here is an example demonstrating access to the 'vhea' table. + * + * ``` + * TT_VertHeader* vert_header; + * + * + * vert_header = + * (TT_VertHeader*)FT_Get_Sfnt_Table( face, FT_SFNT_VHEA ); + * ``` + */ + FT_EXPORT( void* ) + FT_Get_Sfnt_Table( FT_Face face, + FT_Sfnt_Tag tag ); + + + /************************************************************************** + * + * @function: + * FT_Load_Sfnt_Table + * + * @description: + * Load any SFNT font table into client memory. + * + * @input: + * face :: + * A handle to the source face. + * + * tag :: + * The four-byte tag of the table to load. Use value~0 if you want to + * access the whole font file. Otherwise, you can use one of the + * definitions found in the @FT_TRUETYPE_TAGS_H file, or forge a new + * one with @FT_MAKE_TAG. + * + * offset :: + * The starting offset in the table (or file if tag~==~0). + * + * @output: + * buffer :: + * The target buffer address. The client must ensure that the memory + * array is big enough to hold the data. + * + * @inout: + * length :: + * If the `length` parameter is `NULL`, try to load the whole table. + * Return an error code if it fails. + * + * Else, if `*length` is~0, exit immediately while returning the + * table's (or file) full size in it. + * + * Else the number of bytes to read from the table or file, from the + * starting offset. + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * If you need to determine the table's length you should first call this + * function with `*length` set to~0, as in the following example: + * + * ``` + * FT_ULong length = 0; + * + * + * error = FT_Load_Sfnt_Table( face, tag, 0, NULL, &length ); + * if ( error ) { ... table does not exist ... } + * + * buffer = malloc( length ); + * if ( buffer == NULL ) { ... not enough memory ... } + * + * error = FT_Load_Sfnt_Table( face, tag, 0, buffer, &length ); + * if ( error ) { ... could not load table ... } + * ``` + * + * Note that structures like @TT_Header or @TT_OS2 can't be used with + * this function; they are limited to @FT_Get_Sfnt_Table. Reason is that + * those structures depend on the processor architecture, with varying + * size (e.g. 32bit vs. 
64bit) or order (big endian vs. little endian). + * + */ + FT_EXPORT( FT_Error ) + FT_Load_Sfnt_Table( FT_Face face, + FT_ULong tag, + FT_Long offset, + FT_Byte* buffer, + FT_ULong* length ); + + + /************************************************************************** + * + * @function: + * FT_Sfnt_Table_Info + * + * @description: + * Return information on an SFNT table. + * + * @input: + * face :: + * A handle to the source face. + * + * table_index :: + * The index of an SFNT table. The function returns + * FT_Err_Table_Missing for an invalid value. + * + * @inout: + * tag :: + * The name tag of the SFNT table. If the value is `NULL`, + * `table_index` is ignored, and `length` returns the number of SFNT + * tables in the font. + * + * @output: + * length :: + * The length of the SFNT table (or the number of SFNT tables, + * depending on `tag`). + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * While parsing fonts, FreeType handles SFNT tables with length zero as + * missing. + * + */ + FT_EXPORT( FT_Error ) + FT_Sfnt_Table_Info( FT_Face face, + FT_UInt table_index, + FT_ULong *tag, + FT_ULong *length ); + + + /************************************************************************** + * + * @function: + * FT_Get_CMap_Language_ID + * + * @description: + * Return cmap language ID as specified in the OpenType standard. + * Definitions of language ID values are in file @FT_TRUETYPE_IDS_H. + * + * @input: + * charmap :: + * The target charmap. + * + * @return: + * The language ID of `charmap`. If `charmap` doesn't belong to an SFNT + * face, just return~0 as the default value. + * + * For a format~14 cmap (to access Unicode IVS), the return value is + * 0xFFFFFFFF. + */ + FT_EXPORT( FT_ULong ) + FT_Get_CMap_Language_ID( FT_CharMap charmap ); + + + /************************************************************************** + * + * @function: + * FT_Get_CMap_Format + * + * @description: + * Return the format of an SFNT 'cmap' table. + * + * @input: + * charmap :: + * The target charmap. + * + * @return: + * The format of `charmap`. If `charmap` doesn't belong to an SFNT face, + * return -1. + */ + FT_EXPORT( FT_Long ) + FT_Get_CMap_Format( FT_CharMap charmap ); + + /* */ + + +FT_END_HEADER + +#endif /* TTTABLES_H_ */ + + +/* END */ diff --git a/Example/include/freetype/tttags.h b/Example/include/freetype/tttags.h new file mode 100644 index 0000000..9bf4fca --- /dev/null +++ b/Example/include/freetype/tttags.h @@ -0,0 +1,124 @@ +/**************************************************************************** + * + * tttags.h + * + * Tags for TrueType and OpenType tables (specification only). + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + +#ifndef TTAGS_H_ +#define TTAGS_H_ + + +#include + +#ifdef FREETYPE_H +#error "freetype.h of FreeType 1 has been loaded!" +#error "Please fix the directory search order for header files" +#error "so that freetype.h of FreeType 2 is found first." 
+#endif + + +FT_BEGIN_HEADER + + +#define TTAG_avar FT_MAKE_TAG( 'a', 'v', 'a', 'r' ) +#define TTAG_BASE FT_MAKE_TAG( 'B', 'A', 'S', 'E' ) +#define TTAG_bdat FT_MAKE_TAG( 'b', 'd', 'a', 't' ) +#define TTAG_BDF FT_MAKE_TAG( 'B', 'D', 'F', ' ' ) +#define TTAG_bhed FT_MAKE_TAG( 'b', 'h', 'e', 'd' ) +#define TTAG_bloc FT_MAKE_TAG( 'b', 'l', 'o', 'c' ) +#define TTAG_bsln FT_MAKE_TAG( 'b', 's', 'l', 'n' ) +#define TTAG_CBDT FT_MAKE_TAG( 'C', 'B', 'D', 'T' ) +#define TTAG_CBLC FT_MAKE_TAG( 'C', 'B', 'L', 'C' ) +#define TTAG_CFF FT_MAKE_TAG( 'C', 'F', 'F', ' ' ) +#define TTAG_CFF2 FT_MAKE_TAG( 'C', 'F', 'F', '2' ) +#define TTAG_CID FT_MAKE_TAG( 'C', 'I', 'D', ' ' ) +#define TTAG_cmap FT_MAKE_TAG( 'c', 'm', 'a', 'p' ) +#define TTAG_COLR FT_MAKE_TAG( 'C', 'O', 'L', 'R' ) +#define TTAG_CPAL FT_MAKE_TAG( 'C', 'P', 'A', 'L' ) +#define TTAG_cvar FT_MAKE_TAG( 'c', 'v', 'a', 'r' ) +#define TTAG_cvt FT_MAKE_TAG( 'c', 'v', 't', ' ' ) +#define TTAG_DSIG FT_MAKE_TAG( 'D', 'S', 'I', 'G' ) +#define TTAG_EBDT FT_MAKE_TAG( 'E', 'B', 'D', 'T' ) +#define TTAG_EBLC FT_MAKE_TAG( 'E', 'B', 'L', 'C' ) +#define TTAG_EBSC FT_MAKE_TAG( 'E', 'B', 'S', 'C' ) +#define TTAG_feat FT_MAKE_TAG( 'f', 'e', 'a', 't' ) +#define TTAG_FOND FT_MAKE_TAG( 'F', 'O', 'N', 'D' ) +#define TTAG_fpgm FT_MAKE_TAG( 'f', 'p', 'g', 'm' ) +#define TTAG_fvar FT_MAKE_TAG( 'f', 'v', 'a', 'r' ) +#define TTAG_gasp FT_MAKE_TAG( 'g', 'a', 's', 'p' ) +#define TTAG_GDEF FT_MAKE_TAG( 'G', 'D', 'E', 'F' ) +#define TTAG_glyf FT_MAKE_TAG( 'g', 'l', 'y', 'f' ) +#define TTAG_GPOS FT_MAKE_TAG( 'G', 'P', 'O', 'S' ) +#define TTAG_GSUB FT_MAKE_TAG( 'G', 'S', 'U', 'B' ) +#define TTAG_gvar FT_MAKE_TAG( 'g', 'v', 'a', 'r' ) +#define TTAG_HVAR FT_MAKE_TAG( 'H', 'V', 'A', 'R' ) +#define TTAG_hdmx FT_MAKE_TAG( 'h', 'd', 'm', 'x' ) +#define TTAG_head FT_MAKE_TAG( 'h', 'e', 'a', 'd' ) +#define TTAG_hhea FT_MAKE_TAG( 'h', 'h', 'e', 'a' ) +#define TTAG_hmtx FT_MAKE_TAG( 'h', 'm', 't', 'x' ) +#define TTAG_JSTF FT_MAKE_TAG( 'J', 'S', 'T', 'F' ) +#define TTAG_just FT_MAKE_TAG( 'j', 'u', 's', 't' ) +#define TTAG_kern FT_MAKE_TAG( 'k', 'e', 'r', 'n' ) +#define TTAG_lcar FT_MAKE_TAG( 'l', 'c', 'a', 'r' ) +#define TTAG_loca FT_MAKE_TAG( 'l', 'o', 'c', 'a' ) +#define TTAG_LTSH FT_MAKE_TAG( 'L', 'T', 'S', 'H' ) +#define TTAG_LWFN FT_MAKE_TAG( 'L', 'W', 'F', 'N' ) +#define TTAG_MATH FT_MAKE_TAG( 'M', 'A', 'T', 'H' ) +#define TTAG_maxp FT_MAKE_TAG( 'm', 'a', 'x', 'p' ) +#define TTAG_META FT_MAKE_TAG( 'M', 'E', 'T', 'A' ) +#define TTAG_MMFX FT_MAKE_TAG( 'M', 'M', 'F', 'X' ) +#define TTAG_MMSD FT_MAKE_TAG( 'M', 'M', 'S', 'D' ) +#define TTAG_mort FT_MAKE_TAG( 'm', 'o', 'r', 't' ) +#define TTAG_morx FT_MAKE_TAG( 'm', 'o', 'r', 'x' ) +#define TTAG_MVAR FT_MAKE_TAG( 'M', 'V', 'A', 'R' ) +#define TTAG_name FT_MAKE_TAG( 'n', 'a', 'm', 'e' ) +#define TTAG_opbd FT_MAKE_TAG( 'o', 'p', 'b', 'd' ) +#define TTAG_OS2 FT_MAKE_TAG( 'O', 'S', '/', '2' ) +#define TTAG_OTTO FT_MAKE_TAG( 'O', 'T', 'T', 'O' ) +#define TTAG_PCLT FT_MAKE_TAG( 'P', 'C', 'L', 'T' ) +#define TTAG_POST FT_MAKE_TAG( 'P', 'O', 'S', 'T' ) +#define TTAG_post FT_MAKE_TAG( 'p', 'o', 's', 't' ) +#define TTAG_prep FT_MAKE_TAG( 'p', 'r', 'e', 'p' ) +#define TTAG_prop FT_MAKE_TAG( 'p', 'r', 'o', 'p' ) +#define TTAG_sbix FT_MAKE_TAG( 's', 'b', 'i', 'x' ) +#define TTAG_sfnt FT_MAKE_TAG( 's', 'f', 'n', 't' ) +#define TTAG_SING FT_MAKE_TAG( 'S', 'I', 'N', 'G' ) +#define TTAG_SVG FT_MAKE_TAG( 'S', 'V', 'G', ' ' ) +#define TTAG_trak FT_MAKE_TAG( 't', 'r', 'a', 'k' ) +#define TTAG_true FT_MAKE_TAG( 't', 'r', 'u', 'e' ) +#define TTAG_ttc 
FT_MAKE_TAG( 't', 't', 'c', ' ' ) +#define TTAG_ttcf FT_MAKE_TAG( 't', 't', 'c', 'f' ) +#define TTAG_TYP1 FT_MAKE_TAG( 'T', 'Y', 'P', '1' ) +#define TTAG_typ1 FT_MAKE_TAG( 't', 'y', 'p', '1' ) +#define TTAG_VDMX FT_MAKE_TAG( 'V', 'D', 'M', 'X' ) +#define TTAG_vhea FT_MAKE_TAG( 'v', 'h', 'e', 'a' ) +#define TTAG_vmtx FT_MAKE_TAG( 'v', 'm', 't', 'x' ) +#define TTAG_VVAR FT_MAKE_TAG( 'V', 'V', 'A', 'R' ) +#define TTAG_wOFF FT_MAKE_TAG( 'w', 'O', 'F', 'F' ) +#define TTAG_wOF2 FT_MAKE_TAG( 'w', 'O', 'F', '2' ) + +/* used by "Keyboard.dfont" on legacy Mac OS X */ +#define TTAG_0xA5kbd FT_MAKE_TAG( 0xA5, 'k', 'b', 'd' ) + +/* used by "LastResort.dfont" on legacy Mac OS X */ +#define TTAG_0xA5lst FT_MAKE_TAG( 0xA5, 'l', 's', 't' ) + + +FT_END_HEADER + +#endif /* TTAGS_H_ */ + + +/* END */ diff --git a/Example/include/ft2build.h b/Example/include/ft2build.h new file mode 100644 index 0000000..58491ce --- /dev/null +++ b/Example/include/ft2build.h @@ -0,0 +1,42 @@ +/**************************************************************************** + * + * ft2build.h + * + * FreeType 2 build and setup macros. + * + * Copyright (C) 1996-2023 by + * David Turner, Robert Wilhelm, and Werner Lemberg. + * + * This file is part of the FreeType project, and may only be used, + * modified, and distributed under the terms of the FreeType project + * license, LICENSE.TXT. By continuing to use, modify, or distribute + * this file you indicate that you have read the license and + * understand and accept it fully. + * + */ + + + /************************************************************************** + * + * This is the 'entry point' for FreeType header file inclusions, to be + * loaded before all other header files. + * + * A typical example is + * + * ``` + * #include + * #include + * ``` + * + */ + + +#ifndef FT2BUILD_H_ +#define FT2BUILD_H_ + +#include + +#endif /* FT2BUILD_H_ */ + + +/* END */ diff --git a/Example/libraries/freetype.dll b/Example/libraries/freetype.dll new file mode 100644 index 0000000..d651953 Binary files /dev/null and b/Example/libraries/freetype.dll differ diff --git a/Example/libraries/freetype.lib b/Example/libraries/freetype.lib new file mode 100644 index 0000000..cc48d41 Binary files /dev/null and b/Example/libraries/freetype.lib differ diff --git a/Example/lucon.ttf b/Example/lucon.ttf new file mode 100644 index 0000000..db9fb00 Binary files /dev/null and b/Example/lucon.ttf differ diff --git a/Example/main.cpp b/Example/main.cpp new file mode 100644 index 0000000..f11b5d4 --- /dev/null +++ b/Example/main.cpp @@ -0,0 +1,162 @@ +#define OLC_PGE_APPLICATION +#include "olcPixelGameEngine.h" + +using namespace olc; + +// Override base class with your custom functionality +class Example : public olc::PixelGameEngine +{ +public: + + class Entity{ + protected: + Entity(vf2d pos) + :pos(pos),col(WHITE){} + vf2d pos; + Entity*copy=nullptr; + Pixel col; + public: + #define className Entity + static className*Create(vf2d pos){ + className*newEnt=new className(pos); + newEnt->copy=new className(pos); + return newEnt; + }; + virtual void Reset(){ + pos=copy->pos; + } + virtual void Update(PixelGameEngine*pge){ + pge->DrawCircle(pos,5,col); + }; + }; + + class MovingEntity:public Entity{ + public: + vf2d vel; + #define className MovingEntity + static className*Create(vf2d pos,vf2d vel){ + className*newEnt=new className(pos,vel); + newEnt->copy=new className(pos,vel); + return newEnt; + }; + virtual void Update(PixelGameEngine*pge){ + pos+=vel*pge->GetElapsedTime(); + Entity::Update(pge); + }; + + 
protected: + MovingEntity(vf2d pos,vf2d vel) + :Entity(pos),vel(vel){} + virtual void Reset()override{ + Entity::Reset(); + MovingEntity*defaultCopy=(MovingEntity*)copy; + vel=defaultCopy->vel; + } + }; + + class PlayerEntity:public MovingEntity{ + public: + #define className PlayerEntity + static className*Create(vf2d startingPos){ + className*newEnt=new className(startingPos); + newEnt->copy=new className(startingPos); + return newEnt; + }; + virtual void Update(PixelGameEngine*pge){ + vel={0,0}; + if(pge->GetKey(A).bHeld){ + vel.x-=12; + } + if(pge->GetKey(D).bHeld){ + vel.x+=12; + } + if(pge->GetKey(W).bHeld){ + vel.y-=12; + } + if(pge->GetKey(S).bHeld){ + vel.y+=12; + } + MovingEntity::Update(pge); + }; + + protected: + PlayerEntity(vf2d startingPos) + :MovingEntity(startingPos,{0,0}){ + col=BLUE; + } + virtual void Reset()override{ + MovingEntity::Reset(); + } + }; + + Example() + { + // Name your application + sAppName = "Example"; + } + + std::vectorlevel1Entities; + std::vectorlevel2Entities; + + std::vectorgameEntities; + + void LoadLevel(std::vector&entityList){ + //Clear out the old entities, we don't care about them anymore. + gameEntities.clear(); + + for(Entity*ent:entityList){ + //Reset the entity to default, so we have a fresh level to work with. + ent->Reset(); + //Load in new entities to our game play area. + gameEntities.push_back(ent); + } + } + +public: + bool OnUserCreate() override + { + // Called once at the start, so create things here + level1Entities.push_back(Entity::Create({20,80})); + level1Entities.push_back(Entity::Create({40,100})); + level1Entities.push_back(PlayerEntity::Create({20,120})); + level1Entities.push_back(MovingEntity::Create({10,70},{1,1})); + + level2Entities.push_back(Entity::Create({80,70})); + level2Entities.push_back(Entity::Create({60,90})); + level2Entities.push_back(PlayerEntity::Create({100,120})); + level2Entities.push_back(MovingEntity::Create({240,240},{-1,-1})); + return true; + } + + bool OnUserUpdate(float fElapsedTime) override + { + Clear(BLACK); + + if(GetKey(K1).bPressed){ + LoadLevel(level1Entities); + } + if(GetKey(K2).bPressed){ + LoadLevel(level2Entities); + } + if(GetKey(SPACE).bPressed){ + ((MovingEntity*)level2Entities[3])->vel={0,0}; + } + + // Called once per frame, draws random coloured pixels + for(Entity*ent:gameEntities){ + ent->Update(this); + } + + DrawString({1,1},"Press <1> to load Level 1,\nor <2> to load Level 2"); + DrawString({1,16},"WASD to move"); + return true; + } +}; + +int main() +{ + Example demo; + if (demo.Construct(256, 240, 4, 4)) + demo.Start(); + return 0; +} \ No newline at end of file diff --git a/Example/miniaudio.h b/Example/miniaudio.h new file mode 100644 index 0000000..b5dc28c --- /dev/null +++ b/Example/miniaudio.h @@ -0,0 +1,92536 @@ +/* +Audio playback and capture library. Choice of public domain or MIT-0. See license statements at the end of this file. +miniaudio - v0.11.18 - 2023-08-07 + +David Reid - mackron@gmail.com + +Website: https://miniaud.io +Documentation: https://miniaud.io/docs +GitHub: https://github.com/mackron/miniaudio +*/ + +/* +1. Introduction +=============== +miniaudio is a single file library for audio playback and capture. To use it, do the following in +one .c file: + +```c +#define MINIAUDIO_IMPLEMENTATION +#include "miniaudio.h" +``` + +You can do `#include "miniaudio.h"` in other parts of the program just like any other header. + +miniaudio includes both low level and high level APIs. 
The low level API is good for those who want +to do all of their mixing themselves and only require a light weight interface to the underlying +audio device. The high level API is good for those who have complex mixing and effect requirements. + +In miniaudio, objects are transparent structures. Unlike many other libraries, there are no handles +to opaque objects which means you need to allocate memory for objects yourself. In the examples +presented in this documentation you will often see objects declared on the stack. You need to be +careful when translating these examples to your own code so that you don't accidentally declare +your objects on the stack and then cause them to become invalid once the function returns. In +addition, you must ensure the memory address of your objects remain the same throughout their +lifetime. You therefore cannot be making copies of your objects. + +A config/init pattern is used throughout the entire library. The idea is that you set up a config +object and pass that into the initialization routine. The advantage to this system is that the +config object can be initialized with logical defaults and new properties added to it without +breaking the API. The config object can be allocated on the stack and does not need to be +maintained after initialization of the corresponding object. + + +1.1. Low Level API +------------------ +The low level API gives you access to the raw audio data of an audio device. It supports playback, +capture, full-duplex and loopback (WASAPI only). You can enumerate over devices to determine which +physical device(s) you want to connect to. + +The low level API uses the concept of a "device" as the abstraction for physical devices. The idea +is that you choose a physical device to emit or capture audio from, and then move data to/from the +device when miniaudio tells you to. Data is delivered to and from devices asynchronously via a +callback which you specify when initializing the device. + +When initializing the device you first need to configure it. The device configuration allows you to +specify things like the format of the data delivered via the callback, the size of the internal +buffer and the ID of the device you want to emit or capture audio from. + +Once you have the device configuration set up you can initialize the device. When initializing a +device you need to allocate memory for the device object beforehand. This gives the application +complete control over how the memory is allocated. In the example below we initialize a playback +device on the stack, but you could allocate it on the heap if that suits your situation better. + +```c +void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) +{ +// In playback mode copy data to pOutput. In capture mode read data from pInput. In full-duplex mode, both +// pOutput and pInput will be valid and you can move data from pInput into pOutput. Never process more than +// frameCount frames. +} + +int main() +{ +ma_device_config config = ma_device_config_init(ma_device_type_playback); +config.playback.format = ma_format_f32; // Set to ma_format_unknown to use the device's native format. +config.playback.channels = 2; // Set to 0 to use the device's native channel count. +config.sampleRate = 48000; // Set to 0 to use the device's native sample rate. +config.dataCallback = data_callback; // This function will be called when miniaudio needs more data. 
+config.pUserData = pMyCustomData; // Can be accessed from the device object (device.pUserData). + +ma_device device; +if (ma_device_init(NULL, &config, &device) != MA_SUCCESS) { +return -1; // Failed to initialize the device. +} + +ma_device_start(&device); // The device is sleeping by default so you'll need to start it manually. + +// Do something here. Probably your program's main loop. + +ma_device_uninit(&device); // This will stop the device so no need to do that manually. +return 0; +} +``` + +In the example above, `data_callback()` is where audio data is written and read from the device. +The idea is in playback mode you cause sound to be emitted from the speakers by writing audio data +to the output buffer (`pOutput` in the example). In capture mode you read data from the input +buffer (`pInput`) to extract sound captured by the microphone. The `frameCount` parameter tells you +how many frames can be written to the output buffer and read from the input buffer. A "frame" is +one sample for each channel. For example, in a stereo stream (2 channels), one frame is 2 +samples: one for the left, one for the right. The channel count is defined by the device config. +The size in bytes of an individual sample is defined by the sample format which is also specified +in the device config. Multi-channel audio data is always interleaved, which means the samples for +each frame are stored next to each other in memory. For example, in a stereo stream the first pair +of samples will be the left and right samples for the first frame, the second pair of samples will +be the left and right samples for the second frame, etc. + +The configuration of the device is defined by the `ma_device_config` structure. The config object +is always initialized with `ma_device_config_init()`. It's important to always initialize the +config with this function as it initializes it with logical defaults and ensures your program +doesn't break when new members are added to the `ma_device_config` structure. The example above +uses a fairly simple and standard device configuration. The call to `ma_device_config_init()` takes +a single parameter, which is whether or not the device is a playback, capture, duplex or loopback +device (loopback devices are not supported on all backends). The `config.playback.format` member +sets the sample format which can be one of the following (all formats are native-endian): + ++---------------+----------------------------------------+---------------------------+ +| Symbol | Description | Range | ++---------------+----------------------------------------+---------------------------+ +| ma_format_f32 | 32-bit floating point | [-1, 1] | +| ma_format_s16 | 16-bit signed integer | [-32768, 32767] | +| ma_format_s24 | 24-bit signed integer (tightly packed) | [-8388608, 8388607] | +| ma_format_s32 | 32-bit signed integer | [-2147483648, 2147483647] | +| ma_format_u8 | 8-bit unsigned integer | [0, 255] | ++---------------+----------------------------------------+---------------------------+ + +The `config.playback.channels` member sets the number of channels to use with the device. The +channel count cannot exceed MA_MAX_CHANNELS. The `config.sampleRate` member sets the sample rate +(which must be the same for both playback and capture in full-duplex configurations). This is +usually set to 44100 or 48000, but can be set to anything. It's recommended to keep this between +8000 and 384000, however. 
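+
+As a quick illustration of the frame/sample relationship described above (a sketch only;
+`bytesPerFrame` and `outputSizeInBytes` are illustrative names, while `frameCount` and `pOutput`
+refer to the callback parameters in the example), the size in bytes of the data passed through the
+callback can be computed with `ma_get_bytes_per_frame()`:
+
+```c
+// One frame is one sample for each channel. For stereo f32: 2 channels * 4 bytes = 8 bytes per frame.
+ma_uint32 bytesPerFrame = ma_get_bytes_per_frame(ma_format_f32, 2);
+
+// Total number of bytes that may be written to pOutput (or read from pInput) in one callback.
+ma_uint64 outputSizeInBytes = (ma_uint64)frameCount * bytesPerFrame;
+```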
+ +Note that leaving the format, channel count and/or sample rate at their default values will result +in the internal device's native configuration being used which is useful if you want to avoid the +overhead of miniaudio's automatic data conversion. + +In addition to the sample format, channel count and sample rate, the data callback and user data +pointer are also set via the config. The user data pointer is not passed into the callback as a +parameter, but is instead set to the `pUserData` member of `ma_device` which you can access +directly since all miniaudio structures are transparent. + +Initializing the device is done with `ma_device_init()`. This will return a result code telling you +what went wrong, if anything. On success it will return `MA_SUCCESS`. After initialization is +complete the device will be in a stopped state. To start it, use `ma_device_start()`. +Uninitializing the device will stop it, which is what the example above does, but you can also stop +the device with `ma_device_stop()`. To resume the device simply call `ma_device_start()` again. +Note that it's important to never stop or start the device from inside the callback. This will +result in a deadlock. Instead you set a variable or signal an event indicating that the device +needs to stop and handle it in a different thread. The following APIs must never be called inside +the callback: + +```c +ma_device_init() +ma_device_init_ex() +ma_device_uninit() +ma_device_start() +ma_device_stop() +``` + +You must never try uninitializing and reinitializing a device inside the callback. You must also +never try to stop and start it from inside the callback. There are a few other things you shouldn't +do in the callback depending on your requirements, however this isn't so much a thread-safety +thing, but rather a real-time processing thing which is beyond the scope of this introduction. + +The example above demonstrates the initialization of a playback device, but it works exactly the +same for capture. All you need to do is change the device type from `ma_device_type_playback` to +`ma_device_type_capture` when setting up the config, like so: + +```c +ma_device_config config = ma_device_config_init(ma_device_type_capture); +config.capture.format = MY_FORMAT; +config.capture.channels = MY_CHANNEL_COUNT; +``` + +In the data callback you just read from the input buffer (`pInput` in the example above) and leave +the output buffer alone (it will be set to NULL when the device type is set to +`ma_device_type_capture`). + +These are the available device types and how you should handle the buffers in the callback: + ++-------------------------+--------------------------------------------------------+ +| Device Type | Callback Behavior | ++-------------------------+--------------------------------------------------------+ +| ma_device_type_playback | Write to output buffer, leave input buffer untouched. | +| ma_device_type_capture | Read from input buffer, leave output buffer untouched. | +| ma_device_type_duplex | Read from input buffer, write to output buffer. | +| ma_device_type_loopback | Read from input buffer, leave output buffer untouched. | ++-------------------------+--------------------------------------------------------+ + +You will notice in the example above that the sample format and channel count is specified +separately for playback and capture. This is to support different data formats between the playback +and capture devices in a full-duplex system. 
An example may be that you want to capture audio data +as a monaural stream (one channel), but output sound to a stereo speaker system. Note that if you +use different formats between playback and capture in a full-duplex configuration you will need to +convert the data yourself. There are functions available to help you do this which will be +explained later. + +The example above did not specify a physical device to connect to which means it will use the +operating system's default device. If you have multiple physical devices connected and you want to +use a specific one you will need to specify the device ID in the configuration, like so: + +```c +config.playback.pDeviceID = pMyPlaybackDeviceID; // Only if requesting a playback or duplex device. +config.capture.pDeviceID = pMyCaptureDeviceID; // Only if requesting a capture, duplex or loopback device. +``` + +To retrieve the device ID you will need to perform device enumeration, however this requires the +use of a new concept called the "context". Conceptually speaking the context sits above the device. +There is one context to many devices. The purpose of the context is to represent the backend at a +more global level and to perform operations outside the scope of an individual device. Mainly it is +used for performing run-time linking against backend libraries, initializing backends and +enumerating devices. The example below shows how to enumerate devices. + +```c +ma_context context; +if (ma_context_init(NULL, 0, NULL, &context) != MA_SUCCESS) { +// Error. +} + +ma_device_info* pPlaybackInfos; +ma_uint32 playbackCount; +ma_device_info* pCaptureInfos; +ma_uint32 captureCount; +if (ma_context_get_devices(&context, &pPlaybackInfos, &playbackCount, &pCaptureInfos, &captureCount) != MA_SUCCESS) { +// Error. +} + +// Loop over each device info and do something with it. Here we just print the name with their index. You may want +// to give the user the opportunity to choose which device they'd prefer. +for (ma_uint32 iDevice = 0; iDevice < playbackCount; iDevice += 1) { +printf("%d - %s\n", iDevice, pPlaybackInfos[iDevice].name); +} + +ma_device_config config = ma_device_config_init(ma_device_type_playback); +config.playback.pDeviceID = &pPlaybackInfos[chosenPlaybackDeviceIndex].id; +config.playback.format = MY_FORMAT; +config.playback.channels = MY_CHANNEL_COUNT; +config.sampleRate = MY_SAMPLE_RATE; +config.dataCallback = data_callback; +config.pUserData = pMyCustomData; + +ma_device device; +if (ma_device_init(&context, &config, &device) != MA_SUCCESS) { +// Error +} + +... + +ma_device_uninit(&device); +ma_context_uninit(&context); +``` + +The first thing we do in this example is initialize a `ma_context` object with `ma_context_init()`. +The first parameter is a pointer to a list of `ma_backend` values which are used to override the +default backend priorities. When this is NULL, as in this example, miniaudio's default priorities +are used. The second parameter is the number of backends listed in the array pointed to by the +first parameter. The third parameter is a pointer to a `ma_context_config` object which can be +NULL, in which case defaults are used. The context configuration is used for setting the logging +callback, custom memory allocation callbacks, user-defined data and some backend-specific +configurations. + +Once the context has been initialized you can enumerate devices. 
In the example above we use the +simpler `ma_context_get_devices()`, however you can also use a callback for handling devices by +using `ma_context_enumerate_devices()`. When using `ma_context_get_devices()` you provide a pointer +to a pointer that will, upon output, be set to a pointer to a buffer containing a list of +`ma_device_info` structures. You also provide a pointer to an unsigned integer that will receive +the number of items in the returned buffer. Do not free the returned buffers as their memory is +managed internally by miniaudio. + +The `ma_device_info` structure contains an `id` member which is the ID you pass to the device +config. It also contains the name of the device which is useful for presenting a list of devices +to the user via the UI. + +When creating your own context you will want to pass it to `ma_device_init()` when initializing the +device. Passing in NULL, like we do in the first example, will result in miniaudio creating the +context for you, which you don't want to do since you've already created a context. Note that +internally the context is only tracked by it's pointer which means you must not change the location +of the `ma_context` object. If this is an issue, consider using `malloc()` to allocate memory for +the context. + + +1.2. High Level API +------------------- +The high level API consists of three main parts: + +* Resource management for loading and streaming sounds. +* A node graph for advanced mixing and effect processing. +* A high level "engine" that wraps around the resource manager and node graph. + +The resource manager (`ma_resource_manager`) is used for loading sounds. It supports loading sounds +fully into memory and also streaming. It will also deal with reference counting for you which +avoids the same sound being loaded multiple times. + +The node graph is used for mixing and effect processing. The idea is that you connect a number of +nodes into the graph by connecting each node's outputs to another node's inputs. Each node can +implement it's own effect. By chaining nodes together, advanced mixing and effect processing can +be achieved. + +The engine encapsulates both the resource manager and the node graph to create a simple, easy to +use high level API. The resource manager and node graph APIs are covered in more later sections of +this manual. + +The code below shows how you can initialize an engine using it's default configuration. + +```c +ma_result result; +ma_engine engine; + +result = ma_engine_init(NULL, &engine); +if (result != MA_SUCCESS) { +return result; // Failed to initialize the engine. +} +``` + +This creates an engine instance which will initialize a device internally which you can access with +`ma_engine_get_device()`. It will also initialize a resource manager for you which can be accessed +with `ma_engine_get_resource_manager()`. The engine itself is a node graph (`ma_node_graph`) which +means you can pass a pointer to the engine object into any of the `ma_node_graph` APIs (with a +cast). Alternatively, you can use `ma_engine_get_node_graph()` instead of a cast. + +Note that all objects in miniaudio, including the `ma_engine` object in the example above, are +transparent structures. There are no handles to opaque structures in miniaudio which means you need +to be mindful of how you declare them. In the example above we are declaring it on the stack, but +this will result in the struct being invalidated once the function encapsulating it returns. 
If +allocating the engine on the heap is more appropriate, you can easily do so with a standard call +to `malloc()` or whatever heap allocation routine you like: + +```c +ma_engine* pEngine = malloc(sizeof(*pEngine)); +``` + +The `ma_engine` API uses the same config/init pattern used all throughout miniaudio. To configure +an engine, you can fill out a `ma_engine_config` object and pass it into the first parameter of +`ma_engine_init()`: + +```c +ma_result result; +ma_engine engine; +ma_engine_config engineConfig; + +engineConfig = ma_engine_config_init(); +engineConfig.pResourceManager = &myCustomResourceManager; // <-- Initialized as some earlier stage. + +result = ma_engine_init(&engineConfig, &engine); +if (result != MA_SUCCESS) { +return result; +} +``` + +This creates an engine instance using a custom config. In this particular example it's showing how +you can specify a custom resource manager rather than having the engine initialize one internally. +This is particularly useful if you want to have multiple engine's share the same resource manager. + +The engine must be uninitialized with `ma_engine_uninit()` when it's no longer needed. + +By default the engine will be started, but nothing will be playing because no sounds have been +initialized. The easiest but least flexible way of playing a sound is like so: + +```c +ma_engine_play_sound(&engine, "my_sound.wav", NULL); +``` + +This plays what miniaudio calls an "inline" sound. It plays the sound once, and then puts the +internal sound up for recycling. The last parameter is used to specify which sound group the sound +should be associated with which will be explained later. This particular way of playing a sound is +simple, but lacks flexibility and features. A more flexible way of playing a sound is to first +initialize a sound: + +```c +ma_result result; +ma_sound sound; + +result = ma_sound_init_from_file(&engine, "my_sound.wav", 0, NULL, NULL, &sound); +if (result != MA_SUCCESS) { +return result; +} + +ma_sound_start(&sound); +``` + +This returns a `ma_sound` object which represents a single instance of the specified sound file. If +you want to play the same file multiple times simultaneously, you need to create one sound for each +instance. + +Sounds should be uninitialized with `ma_sound_uninit()`. + +Sounds are not started by default. Start a sound with `ma_sound_start()` and stop it with +`ma_sound_stop()`. When a sound is stopped, it is not rewound to the start. Use +`ma_sound_seek_to_pcm_frame(&sound, 0)` to seek back to the start of a sound. By default, starting +and stopping sounds happens immediately, but sometimes it might be convenient to schedule the sound +the be started and/or stopped at a specific time. This can be done with the following functions: + +```c +ma_sound_set_start_time_in_pcm_frames() +ma_sound_set_start_time_in_milliseconds() +ma_sound_set_stop_time_in_pcm_frames() +ma_sound_set_stop_time_in_milliseconds() +``` + +The start/stop time needs to be specified based on the absolute timer which is controlled by the +engine. The current global time time in PCM frames can be retrieved with +`ma_engine_get_time_in_pcm_frames()`. The engine's global time can be changed with +`ma_engine_set_time_in_pcm_frames()` for synchronization purposes if required. 
Note that scheduling
+a start time still requires an explicit call to `ma_sound_start()` before anything will play:
+
+```c
+ma_sound_set_start_time_in_pcm_frames(&sound, ma_engine_get_time_in_pcm_frames(&engine) + (ma_engine_get_sample_rate(&engine) * 2));
+ma_sound_start(&sound);
+```
+
+The third parameter of `ma_sound_init_from_file()` is a set of flags that control how the sound should be
+loaded and a few options on which features should be enabled for that sound. By default, the sound
+is synchronously loaded fully into memory straight from the file system without any kind of
+decoding. If you want to decode the sound before storing it in memory, you need to specify the
+`MA_SOUND_FLAG_DECODE` flag. This is useful if you want to incur the cost of decoding at an earlier
+stage, such as a loading stage. Without this option, decoding will happen dynamically at mixing
+time which might be too expensive on the audio thread.
+
+If you want to load the sound asynchronously, you can specify the `MA_SOUND_FLAG_ASYNC` flag. This
+will result in `ma_sound_init_from_file()` returning quickly, but the sound will not start playing
+until the sound has had some audio decoded.
+
+The fourth parameter is a pointer to a sound group. A sound group is used as a mechanism to organise
+sounds into groups which have their own effect processing and volume control. An example is a game
+which might have separate groups for sfx, voice and music. Each of these groups has its own
+independent volume control. Use `ma_sound_group_init()` or `ma_sound_group_init_ex()` to initialize
+a sound group.
+
+Sounds and sound groups are nodes in the engine's node graph and can be plugged into any `ma_node`
+API. This makes it possible to connect sounds and sound groups to effect nodes to produce complex
+effect chains.
+
+A sound can have its volume changed with `ma_sound_set_volume()`. If you prefer decibel volume
+control you can use `ma_volume_db_to_linear()` to convert from decibel representation to linear.
+
+Panning and pitching are supported with `ma_sound_set_pan()` and `ma_sound_set_pitch()`. If you know
+a sound will never have its pitch changed with `ma_sound_set_pitch()` or via the doppler effect,
+you can specify the `MA_SOUND_FLAG_NO_PITCH` flag when initializing the sound for an optimization.
+
+By default, sounds and sound groups have spatialization enabled. If you don't ever want to
+spatialize your sounds, initialize the sound with the `MA_SOUND_FLAG_NO_SPATIALIZATION` flag. The
+spatialization model is fairly simple and is roughly on feature parity with OpenAL. HRTF and
+environmental occlusion are not currently supported, but planned for the future. The supported
+features include:
+
+* Sound and listener positioning and orientation with cones
+* Attenuation models: none, inverse, linear and exponential
+* Doppler effect
+
+Sounds can be faded in and out with `ma_sound_set_fade_in_pcm_frames()`.
+
+To check if a sound is currently playing, you can use `ma_sound_is_playing()`. To check if a sound
+is at the end, use `ma_sound_at_end()`. Looping of a sound can be controlled with
+`ma_sound_set_looping()`. Use `ma_sound_is_looping()` to check whether or not the sound is looping.
+
+
+
+2. Building
+===========
+miniaudio should work cleanly out of the box without the need to download or install any
+dependencies. See below for platform-specific details.
+
+Note that GCC and Clang require `-msse2`, `-mavx2`, etc. for SIMD optimizations.
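+
+As a concrete illustration (a sketch only; the compiler, file names and optimization flags are
+placeholders and will differ per project), a Linux command line that compiles a program containing
+the miniaudio implementation, with SSE2 optimizations enabled and the libraries listed in section
+2.3 below, might look like this:
+
+cc -O2 -msse2 program.c -o program -ldl -lpthread -lm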
+
+If you get errors about undefined references to `__sync_val_compare_and_swap_8`, `__atomic_load_8`,
+etc. you need to link with `-latomic`.
+
+
+2.1. Windows
+------------
+The Windows build should compile cleanly on all popular compilers without the need to configure any
+include paths nor link to any libraries.
+
+The UWP build may require linking to mmdevapi.lib if you get errors about an unresolved external
+symbol for `ActivateAudioInterfaceAsync()`.
+
+
+2.2. macOS and iOS
+------------------
+The macOS build should compile cleanly without the need to download any dependencies nor link to
+any libraries or frameworks. The iOS build needs to be compiled as Objective-C and will need to
+link the relevant frameworks but should compile cleanly out of the box with Xcode. Compiling
+through the command line requires linking to `-lpthread` and `-lm`.
+
+Due to the way miniaudio links to frameworks at runtime, your application may not pass Apple's
+notarization process. To fix this there are two options. The first is to use the
+`MA_NO_RUNTIME_LINKING` option, like so:
+
+```c
+#ifdef __APPLE__
+#define MA_NO_RUNTIME_LINKING
+#endif
+#define MINIAUDIO_IMPLEMENTATION
+#include "miniaudio.h"
+```
+
+This will require linking with `-framework CoreFoundation -framework CoreAudio -framework AudioToolbox`.
+If you get errors about AudioToolbox, try with `-framework AudioUnit` instead. You may get this when
+using older versions of iOS. Alternatively, if you would rather keep using runtime linking you can
+add the following to your entitlements.xcent file:
+
+```
+<key>com.apple.security.cs.allow-dyld-environment-variables</key>
+<true/>
+<key>com.apple.security.cs.allow-unsigned-executable-memory</key>
+<true/>
+```
+
+See this discussion for more info: https://github.com/mackron/miniaudio/issues/203.
+
+
+2.3. Linux
+----------
+The Linux build only requires linking to `-ldl`, `-lpthread` and `-lm`. You do not need any
+development packages. You may need to link with `-latomic` if you're compiling for 32-bit ARM.
+
+
+2.4. BSD
+--------
+The BSD build only requires linking to `-lpthread` and `-lm`. NetBSD uses audio(4), OpenBSD uses
+sndio and FreeBSD uses OSS. You may need to link with `-latomic` if you're compiling for 32-bit
+ARM.
+
+
+2.5. Android
+------------
+AAudio is the highest priority backend on Android. This should work out of the box without needing
+any kind of compiler configuration. Support for AAudio starts with Android 8 which means older
+versions will fall back to OpenSL|ES which requires API level 16+.
+
+There have been reports that the OpenSL|ES backend fails to initialize on some Android based
+devices due to `dlopen()` failing to open "libOpenSLES.so". If this happens on your platform
+you'll need to disable run-time linking with `MA_NO_RUNTIME_LINKING` and link with -lOpenSLES.
+
+
+2.6. Emscripten
+---------------
+The Emscripten build emits Web Audio JavaScript directly and should compile cleanly out of the box.
+You cannot use `-std=c*` compiler flags, nor `-ansi`.
+
+You can enable the use of AudioWorklets by defining `MA_ENABLE_AUDIO_WORKLETS` and then compiling
+with the following options:
+
+-sAUDIO_WORKLET=1 -sWASM_WORKERS=1 -sASYNCIFY
+
+An example for compiling with AudioWorklet support might look like this:
+
+emcc program.c -o bin/program.html -DMA_ENABLE_AUDIO_WORKLETS -sAUDIO_WORKLET=1 -sWASM_WORKERS=1 -sASYNCIFY
+
+To run locally, you'll need to use emrun:
+
+emrun bin/program.html
+
+
+
+2.7. Build Options
+------------------
+`#define` these options before including miniaudio.h.
+ ++----------------------------------+--------------------------------------------------------------------+ +| Option | Description | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_WASAPI | Disables the WASAPI backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_DSOUND | Disables the DirectSound backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_WINMM | Disables the WinMM backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_ALSA | Disables the ALSA backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_PULSEAUDIO | Disables the PulseAudio backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_JACK | Disables the JACK backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_COREAUDIO | Disables the Core Audio backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_SNDIO | Disables the sndio backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_AUDIO4 | Disables the audio(4) backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_OSS | Disables the OSS backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_AAUDIO | Disables the AAudio backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_OPENSL | Disables the OpenSL|ES backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_WEBAUDIO | Disables the Web Audio backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_NULL | Disables the null backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_ENABLE_ONLY_SPECIFIC_BACKENDS | Disables all backends by default and requires `MA_ENABLE_*` to | +| | enable specific backends. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_ENABLE_WASAPI | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to | +| | enable the WASAPI backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_ENABLE_DSOUND | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to | +| | enable the DirectSound backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_ENABLE_WINMM | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to | +| | enable the WinMM backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_ENABLE_ALSA | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to | +| | enable the ALSA backend. 
| ++----------------------------------+--------------------------------------------------------------------+ +| MA_ENABLE_PULSEAUDIO | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to | +| | enable the PulseAudio backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_ENABLE_JACK | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to | +| | enable the JACK backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_ENABLE_COREAUDIO | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to | +| | enable the Core Audio backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_ENABLE_SNDIO | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to | +| | enable the sndio backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_ENABLE_AUDIO4 | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to | +| | enable the audio(4) backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_ENABLE_OSS | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to | +| | enable the OSS backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_ENABLE_AAUDIO | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to | +| | enable the AAudio backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_ENABLE_OPENSL | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to | +| | enable the OpenSL|ES backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_ENABLE_WEBAUDIO | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to | +| | enable the Web Audio backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_ENABLE_NULL | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to | +| | enable the null backend. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_DECODING | Disables decoding APIs. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_ENCODING | Disables encoding APIs. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_WAV | Disables the built-in WAV decoder and encoder. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_FLAC | Disables the built-in FLAC decoder. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_MP3 | Disables the built-in MP3 decoder. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_DEVICE_IO | Disables playback and recording. This will disable `ma_context` | +| | and `ma_device` APIs. This is useful if you only want to use | +| | miniaudio's data conversion and/or decoding APIs. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_RESOURCE_MANAGER | Disables the resource manager. 
When using the engine this will | +| | also disable the following functions: | +| | | +| | ``` | +| | ma_sound_init_from_file() | +| | ma_sound_init_from_file_w() | +| | ma_sound_init_copy() | +| | ma_engine_play_sound_ex() | +| | ma_engine_play_sound() | +| | ``` | +| | | +| | The only way to initialize a `ma_sound` object is to initialize it | +| | from a data source. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_NODE_GRAPH | Disables the node graph API. This will also disable the engine API | +| | because it depends on the node graph. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_ENGINE | Disables the engine API. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_THREADING | Disables the `ma_thread`, `ma_mutex`, `ma_semaphore` and | +| | `ma_event` APIs. This option is useful if you only need to use | +| | miniaudio for data conversion, decoding and/or encoding. Some | +| | families of APIs require threading which means the following | +| | options must also be set: | +| | | +| | ``` | +| | MA_NO_DEVICE_IO | +| | ``` | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_GENERATION | Disables generation APIs such a `ma_waveform` and `ma_noise`. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_SSE2 | Disables SSE2 optimizations. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_AVX2 | Disables AVX2 optimizations. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_NEON | Disables NEON optimizations. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_NO_RUNTIME_LINKING | Disables runtime linking. This is useful for passing Apple's | +| | notarization process. When enabling this, you may need to avoid | +| | using `-std=c89` or `-std=c99` on Linux builds or else you may end | +| | up with compilation errors due to conflicts with `timespec` and | +| | `timeval` data types. | +| | | +| | You may need to enable this if your target platform does not allow | +| | runtime linking via `dlopen()`. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_DEBUG_OUTPUT | Enable `printf()` output of debug logs (`MA_LOG_LEVEL_DEBUG`). | ++----------------------------------+--------------------------------------------------------------------+ +| MA_COINIT_VALUE | Windows only. The value to pass to internal calls to | +| | `CoInitializeEx()`. Defaults to `COINIT_MULTITHREADED`. | ++----------------------------------+--------------------------------------------------------------------+ +| MA_API | Controls how public APIs should be decorated. Default is `extern`. | ++----------------------------------+--------------------------------------------------------------------+ + + +3. Definitions +============== +This section defines common terms used throughout miniaudio. Unfortunately there is often ambiguity +in the use of terms throughout the audio space, so this section is intended to clarify how miniaudio +uses each term. + +3.1. Sample +----------- +A sample is a single unit of audio data. 
If the sample format is f32, then one sample is one 32-bit +floating point number. + +3.2. Frame / PCM Frame +---------------------- +A frame is a group of samples equal to the number of channels. For a stereo stream a frame is 2 +samples, a mono frame is 1 sample, a 5.1 surround sound frame is 6 samples, etc. The terms "frame" +and "PCM frame" are the same thing in miniaudio. Note that this is different to a compressed frame. +If ever miniaudio needs to refer to a compressed frame, such as a FLAC frame, it will always +clarify what it's referring to with something like "FLAC frame". + +3.3. Channel +------------ +A stream of monaural audio that is emitted from an individual speaker in a speaker system, or +received from an individual microphone in a microphone system. A stereo stream has two channels (a +left channel, and a right channel), a 5.1 surround sound system has 6 channels, etc. Some audio +systems refer to a channel as a complex audio stream that's mixed with other channels to produce +the final mix - this is completely different to miniaudio's use of the term "channel" and should +not be confused. + +3.4. Sample Rate +---------------- +The sample rate in miniaudio is always expressed in Hz, such as 44100, 48000, etc. It's the number +of PCM frames that are processed per second. + +3.5. Formats +------------ +Throughout miniaudio you will see references to different sample formats: + ++---------------+----------------------------------------+---------------------------+ +| Symbol | Description | Range | ++---------------+----------------------------------------+---------------------------+ +| ma_format_f32 | 32-bit floating point | [-1, 1] | +| ma_format_s16 | 16-bit signed integer | [-32768, 32767] | +| ma_format_s24 | 24-bit signed integer (tightly packed) | [-8388608, 8388607] | +| ma_format_s32 | 32-bit signed integer | [-2147483648, 2147483647] | +| ma_format_u8 | 8-bit unsigned integer | [0, 255] | ++---------------+----------------------------------------+---------------------------+ + +All formats are native-endian. + + + +4. Data Sources +=============== +The data source abstraction in miniaudio is used for retrieving audio data from some source. A few +examples include `ma_decoder`, `ma_noise` and `ma_waveform`. You will need to be familiar with data +sources in order to make sense of some of the higher level concepts in miniaudio. + +The `ma_data_source` API is a generic interface for reading from a data source. Any object that +implements the data source interface can be plugged into any `ma_data_source` function. + +To read data from a data source: + +```c +ma_result result; +ma_uint64 framesRead; + +result = ma_data_source_read_pcm_frames(pDataSource, pFramesOut, frameCount, &framesRead); +if (result != MA_SUCCESS) { +return result; // Failed to read data from the data source. +} +``` + +If you don't need the number of frames that were successfully read you can pass in `NULL` to the +`pFramesRead` parameter. If this returns a value less than the number of frames requested it means +the end of the file has been reached. `MA_AT_END` will be returned only when the number of frames +read is 0. + +When calling any data source function, with the exception of `ma_data_source_init()` and +`ma_data_source_uninit()`, you can pass in any object that implements a data source. For example, +you could plug in a decoder like so: + +```c +ma_result result; +ma_uint64 framesRead; +ma_decoder decoder; // <-- This would be initialized with `ma_decoder_init_*()`. 
+ +result = ma_data_source_read_pcm_frames(&decoder, pFramesOut, frameCount, &framesRead); +if (result != MA_SUCCESS) { +return result; // Failed to read data from the decoder. +} +``` + +If you want to seek forward you can pass in `NULL` to the `pFramesOut` parameter. Alternatively you +can use `ma_data_source_seek_pcm_frames()`. + +To seek to a specific PCM frame: + +```c +result = ma_data_source_seek_to_pcm_frame(pDataSource, frameIndex); +if (result != MA_SUCCESS) { +return result; // Failed to seek to PCM frame. +} +``` + +You can retrieve the total length of a data source in PCM frames, but note that some data sources +may not have the notion of a length, such as noise and waveforms, and others may just not have a +way of determining the length such as some decoders. To retrieve the length: + +```c +ma_uint64 length; + +result = ma_data_source_get_length_in_pcm_frames(pDataSource, &length); +if (result != MA_SUCCESS) { +return result; // Failed to retrieve the length. +} +``` + +Care should be taken when retrieving the length of a data source where the underlying decoder is +pulling data from a data stream with an undefined length, such as internet radio or some kind of +broadcast. If you do this, `ma_data_source_get_length_in_pcm_frames()` may never return. + +The current position of the cursor in PCM frames can also be retrieved: + +```c +ma_uint64 cursor; + +result = ma_data_source_get_cursor_in_pcm_frames(pDataSource, &cursor); +if (result != MA_SUCCESS) { +return result; // Failed to retrieve the cursor. +} +``` + +You will often need to know the data format that will be returned after reading. This can be +retrieved like so: + +```c +ma_format format; +ma_uint32 channels; +ma_uint32 sampleRate; +ma_channel channelMap[MA_MAX_CHANNELS]; + +result = ma_data_source_get_data_format(pDataSource, &format, &channels, &sampleRate, channelMap, MA_MAX_CHANNELS); +if (result != MA_SUCCESS) { +return result; // Failed to retrieve data format. +} +``` + +If you do not need a specific data format property, just pass in NULL to the respective parameter. + +There may be cases where you want to implement something like a sound bank where you only want to +read data within a certain range of the underlying data. To do this you can use a range: + +```c +result = ma_data_source_set_range_in_pcm_frames(pDataSource, rangeBegInFrames, rangeEndInFrames); +if (result != MA_SUCCESS) { +return result; // Failed to set the range. +} +``` + +This is useful if you have a sound bank where many sounds are stored in the same file and you want +the data source to only play one of those sub-sounds. Note that once the range is set, everything +that takes a position, such as cursors and loop points, should always be relatvie to the start of +the range. When the range is set, any previously defined loop point will be reset. + +Custom loop points can also be used with data sources. By default, data sources will loop after +they reach the end of the data source, but if you need to loop at a specific location, you can do +the following: + +```c +result = ma_data_set_loop_point_in_pcm_frames(pDataSource, loopBegInFrames, loopEndInFrames); +if (result != MA_SUCCESS) { +return result; // Failed to set the loop point. +} +``` + +The loop point is relative to the current range. + +It's sometimes useful to chain data sources together so that a seamless transition can be achieved. +To do this, you can use chaining: + +```c +ma_decoder decoder1; +ma_decoder decoder2; + +// ... initialize decoders with ma_decoder_init_*() ... 
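+// For illustration only (hypothetical file names): the two decoders above would typically be
+// set up with ma_decoder_init_file(), e.g.:
+//
+//     ma_decoder_init_file("part1.wav", NULL, &decoder1);
+//     ma_decoder_init_file("part2.wav", NULL, &decoder2);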
+ +result = ma_data_source_set_next(&decoder1, &decoder2); +if (result != MA_SUCCESS) { +return result; // Failed to set the next data source. +} + +result = ma_data_source_read_pcm_frames(&decoder1, pFramesOut, frameCount, pFramesRead); +if (result != MA_SUCCESS) { +return result; // Failed to read from the decoder. +} +``` + +In the example above we're using decoders. When reading from a chain, you always want to read from +the top level data source in the chain. In the example above, `decoder1` is the top level data +source in the chain. When `decoder1` reaches the end, `decoder2` will start seamlessly without any +gaps. + +Note that when looping is enabled, only the current data source will be looped. You can loop the +entire chain by linking in a loop like so: + +```c +ma_data_source_set_next(&decoder1, &decoder2); // decoder1 -> decoder2 +ma_data_source_set_next(&decoder2, &decoder1); // decoder2 -> decoder1 (loop back to the start). +``` + +Note that setting up chaining is not thread safe, so care needs to be taken if you're dynamically +changing links while the audio thread is in the middle of reading. + +Do not use `ma_decoder_seek_to_pcm_frame()` as a means to reuse a data source to play multiple +instances of the same sound simultaneously. This can be extremely inefficient depending on the type +of data source and can result in glitching due to subtle changes to the state of internal filters. +Instead, initialize multiple data sources for each instance. + + +4.1. Custom Data Sources +------------------------ +You can implement a custom data source by implementing the functions in `ma_data_source_vtable`. +Your custom object must have `ma_data_source_base` as it's first member: + +```c +struct my_data_source +{ +ma_data_source_base base; +... +}; +``` + +In your initialization routine, you need to call `ma_data_source_init()` in order to set up the +base object (`ma_data_source_base`): + +```c +static ma_result my_data_source_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ +// Read data here. Output in the same format returned by my_data_source_get_data_format(). +} + +static ma_result my_data_source_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ +// Seek to a specific PCM frame here. Return MA_NOT_IMPLEMENTED if seeking is not supported. +} + +static ma_result my_data_source_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ +// Return the format of the data here. +} + +static ma_result my_data_source_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor) +{ +// Retrieve the current position of the cursor here. Return MA_NOT_IMPLEMENTED and set *pCursor to 0 if there is no notion of a cursor. +} + +static ma_result my_data_source_get_length(ma_data_source* pDataSource, ma_uint64* pLength) +{ +// Retrieve the length in PCM frames here. Return MA_NOT_IMPLEMENTED and set *pLength to 0 if there is no notion of a length or if the length is unknown. 
+} + +static ma_data_source_vtable g_my_data_source_vtable = +{ +my_data_source_read, +my_data_source_seek, +my_data_source_get_data_format, +my_data_source_get_cursor, +my_data_source_get_length +}; + +ma_result my_data_source_init(my_data_source* pMyDataSource) +{ +ma_result result; +ma_data_source_config baseConfig; + +baseConfig = ma_data_source_config_init(); +baseConfig.vtable = &g_my_data_source_vtable; + +result = ma_data_source_init(&baseConfig, &pMyDataSource->base); +if (result != MA_SUCCESS) { +return result; +} + +// ... do the initialization of your custom data source here ... + +return MA_SUCCESS; +} + +void my_data_source_uninit(my_data_source* pMyDataSource) +{ +// ... do the uninitialization of your custom data source here ... + +// You must uninitialize the base data source. +ma_data_source_uninit(&pMyDataSource->base); +} +``` + +Note that `ma_data_source_init()` and `ma_data_source_uninit()` are never called directly outside +of the custom data source. It's up to the custom data source itself to call these within their own +init/uninit functions. + + + +5. Engine +========= +The `ma_engine` API is a high level API for managing and mixing sounds and effect processing. The +`ma_engine` object encapsulates a resource manager and a node graph, both of which will be +explained in more detail later. + +Sounds are called `ma_sound` and are created from an engine. Sounds can be associated with a mixing +group called `ma_sound_group` which are also created from the engine. Both `ma_sound` and +`ma_sound_group` objects are nodes within the engine's node graph. + +When the engine is initialized, it will normally create a device internally. If you would rather +manage the device yourself, you can do so and just pass a pointer to it via the engine config when +you initialize the engine. You can also just use the engine without a device, which again can be +configured via the engine config. + +The most basic way to initialize the engine is with a default config, like so: + +```c +ma_result result; +ma_engine engine; + +result = ma_engine_init(NULL, &engine); +if (result != MA_SUCCESS) { +return result; // Failed to initialize the engine. +} +``` + +This will result in the engine initializing a playback device using the operating system's default +device. This will be sufficient for many use cases, but if you need more flexibility you'll want to +configure the engine with an engine config: + +```c +ma_result result; +ma_engine engine; +ma_engine_config engineConfig; + +engineConfig = ma_engine_config_init(); +engineConfig.pDevice = &myDevice; + +result = ma_engine_init(&engineConfig, &engine); +if (result != MA_SUCCESS) { +return result; // Failed to initialize the engine. +} +``` + +In the example above we're passing in a pre-initialized device. Since the caller is the one in +control of the device's data callback, it's their responsibility to manually call +`ma_engine_read_pcm_frames()` from inside their data callback: + +```c +void playback_data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) +{ +ma_engine_read_pcm_frames(&g_Engine, pOutput, frameCount, NULL); +} +``` + +You can also use the engine independent of a device entirely: + +```c +ma_result result; +ma_engine engine; +ma_engine_config engineConfig; + +engineConfig = ma_engine_config_init(); +engineConfig.noDevice = MA_TRUE; +engineConfig.channels = 2; // Must be set when not using a device. +engineConfig.sampleRate = 48000; // Must be set when not using a device. 
+ +result = ma_engine_init(&engineConfig, &engine); +if (result != MA_SUCCESS) { +return result; // Failed to initialize the engine. +} +``` + +Note that when you're not using a device, you must set the channel count and sample rate in the +config or else miniaudio won't know what to use (miniaudio will use the device to determine this +normally). When not using a device, you need to use `ma_engine_read_pcm_frames()` to process audio +data from the engine. This kind of setup is useful if you want to do something like offline +processing or want to use a different audio system for playback such as SDL. + +When a sound is loaded it goes through a resource manager. By default the engine will initialize a +resource manager internally, but you can also specify a pre-initialized resource manager: + +```c +ma_result result; +ma_engine engine1; +ma_engine engine2; +ma_engine_config engineConfig; + +engineConfig = ma_engine_config_init(); +engineConfig.pResourceManager = &myResourceManager; + +ma_engine_init(&engineConfig, &engine1); +ma_engine_init(&engineConfig, &engine2); +``` + +In this example we are initializing two engines, both of which are sharing the same resource +manager. This is especially useful for saving memory when loading the same file across multiple +engines. If you were not to use a shared resource manager, each engine instance would use their own +which would result in any sounds that are used between both engine's being loaded twice. By using +a shared resource manager, it would only be loaded once. Using multiple engine's is useful when you +need to output to multiple playback devices, such as in a local multiplayer game where each player +is using their own set of headphones. + +By default an engine will be in a started state. To make it so the engine is not automatically +started you can configure it as such: + +```c +engineConfig.noAutoStart = MA_TRUE; + +// The engine will need to be started manually. +ma_engine_start(&engine); + +// Later on the engine can be stopped with ma_engine_stop(). +ma_engine_stop(&engine); +``` + +The concept of starting or stopping an engine is only relevant when using the engine with a +device. Attempting to start or stop an engine that is not associated with a device will result in +`MA_INVALID_OPERATION`. + +The master volume of the engine can be controlled with `ma_engine_set_volume()` which takes a +linear scale, with 0 resulting in silence and anything above 1 resulting in amplification. If you +prefer decibel based volume control, use `ma_volume_db_to_linear()` to convert from dB to linear. + +When a sound is spatialized, it is done so relative to a listener. An engine can be configured to +have multiple listeners which can be configured via the config: + +```c +engineConfig.listenerCount = 2; +``` + +The maximum number of listeners is restricted to `MA_ENGINE_MAX_LISTENERS`. By default, when a +sound is spatialized, it will be done so relative to the closest listener. You can also pin a sound +to a specific listener which will be explained later. Listener's have a position, direction, cone, +and velocity (for doppler effect). A listener is referenced by an index, the meaning of which is up +to the caller (the index is 0 based and cannot go beyond the listener count, minus 1). The +position, direction and velocity are all specified in absolute terms: + +```c +ma_engine_listener_set_position(&engine, listenerIndex, worldPosX, worldPosY, worldPosZ); +``` + +The direction of the listener represents it's forward vector. 
The listener's up vector can also be +specified and defaults to +1 on the Y axis. + +```c +ma_engine_listener_set_direction(&engine, listenerIndex, forwardX, forwardY, forwardZ); +ma_engine_listener_set_world_up(&engine, listenerIndex, 0, 1, 0); +``` + +The engine supports directional attenuation. The listener can have a cone the controls how sound is +attenuated based on the listener's direction. When a sound is between the inner and outer cones, it +will be attenuated between 1 and the cone's outer gain: + +```c +ma_engine_listener_set_cone(&engine, listenerIndex, innerAngleInRadians, outerAngleInRadians, outerGain); +``` + +When a sound is inside the inner code, no directional attenuation is applied. When the sound is +outside of the outer cone, the attenuation will be set to `outerGain` in the example above. When +the sound is in between the inner and outer cones, the attenuation will be interpolated between 1 +and the outer gain. + +The engine's coordinate system follows the OpenGL coordinate system where positive X points right, +positive Y points up and negative Z points forward. + +The simplest and least flexible way to play a sound is like so: + +```c +ma_engine_play_sound(&engine, "my_sound.wav", pGroup); +``` + +This is a "fire and forget" style of function. The engine will manage the `ma_sound` object +internally. When the sound finishes playing, it'll be put up for recycling. For more flexibility +you'll want to initialize a sound object: + +```c +ma_sound sound; + +result = ma_sound_init_from_file(&engine, "my_sound.wav", flags, pGroup, NULL, &sound); +if (result != MA_SUCCESS) { +return result; // Failed to load sound. +} +``` + +Sounds need to be uninitialized with `ma_sound_uninit()`. + +The example above loads a sound from a file. If the resource manager has been disabled you will not +be able to use this function and instead you'll need to initialize a sound directly from a data +source: + +```c +ma_sound sound; + +result = ma_sound_init_from_data_source(&engine, &dataSource, flags, pGroup, &sound); +if (result != MA_SUCCESS) { +return result; +} +``` + +Each `ma_sound` object represents a single instance of the sound. If you want to play the same +sound multiple times at the same time, you need to initialize a separate `ma_sound` object. + +For the most flexibility when initializing sounds, use `ma_sound_init_ex()`. This uses miniaudio's +standard config/init pattern: + +```c +ma_sound sound; +ma_sound_config soundConfig; + +soundConfig = ma_sound_config_init(); +soundConfig.pFilePath = NULL; // Set this to load from a file path. +soundConfig.pDataSource = NULL; // Set this to initialize from an existing data source. +soundConfig.pInitialAttachment = &someNodeInTheNodeGraph; +soundConfig.initialAttachmentInputBusIndex = 0; +soundConfig.channelsIn = 1; +soundConfig.channelsOut = 0; // Set to 0 to use the engine's native channel count. + +result = ma_sound_init_ex(&soundConfig, &sound); +if (result != MA_SUCCESS) { +return result; +} +``` + +In the example above, the sound is being initialized without a file nor a data source. This is +valid, in which case the sound acts as a node in the middle of the node graph. This means you can +connect other sounds to this sound and allow it to act like a sound group. Indeed, this is exactly +what a `ma_sound_group` is. + +When loading a sound, you specify a set of flags that control how the sound is loaded and what +features are enabled for that sound. 
When no flags are set, the sound will be fully loaded into +memory in exactly the same format as how it's stored on the file system. The resource manager will +allocate a block of memory and then load the file directly into it. When reading audio data, it +will be decoded dynamically on the fly. In order to save processing time on the audio thread, it +might be beneficial to pre-decode the sound. You can do this with the `MA_SOUND_FLAG_DECODE` flag: + +```c +ma_sound_init_from_file(&engine, "my_sound.wav", MA_SOUND_FLAG_DECODE, pGroup, NULL, &sound); +``` + +By default, sounds will be loaded synchronously, meaning `ma_sound_init_*()` will not return until +the sound has been fully loaded. If this is prohibitive you can instead load sounds asynchronously +by specifying the `MA_SOUND_FLAG_ASYNC` flag: + +```c +ma_sound_init_from_file(&engine, "my_sound.wav", MA_SOUND_FLAG_DECODE | MA_SOUND_FLAG_ASYNC, pGroup, NULL, &sound); +``` + +This will result in `ma_sound_init_*()` returning quickly, but the sound won't yet have been fully +loaded. When you start the sound, it won't output anything until some sound is available. The sound +will start outputting audio before the sound has been fully decoded when the `MA_SOUND_FLAG_DECODE` +is specified. + +If you need to wait for an asynchronously loaded sound to be fully loaded, you can use a fence. A +fence in miniaudio is a simple synchronization mechanism which simply blocks until it's internal +counter hit's zero. You can specify a fence like so: + +```c +ma_result result; +ma_fence fence; +ma_sound sounds[4]; + +result = ma_fence_init(&fence); +if (result != MA_SUCCESS) { +return result; +} + +// Load some sounds asynchronously. +for (int iSound = 0; iSound < 4; iSound += 1) { +ma_sound_init_from_file(&engine, mySoundFilesPaths[iSound], MA_SOUND_FLAG_DECODE | MA_SOUND_FLAG_ASYNC, pGroup, &fence, &sounds[iSound]); +} + +// ... do some other stuff here in the mean time ... + +// Wait for all sounds to finish loading. +ma_fence_wait(&fence); +``` + +If loading the entire sound into memory is prohibitive, you can also configure the engine to stream +the audio data: + +```c +ma_sound_init_from_file(&engine, "my_sound.wav", MA_SOUND_FLAG_STREAM, pGroup, NULL, &sound); +``` + +When streaming sounds, 2 seconds worth of audio data is stored in memory. Although it should work +fine, it's inefficient to use streaming for short sounds. Streaming is useful for things like music +tracks in games. + +When loading a sound from a file path, the engine will reference count the file to prevent it from +being loaded if it's already in memory. When you uninitialize a sound, the reference count will be +decremented, and if it hits zero, the sound will be unloaded from memory. This reference counting +system is not used for streams. The engine will use a 64-bit hash of the file name when comparing +file paths which means there's a small chance you might encounter a name collision. If this is an +issue, you'll need to use a different name for one of the colliding file paths, or just not load +from files and instead load from a data source. + +You can use `ma_sound_init_copy()` to initialize a copy of another sound. Note, however, that this +only works for sounds that were initialized with `ma_sound_init_from_file()` and without the +`MA_SOUND_FLAG_STREAM` flag. + +When you initialize a sound, if you specify a sound group the sound will be attached to that group +automatically. If you set it to NULL, it will be automatically attached to the engine's endpoint. 
+If you would instead rather leave the sound unattached by default, you can can specify the +`MA_SOUND_FLAG_NO_DEFAULT_ATTACHMENT` flag. This is useful if you want to set up a complex node +graph. + +Sounds are not started by default. To start a sound, use `ma_sound_start()`. Stop a sound with +`ma_sound_stop()`. + +Sounds can have their volume controlled with `ma_sound_set_volume()` in the same way as the +engine's master volume. + +Sounds support stereo panning and pitching. Set the pan with `ma_sound_set_pan()`. Setting the pan +to 0 will result in an unpanned sound. Setting it to -1 will shift everything to the left, whereas ++1 will shift it to the right. The pitch can be controlled with `ma_sound_set_pitch()`. A larger +value will result in a higher pitch. The pitch must be greater than 0. + +The engine supports 3D spatialization of sounds. By default sounds will have spatialization +enabled, but if a sound does not need to be spatialized it's best to disable it. There are two ways +to disable spatialization of a sound: + +```c +// Disable spatialization at initialization time via a flag: +ma_sound_init_from_file(&engine, "my_sound.wav", MA_SOUND_FLAG_NO_SPATIALIZATION, NULL, NULL, &sound); + +// Dynamically disable or enable spatialization post-initialization: +ma_sound_set_spatialization_enabled(&sound, isSpatializationEnabled); +``` + +By default sounds will be spatialized based on the closest listener. If a sound should always be +spatialized relative to a specific listener it can be pinned to one: + +```c +ma_sound_set_pinned_listener_index(&sound, listenerIndex); +``` + +Like listeners, sounds have a position. By default, the position of a sound is in absolute space, +but it can be changed to be relative to a listener: + +```c +ma_sound_set_positioning(&sound, ma_positioning_relative); +``` + +Note that relative positioning of a sound only makes sense if there is either only one listener, or +the sound is pinned to a specific listener. To set the position of a sound: + +```c +ma_sound_set_position(&sound, posX, posY, posZ); +``` + +The direction works the same way as a listener and represents the sound's forward direction: + +```c +ma_sound_set_direction(&sound, forwardX, forwardY, forwardZ); +``` + +Sound's also have a cone for controlling directional attenuation. This works exactly the same as +listeners: + +```c +ma_sound_set_cone(&sound, innerAngleInRadians, outerAngleInRadians, outerGain); +``` + +The velocity of a sound is used for doppler effect and can be set as such: + +```c +ma_sound_set_velocity(&sound, velocityX, velocityY, velocityZ); +``` + +The engine supports different attenuation models which can be configured on a per-sound basis. By +default the attenuation model is set to `ma_attenuation_model_inverse` which is the equivalent to +OpenAL's `AL_INVERSE_DISTANCE_CLAMPED`. Configure the attenuation model like so: + +```c +ma_sound_set_attenuation_model(&sound, ma_attenuation_model_inverse); +``` + +The supported attenuation models include the following: + ++----------------------------------+----------------------------------------------+ +| ma_attenuation_model_none | No distance attenuation. | ++----------------------------------+----------------------------------------------+ +| ma_attenuation_model_inverse | Equivalent to `AL_INVERSE_DISTANCE_CLAMPED`. | ++----------------------------------+----------------------------------------------+ +| ma_attenuation_model_linear | Linear attenuation. 
| ++----------------------------------+----------------------------------------------+ +| ma_attenuation_model_exponential | Exponential attenuation. | ++----------------------------------+----------------------------------------------+ + +To control how quickly a sound rolls off as it moves away from the listener, you need to configure +the rolloff: + +```c +ma_sound_set_rolloff(&sound, rolloff); +``` + +You can control the minimum and maximum gain to apply from spatialization: + +```c +ma_sound_set_min_gain(&sound, minGain); +ma_sound_set_max_gain(&sound, maxGain); +``` + +Likewise, in the calculation of attenuation, you can control the minimum and maximum distances for +the attenuation calculation. This is useful if you want to ensure sounds don't drop below a certain +volume after the listener moves further away and to have sounds play a maximum volume when the +listener is within a certain distance: + +```c +ma_sound_set_min_distance(&sound, minDistance); +ma_sound_set_max_distance(&sound, maxDistance); +``` + +The engine's spatialization system supports doppler effect. The doppler factor can be configure on +a per-sound basis like so: + +```c +ma_sound_set_doppler_factor(&sound, dopplerFactor); +``` + +You can fade sounds in and out with `ma_sound_set_fade_in_pcm_frames()` and +`ma_sound_set_fade_in_milliseconds()`. Set the volume to -1 to use the current volume as the +starting volume: + +```c +// Fade in over 1 second. +ma_sound_set_fade_in_milliseconds(&sound, 0, 1, 1000); + +// ... sometime later ... + +// Fade out over 1 second, starting from the current volume. +ma_sound_set_fade_in_milliseconds(&sound, -1, 0, 1000); +``` + +By default sounds will start immediately, but sometimes for timing and synchronization purposes it +can be useful to schedule a sound to start or stop: + +```c +// Start the sound in 1 second from now. +ma_sound_set_start_time_in_pcm_frames(&sound, ma_engine_get_time_in_pcm_frames(&engine) + (ma_engine_get_sample_rate(&engine) * 1)); + +// Stop the sound in 2 seconds from now. +ma_sound_set_stop_time_in_pcm_frames(&sound, ma_engine_get_time_in_pcm_frames(&engine) + (ma_engine_get_sample_rate(&engine) * 2)); +``` + +Note that scheduling a start time still requires an explicit call to `ma_sound_start()` before +anything will play. + +The time is specified in global time which is controlled by the engine. You can get the engine's +current time with `ma_engine_get_time_in_pcm_frames()`. The engine's global time is incremented +automatically as audio data is read, but it can be reset with `ma_engine_set_time_in_pcm_frames()` +in case it needs to be resynchronized for some reason. + +To determine whether or not a sound is currently playing, use `ma_sound_is_playing()`. This will +take the scheduled start and stop times into account. + +Whether or not a sound should loop can be controlled with `ma_sound_set_looping()`. Sounds will not +be looping by default. Use `ma_sound_is_looping()` to determine whether or not a sound is looping. + +Use `ma_sound_at_end()` to determine whether or not a sound is currently at the end. For a looping +sound this should never return true. Alternatively, you can configure a callback that will be fired +when the sound reaches the end. Note that the callback is fired from the audio thread which means +you cannot be uninitializing sound from the callback. To set the callback you can use +`ma_sound_set_end_callback()`. 
Alternatively, if you're using `ma_sound_init_ex()`, you can pass it +into the config like so: + +```c +soundConfig.endCallback = my_end_callback; +soundConfig.pEndCallbackUserData = pMyEndCallbackUserData; +``` + +The end callback is declared like so: + +```c +void my_end_callback(void* pUserData, ma_sound* pSound) +{ +... +} +``` + +Internally a sound wraps around a data source. Some APIs exist to control the underlying data +source, mainly for convenience: + +```c +ma_sound_seek_to_pcm_frame(&sound, frameIndex); +ma_sound_get_data_format(&sound, &format, &channels, &sampleRate, pChannelMap, channelMapCapacity); +ma_sound_get_cursor_in_pcm_frames(&sound, &cursor); +ma_sound_get_length_in_pcm_frames(&sound, &length); +``` + +Sound groups have the same API as sounds, only they are called `ma_sound_group`, and since they do +not have any notion of a data source, anything relating to a data source is unavailable. + +Internally, sound data is loaded via the `ma_decoder` API which means by default it only supports +file formats that have built-in support in miniaudio. You can extend this to support any kind of +file format through the use of custom decoders. To do this you'll need to use a self-managed +resource manager and configure it appropriately. See the "Resource Management" section below for +details on how to set this up. + + +6. Resource Management +====================== +Many programs will want to manage sound resources for things such as reference counting and +streaming. This is supported by miniaudio via the `ma_resource_manager` API. + +The resource manager is mainly responsible for the following: + +* Loading of sound files into memory with reference counting. +* Streaming of sound data. + +When loading a sound file, the resource manager will give you back a `ma_data_source` compatible +object called `ma_resource_manager_data_source`. This object can be passed into any +`ma_data_source` API which is how you can read and seek audio data. When loading a sound file, you +specify whether or not you want the sound to be fully loaded into memory (and optionally +pre-decoded) or streamed. When loading into memory, you can also specify whether or not you want +the data to be loaded asynchronously. + +The example below is how you can initialize a resource manager using it's default configuration: + +```c +ma_resource_manager_config config; +ma_resource_manager resourceManager; + +config = ma_resource_manager_config_init(); +result = ma_resource_manager_init(&config, &resourceManager); +if (result != MA_SUCCESS) { +ma_device_uninit(&device); +printf("Failed to initialize the resource manager."); +return -1; +} +``` + +You can configure the format, channels and sample rate of the decoded audio data. By default it +will use the file's native data format, but you can configure it to use a consistent format. This +is useful for offloading the cost of data conversion to load time rather than dynamically +converting at mixing time. To do this, you configure the decoded format, channels and sample rate +like the code below: + +```c +config = ma_resource_manager_config_init(); +config.decodedFormat = device.playback.format; +config.decodedChannels = device.playback.channels; +config.decodedSampleRate = device.sampleRate; +``` + +In the code above, the resource manager will be configured so that any decoded audio data will be +pre-converted at load time to the device's native data format. 
If instead you used defaults and +the data format of the file did not match the device's data format, you would need to convert the +data at mixing time which may be prohibitive in high-performance and large scale scenarios like +games. + +Internally the resource manager uses the `ma_decoder` API to load sounds. This means by default it +only supports decoders that are built into miniaudio. It's possible to support additional encoding +formats through the use of custom decoders. To do so, pass in your `ma_decoding_backend_vtable` +vtables into the resource manager config: + +```c +ma_decoding_backend_vtable* pCustomBackendVTables[] = +{ +&g_ma_decoding_backend_vtable_libvorbis, +&g_ma_decoding_backend_vtable_libopus +}; + +... + +resourceManagerConfig.ppCustomDecodingBackendVTables = pCustomBackendVTables; +resourceManagerConfig.customDecodingBackendCount = sizeof(pCustomBackendVTables) / sizeof(pCustomBackendVTables[0]); +resourceManagerConfig.pCustomDecodingBackendUserData = NULL; +``` + +This system can allow you to support any kind of file format. See the "Decoding" section for +details on how to implement custom decoders. The miniaudio repository includes examples for Opus +via libopus and libopusfile and Vorbis via libvorbis and libvorbisfile. + +Asynchronicity is achieved via a job system. When an operation needs to be performed, such as the +decoding of a page, a job will be posted to a queue which will then be processed by a job thread. +By default there will be only one job thread running, but this can be configured, like so: + +```c +config = ma_resource_manager_config_init(); +config.jobThreadCount = MY_JOB_THREAD_COUNT; +``` + +By default job threads are managed internally by the resource manager, however you can also self +manage your job threads if, for example, you want to integrate the job processing into your +existing job infrastructure, or if you simply don't like the way the resource manager does it. To +do this, just set the job thread count to 0 and process jobs manually. To process jobs, you first +need to retrieve a job using `ma_resource_manager_next_job()` and then process it using +`ma_job_process()`: + +```c +config = ma_resource_manager_config_init(); +config.jobThreadCount = 0; // Don't manage any job threads internally. +config.flags = MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING; // Optional. Makes `ma_resource_manager_next_job()` non-blocking. + +// ... Initialize your custom job threads ... + +void my_custom_job_thread(...) +{ +for (;;) { +ma_job job; +ma_result result = ma_resource_manager_next_job(pMyResourceManager, &job); +if (result != MA_SUCCESS) { +if (result == MA_NO_DATA_AVAILABLE) { +// No jobs are available. Keep going. Will only get this if the resource manager was initialized +// with MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING. +continue; +} else if (result == MA_CANCELLED) { +// MA_JOB_TYPE_QUIT was posted. Exit. +break; +} else { +// Some other error occurred. +break; +} +} + +ma_job_process(&job); +} +} +``` + +In the example above, the `MA_JOB_TYPE_QUIT` event is the used as the termination +indicator, but you can use whatever you would like to terminate the thread. The call to +`ma_resource_manager_next_job()` is blocking by default, but can be configured to be non-blocking +by initializing the resource manager with the `MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING` configuration +flag. Note that the `MA_JOB_TYPE_QUIT` will never be removed from the job queue. This +is to give every thread the opportunity to catch the event and terminate naturally. 
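+
+As a rough sketch of how shutdown might look when self-managing job threads, you would typically
+post the quit job, join your job threads, and only then uninitialize the resource manager. The
+sketch below assumes the `pMyResourceManager` object from the example above and uses
+`ma_resource_manager_post_job_quit()` to post the `MA_JOB_TYPE_QUIT` event:
+
+```c
+// Post MA_JOB_TYPE_QUIT. This wakes up the job threads and causes ma_resource_manager_next_job()
+// to return MA_CANCELLED, which the custom job thread above uses as its signal to exit.
+ma_resource_manager_post_job_quit(pMyResourceManager);
+
+// ... join your custom job threads here so no more jobs are being processed ...
+
+// With all job threads stopped it's safe to tear down the resource manager.
+ma_resource_manager_uninit(pMyResourceManager);
+```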
+ +When loading a file, it's sometimes convenient to be able to customize how files are opened and +read instead of using standard `fopen()`, `fclose()`, etc. which is what miniaudio will use by +default. This can be done by setting `pVFS` member of the resource manager's config: + +```c +// Initialize your custom VFS object. See documentation for VFS for information on how to do this. +my_custom_vfs vfs = my_custom_vfs_init(); + +config = ma_resource_manager_config_init(); +config.pVFS = &vfs; +``` + +This is particularly useful in programs like games where you want to read straight from an archive +rather than the normal file system. If you do not specify a custom VFS, the resource manager will +use the operating system's normal file operations. + +To load a sound file and create a data source, call `ma_resource_manager_data_source_init()`. When +loading a sound you need to specify the file path and options for how the sounds should be loaded. +By default a sound will be loaded synchronously. The returned data source is owned by the caller +which means the caller is responsible for the allocation and freeing of the data source. Below is +an example for initializing a data source: + +```c +ma_resource_manager_data_source dataSource; +ma_result result = ma_resource_manager_data_source_init(pResourceManager, pFilePath, flags, &dataSource); +if (result != MA_SUCCESS) { +// Error. +} + +// ... + +// A ma_resource_manager_data_source object is compatible with the `ma_data_source` API. To read data, just call +// the `ma_data_source_read_pcm_frames()` like you would with any normal data source. +result = ma_data_source_read_pcm_frames(&dataSource, pDecodedData, frameCount, &framesRead); +if (result != MA_SUCCESS) { +// Failed to read PCM frames. +} + +// ... + +ma_resource_manager_data_source_uninit(pResourceManager, &dataSource); +``` + +The `flags` parameter specifies how you want to perform loading of the sound file. It can be a +combination of the following flags: + +``` +MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM +MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE +MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC +MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT +``` + +When no flags are specified (set to 0), the sound will be fully loaded into memory, but not +decoded, meaning the raw file data will be stored in memory, and then dynamically decoded when +`ma_data_source_read_pcm_frames()` is called. To instead decode the audio data before storing it in +memory, use the `MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE` flag. By default, the sound file will +be loaded synchronously, meaning `ma_resource_manager_data_source_init()` will only return after +the entire file has been loaded. This is good for simplicity, but can be prohibitively slow. You +can instead load the sound asynchronously using the `MA_RESOURCE_MANAGER_DATA_SOURCE_ASYNC` flag. +This will result in `ma_resource_manager_data_source_init()` returning quickly, but no data will be +returned by `ma_data_source_read_pcm_frames()` until some data is available. When no data is +available because the asynchronous decoding hasn't caught up, `MA_BUSY` will be returned by +`ma_data_source_read_pcm_frames()`. + +For large sounds, it's often prohibitive to store the entire file in memory. To mitigate this, you +can instead stream audio data which you can do by specifying the +`MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM` flag. When streaming, data will be decoded in 1 +second pages. 
When a new page needs to be decoded, a job will be posted to the job queue and then +subsequently processed in a job thread. + +For in-memory sounds, reference counting is used to ensure the data is loaded only once. This means +multiple calls to `ma_resource_manager_data_source_init()` with the same file path will result in +the file data only being loaded once. Each call to `ma_resource_manager_data_source_init()` must be +matched up with a call to `ma_resource_manager_data_source_uninit()`. Sometimes it can be useful +for a program to register self-managed raw audio data and associate it with a file path. Use the +`ma_resource_manager_register_*()` and `ma_resource_manager_unregister_*()` APIs to do this. +`ma_resource_manager_register_decoded_data()` is used to associate a pointer to raw, self-managed +decoded audio data in the specified data format with the specified name. Likewise, +`ma_resource_manager_register_encoded_data()` is used to associate a pointer to raw self-managed +encoded audio data (the raw file data) with the specified name. Note that these names need not be +actual file paths. When `ma_resource_manager_data_source_init()` is called (without the +`MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM` flag), the resource manager will look for these +explicitly registered data buffers and, if found, will use it as the backing data for the data +source. Note that the resource manager does *not* make a copy of this data so it is up to the +caller to ensure the pointer stays valid for it's lifetime. Use +`ma_resource_manager_unregister_data()` to unregister the self-managed data. You can also use +`ma_resource_manager_register_file()` and `ma_resource_manager_unregister_file()` to register and +unregister a file. It does not make sense to use the `MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM` +flag with a self-managed data pointer. + + +6.1. Asynchronous Loading and Synchronization +--------------------------------------------- +When loading asynchronously, it can be useful to poll whether or not loading has finished. Use +`ma_resource_manager_data_source_result()` to determine this. For in-memory sounds, this will +return `MA_SUCCESS` when the file has been *entirely* decoded. If the sound is still being decoded, +`MA_BUSY` will be returned. Otherwise, some other error code will be returned if the sound failed +to load. For streaming data sources, `MA_SUCCESS` will be returned when the first page has been +decoded and the sound is ready to be played. If the first page is still being decoded, `MA_BUSY` +will be returned. Otherwise, some other error code will be returned if the sound failed to load. + +In addition to polling, you can also use a simple synchronization object called a "fence" to wait +for asynchronously loaded sounds to finish. This is called `ma_fence`. The advantage to using a +fence is that it can be used to wait for a group of sounds to finish loading rather than waiting +for sounds on an individual basis. There are two stages to loading a sound: + +* Initialization of the internal decoder; and +* Completion of decoding of the file (the file is fully decoded) + +You can specify separate fences for each of the different stages. Waiting for the initialization +of the internal decoder is important for when you need to know the sample format, channels and +sample rate of the file. + +The example below shows how you could use a fence when loading a number of sounds: + +```c +// This fence will be released when all sounds are finished loading entirely. 
+ma_fence fence; +ma_fence_init(&fence); + +// This will be passed into the initialization routine for each sound. +ma_resource_manager_pipeline_notifications notifications = ma_resource_manager_pipeline_notifications_init(); +notifications.done.pFence = &fence; + +// Now load a bunch of sounds: +for (iSound = 0; iSound < soundCount; iSound += 1) { +ma_resource_manager_data_source_init(pResourceManager, pSoundFilePaths[iSound], flags, ¬ifications, &pSoundSources[iSound]); +} + +// ... DO SOMETHING ELSE WHILE SOUNDS ARE LOADING ... + +// Wait for loading of sounds to finish. +ma_fence_wait(&fence); +``` + +In the example above we used a fence for waiting until the entire file has been fully decoded. If +you only need to wait for the initialization of the internal decoder to complete, you can use the +`init` member of the `ma_resource_manager_pipeline_notifications` object: + +```c +notifications.init.pFence = &fence; +``` + +If a fence is not appropriate for your situation, you can instead use a callback that is fired on +an individual sound basis. This is done in a very similar way to fences: + +```c +typedef struct +{ +ma_async_notification_callbacks cb; +void* pMyData; +} my_notification; + +void my_notification_callback(ma_async_notification* pNotification) +{ +my_notification* pMyNotification = (my_notification*)pNotification; + +// Do something in response to the sound finishing loading. +} + +... + +my_notification myCallback; +myCallback.cb.onSignal = my_notification_callback; +myCallback.pMyData = pMyData; + +ma_resource_manager_pipeline_notifications notifications = ma_resource_manager_pipeline_notifications_init(); +notifications.done.pNotification = &myCallback; + +ma_resource_manager_data_source_init(pResourceManager, "my_sound.wav", flags, ¬ifications, &mySound); +``` + +In the example above we just extend the `ma_async_notification_callbacks` object and pass an +instantiation into the `ma_resource_manager_pipeline_notifications` in the same way as we did with +the fence, only we set `pNotification` instead of `pFence`. You can set both of these at the same +time and they should both work as expected. If using the `pNotification` system, you need to ensure +your `ma_async_notification_callbacks` object stays valid. + + + +6.2. Resource Manager Implementation Details +-------------------------------------------- +Resources are managed in two main ways: + +* By storing the entire sound inside an in-memory buffer (referred to as a data buffer) +* By streaming audio data on the fly (referred to as a data stream) + +A resource managed data source (`ma_resource_manager_data_source`) encapsulates a data buffer or +data stream, depending on whether or not the data source was initialized with the +`MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM` flag. If so, it will make use of a +`ma_resource_manager_data_stream` object. Otherwise it will use a `ma_resource_manager_data_buffer` +object. Both of these objects are data sources which means they can be used with any +`ma_data_source_*()` API. + +Another major feature of the resource manager is the ability to asynchronously decode audio files. +This relieves the audio thread of time-consuming decoding which can negatively affect scalability +due to the audio thread needing to complete it's work extremely quickly to avoid glitching. +Asynchronous decoding is achieved through a job system. There is a central multi-producer, +multi-consumer, fixed-capacity job queue. 
When some asynchronous work needs to be done, a job is
+posted to the queue which is then read by a job thread. The number of job threads can be
+configured for improved scalability, and job threads can all run in parallel without needing to
+worry about the order of execution (how this is achieved is explained below).
+
+When a sound is being loaded asynchronously, playback can begin before the sound has been fully
+decoded. This enables the application to start playback of the sound quickly, while at the same
+time allowing the resource manager to keep loading in the background. Since there may be fewer
+threads than the number of sounds being loaded at a given time, a simple scheduling system is used
+to keep decoding time balanced and fair. The resource manager achieves this by splitting decoding
+into chunks called pages. By default, each page is 1 second long. When a page has been decoded, a
+new job will be posted to start decoding the next page. By dividing decoding up into pages, an
+individual sound shouldn't ever delay every other sound from having its first page decoded. Of
+course, when loading many sounds at the same time, there will always be some amount of time
+required to process jobs in the queue, so in heavy load situations there will still be some delay.
+To determine if a data source is ready to have some frames read, use
+`ma_resource_manager_data_source_get_available_frames()`. This will return the number of frames
+available starting from the current position.
+
+
+6.2.1. Job Queue
+----------------
+The resource manager uses a job queue which is multi-producer, multi-consumer, and fixed-capacity.
+This job queue is not currently lock-free, and instead uses a spinlock to achieve thread-safety.
+Only a fixed number of jobs can be allocated and inserted into the queue, which is done through a
+lock-free data structure for allocating an index into a fixed sized array, with reference counting
+for mitigation of the ABA problem. The reference count is 32-bit.
+
+For many types of jobs it's important that they execute in a specific order. In these cases, jobs
+are executed serially. For the resource manager, serial execution of jobs is only required on a
+per-object basis (per data buffer or per data stream). Each of these objects stores an execution
+counter. When a job is posted it is associated with an execution counter. When a job is processed,
+the job thread checks whether the job's execution counter equals the execution counter of the
+owning object and, if so, processes the job. If the counters are not equal, the job will be posted
+back onto the job queue for later processing. When the job finishes processing, the execution
+counter of the owning object is incremented. This system means that no matter how many job threads
+are executing, decoding of an individual sound will always be processed serially. The advantage of
+having multiple threads comes into play when loading multiple sounds at the same time. A
+simplified sketch of this scheme is shown below.
+
+The resource manager's job queue is not 100% lock-free and will use a spinlock to achieve
+thread-safety for a very small section of code. This is only relevant when the resource manager
+uses more than one job thread. If only using a single job thread, which is the default, the
+lock should never actually wait in practice. The amount of time spent locking should be quite
+short, but it's something to be aware of for those who have pedantic lock-free requirements and
+need to use more than one job thread. There are plans to remove this lock in a future version.
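+
+To make the execution counter scheme described above more concrete, below is a simplified,
+illustrative sketch. The `my_object`, `my_job` and `my_try_process_job` names are made up for this
+example and are not part of miniaudio's API or its actual internal implementation:
+
+```c
+/* Illustrative only; not miniaudio's internals. */
+typedef struct
+{
+    unsigned int executionCounter;  /* Incremented each time a job for this object completes. */
+} my_object;
+
+typedef struct
+{
+    my_object*   pObject;  /* The data buffer or data stream this job operates on. */
+    unsigned int order;    /* The execution counter value the job was tagged with when posted. */
+} my_job;
+
+/* Returns 1 if the job was processed, or 0 if it's not this job's turn yet, in which case the
+   caller posts the job back onto the queue. */
+static int my_try_process_job(my_job* pJob)
+{
+    if (pJob->order != pJob->pObject->executionCounter) {
+        return 0;   /* Out of order. Repost and try again later. */
+    }
+
+    /* ... do the actual work for the job here ... */
+
+    pJob->pObject->executionCounter += 1;   /* Unblocks the next job in the sequence. */
+    return 1;
+}
+```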
+ +In addition, posting a job will release a semaphore, which on Win32 is implemented with +`ReleaseSemaphore` and on POSIX platforms via a condition variable: + +```c +pthread_mutex_lock(&pSemaphore->lock); +{ +pSemaphore->value += 1; +pthread_cond_signal(&pSemaphore->cond); +} +pthread_mutex_unlock(&pSemaphore->lock); +``` + +Again, this is relevant for those with strict lock-free requirements in the audio thread. To avoid +this, you can use non-blocking mode (via the `MA_JOB_QUEUE_FLAG_NON_BLOCKING` +flag) and implement your own job processing routine (see the "Resource Manager" section above for +details on how to do this). + + + +6.2.2. Data Buffers +------------------- +When the `MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM` flag is excluded at initialization time, the +resource manager will try to load the data into an in-memory data buffer. Before doing so, however, +it will first check if the specified file is already loaded. If so, it will increment a reference +counter and just use the already loaded data. This saves both time and memory. When the data buffer +is uninitialized, the reference counter will be decremented. If the counter hits zero, the file +will be unloaded. This is a detail to keep in mind because it could result in excessive loading and +unloading of a sound. For example, the following sequence will result in a file be loaded twice, +once after the other: + +```c +ma_resource_manager_data_source_init(pResourceManager, "my_file", ..., &myDataBuffer0); // Refcount = 1. Initial load. +ma_resource_manager_data_source_uninit(pResourceManager, &myDataBuffer0); // Refcount = 0. Unloaded. + +ma_resource_manager_data_source_init(pResourceManager, "my_file", ..., &myDataBuffer1); // Refcount = 1. Reloaded because previous uninit() unloaded it. +ma_resource_manager_data_source_uninit(pResourceManager, &myDataBuffer1); // Refcount = 0. Unloaded. +``` + +A binary search tree (BST) is used for storing data buffers as it has good balance between +efficiency and simplicity. The key of the BST is a 64-bit hash of the file path that was passed +into `ma_resource_manager_data_source_init()`. The advantage of using a hash is that it saves +memory over storing the entire path, has faster comparisons, and results in a mostly balanced BST +due to the random nature of the hash. The disadvantages are that file names are case-sensitive and +there's a small chance of name collisions. If case-sensitivity is an issue, you should normalize +your file names to upper- or lower-case before initializing your data sources. If name collisions +become an issue, you'll need to change the name of one of the colliding names or just not use the +resource manager. + +When a sound file has not already been loaded and the `MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC` +flag is excluded, the file will be decoded synchronously by the calling thread. There are two +options for controlling how the audio is stored in the data buffer - encoded or decoded. When the +`MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE` option is excluded, the raw file data will be stored +in memory. Otherwise the sound will be decoded before storing it in memory. Synchronous loading is +a very simple and standard process of simply adding an item to the BST, allocating a block of +memory and then decoding (if `MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE` is specified). + +When the `MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC` flag is specified, loading of the data buffer +is done asynchronously. 
In this case, a job is posted to the queue to start loading and then the +function immediately returns, setting an internal result code to `MA_BUSY`. This result code is +returned when the program calls `ma_resource_manager_data_source_result()`. When decoding has fully +completed `MA_SUCCESS` will be returned. This can be used to know if loading has fully completed. + +When loading asynchronously, a single job is posted to the queue of the type +`MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER_NODE`. This involves making a copy of the file path and +associating it with job. When the job is processed by the job thread, it will first load the file +using the VFS associated with the resource manager. When using a custom VFS, it's important that it +be completely thread-safe because it will be used from one or more job threads at the same time. +Individual files should only ever be accessed by one thread at a time, however. After opening the +file via the VFS, the job will determine whether or not the file is being decoded. If not, it +simply allocates a block of memory and loads the raw file contents into it and returns. On the +other hand, when the file is being decoded, it will first allocate a decoder on the heap and +initialize it. Then it will check if the length of the file is known. If so it will allocate a +block of memory to store the decoded output and initialize it to silence. If the size is unknown, +it will allocate room for one page. After memory has been allocated, the first page will be +decoded. If the sound is shorter than a page, the result code will be set to `MA_SUCCESS` and the +completion event will be signalled and loading is now complete. If, however, there is more to +decode, a job with the code `MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_BUFFER_NODE` is posted. This job +will decode the next page and perform the same process if it reaches the end. If there is more to +decode, the job will post another `MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_BUFFER_NODE` job which will +keep on happening until the sound has been fully decoded. For sounds of an unknown length, each +page will be linked together as a linked list. Internally this is implemented via the +`ma_paged_audio_buffer` object. + + +6.2.3. Data Streams +------------------- +Data streams only ever store two pages worth of data for each instance. They are most useful for +large sounds like music tracks in games that would consume too much memory if fully decoded in +memory. After every frame from a page has been read, a job will be posted to load the next page +which is done from the VFS. + +For data streams, the `MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC` flag will determine whether or +not initialization of the data source waits until the two pages have been decoded. When unset, +`ma_resource_manager_data_source_init()` will wait until the two pages have been loaded, otherwise +it will return immediately. + +When frames are read from a data stream using `ma_resource_manager_data_source_read_pcm_frames()`, +`MA_BUSY` will be returned if there are no frames available. If there are some frames available, +but less than the number requested, `MA_SUCCESS` will be returned, but the actual number of frames +read will be less than the number requested. Due to the asynchronous nature of data streams, +seeking is also asynchronous. If the data stream is in the middle of a seek, `MA_BUSY` will be +returned when trying to read frames. 
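+
+As a rough sketch of how a read might be handled (assuming `pDataSource` points to a
+`ma_resource_manager_data_source` that was initialized with the
+`MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM` flag, and that `pFramesOut` and `frameCount` come
+from your audio callback):
+
+```c
+ma_uint64 framesRead = 0;
+ma_result result = ma_resource_manager_data_source_read_pcm_frames(pDataSource, pFramesOut, frameCount, &framesRead);
+if (result == MA_BUSY) {
+    // Nothing is decoded yet, or a seek is still in progress. Output silence and try again on the
+    // next callback.
+} else if (result == MA_AT_END) {
+    // The end of the stream was reached.
+} else if (result != MA_SUCCESS) {
+    // Some other error occurred.
+} else {
+    // Success, but framesRead may be less than frameCount if decoding hasn't caught up yet.
+}
+```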
+
+When `ma_resource_manager_data_source_read_pcm_frames()` results in a page getting fully consumed,
+a job is posted to load the next page. This will be posted from the same thread that called
+`ma_resource_manager_data_source_read_pcm_frames()`.
+
+Data streams are uninitialized by posting a job to the queue, but the function won't return until
+that job has been processed. The reason for this is that the caller owns the data stream object and
+therefore miniaudio needs to ensure everything completes before handing back control to the caller.
+Also, if the data stream is uninitialized while pages are in the middle of decoding, they must
+complete before any underlying objects are destroyed, and the job system handles this cleanly.
+
+Note that when a new page needs to be loaded, a job will be posted to the resource manager's job
+thread from the audio thread. You must keep in mind the details mentioned in the "Job Queue"
+section above regarding locking when posting an event if you require a strictly lock-free audio
+thread.
+
+
+
+7. Node Graph
+=============
+miniaudio's routing infrastructure follows a node graph paradigm. The idea is that you create a
+node whose outputs are attached to inputs of another node, thereby creating a graph. There are
+different types of nodes, with each node in the graph processing input data to produce output,
+which is then fed through the chain. Each node in the graph can apply its own custom effects. At
+the start of the graph will usually be one or more data source nodes which have no inputs and
+instead pull their data from a data source. At the end of the graph is an endpoint which represents
+the end of the chain and is where the final output is ultimately extracted from.
+
+Each node has a number of input buses and a number of output buses. An output bus from a node is
+attached to an input bus of another. Multiple nodes can connect their output buses to another
+node's input bus, in which case their outputs will be mixed before processing by the node. Below is
+a diagram that illustrates a hypothetical node graph setup:
+
+```
+>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Data flows left to right >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+
++---------------+                              +-----------------+
+| Data Source 1 =----+    +----------+    +----= Low Pass Filter =----+
++---------------+    |    |          =----+    +-----------------+    |    +----------+
+                     +----= Splitter |                                +----= ENDPOINT |
++---------------+    |    |          =----+    +-----------------+    |    +----------+
+| Data Source 2 =----+    +----------+    +----= Echo / Delay    =----+
++---------------+                              +-----------------+
+```
+
+The graph above starts with two data sources whose outputs are attached to the input of a splitter
+node. It's at this point that the two data sources are mixed. After mixing, the splitter performs
+its processing routine and produces two outputs, each of which is simply a duplicate of the input
+stream. One output is attached to a low pass filter, whereas the other output is attached to an
+echo/delay. The outputs of the low pass filter and the echo are attached to the endpoint, and
+since they're both connected to the same input bus, they'll be mixed.
+
+Each input bus must be configured to accept the same number of channels, but the number of channels
+used by input buses can be different to the number of channels for output buses, in which case
+miniaudio will automatically convert the input data to the output channel count before processing.
+The number of channels of an output bus of one node must match the channel count of the input bus +it's attached to. The channel counts cannot be changed after the node has been initialized. If you +attempt to attach an output bus to an input bus with a different channel count, attachment will +fail. + +To use a node graph, you first need to initialize a `ma_node_graph` object. This is essentially a +container around the entire graph. The `ma_node_graph` object is required for some thread-safety +issues which will be explained later. A `ma_node_graph` object is initialized using miniaudio's +standard config/init system: + +```c +ma_node_graph_config nodeGraphConfig = ma_node_graph_config_init(myChannelCount); + +result = ma_node_graph_init(&nodeGraphConfig, NULL, &nodeGraph); // Second parameter is a pointer to allocation callbacks. +if (result != MA_SUCCESS) { +// Failed to initialize node graph. +} +``` + +When you initialize the node graph, you're specifying the channel count of the endpoint. The +endpoint is a special node which has one input bus and one output bus, both of which have the +same channel count, which is specified in the config. Any nodes that connect directly to the +endpoint must be configured such that their output buses have the same channel count. When you read +audio data from the node graph, it'll have the channel count you specified in the config. To read +data from the graph: + +```c +ma_uint32 framesRead; +result = ma_node_graph_read_pcm_frames(&nodeGraph, pFramesOut, frameCount, &framesRead); +if (result != MA_SUCCESS) { +// Failed to read data from the node graph. +} +``` + +When you read audio data, miniaudio starts at the node graph's endpoint node which then pulls in +data from it's input attachments, which in turn recursively pull in data from their inputs, and so +on. At the start of the graph there will be some kind of data source node which will have zero +inputs and will instead read directly from a data source. The base nodes don't literally need to +read from a `ma_data_source` object, but they will always have some kind of underlying object that +sources some kind of audio. The `ma_data_source_node` node can be used to read from a +`ma_data_source`. Data is always in floating-point format and in the number of channels you +specified when the graph was initialized. The sample rate is defined by the underlying data sources. +It's up to you to ensure they use a consistent and appropriate sample rate. + +The `ma_node` API is designed to allow custom nodes to be implemented with relative ease, but +miniaudio includes a few stock nodes for common functionality. This is how you would initialize a +node which reads directly from a data source (`ma_data_source_node`) which is an example of one +of the stock nodes that comes with miniaudio: + +```c +ma_data_source_node_config config = ma_data_source_node_config_init(pMyDataSource); + +ma_data_source_node dataSourceNode; +result = ma_data_source_node_init(&nodeGraph, &config, NULL, &dataSourceNode); +if (result != MA_SUCCESS) { +// Failed to create data source node. +} +``` + +The data source node will use the output channel count to determine the channel count of the output +bus. There will be 1 output bus and 0 input buses (data will be drawn directly from the data +source). The data source must output to floating-point (`ma_format_f32`) or else an error will be +returned from `ma_data_source_node_init()`. + +By default the node will not be attached to the graph. 
To do so, use `ma_node_attach_output_bus()`: + +```c +result = ma_node_attach_output_bus(&dataSourceNode, 0, ma_node_graph_get_endpoint(&nodeGraph), 0); +if (result != MA_SUCCESS) { +// Failed to attach node. +} +``` + +The code above connects the data source node directly to the endpoint. Since the data source node +has only a single output bus, the index will always be 0. Likewise, the endpoint only has a single +input bus which means the input bus index will also always be 0. + +To detach a specific output bus, use `ma_node_detach_output_bus()`. To detach all output buses, use +`ma_node_detach_all_output_buses()`. If you want to just move the output bus from one attachment to +another, you do not need to detach first. You can just call `ma_node_attach_output_bus()` and it'll +deal with it for you. + +Less frequently you may want to create a specialized node. This will be a node where you implement +your own processing callback to apply a custom effect of some kind. This is similar to initializing +one of the stock node types, only this time you need to specify a pointer to a vtable containing a +pointer to the processing function and the number of input and output buses. Example: + +```c +static void my_custom_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ +// Do some processing of ppFramesIn (one stream of audio data per input bus) +const float* pFramesIn_0 = ppFramesIn[0]; // Input bus @ index 0. +const float* pFramesIn_1 = ppFramesIn[1]; // Input bus @ index 1. +float* pFramesOut_0 = ppFramesOut[0]; // Output bus @ index 0. + +// Do some processing. On input, `pFrameCountIn` will be the number of input frames in each +// buffer in `ppFramesIn` and `pFrameCountOut` will be the capacity of each of the buffers +// in `ppFramesOut`. On output, `pFrameCountIn` should be set to the number of input frames +// your node consumed and `pFrameCountOut` should be set the number of output frames that +// were produced. +// +// You should process as many frames as you can. If your effect consumes input frames at the +// same rate as output frames (always the case, unless you're doing resampling), you need +// only look at `ppFramesOut` and process that exact number of frames. If you're doing +// resampling, you'll need to be sure to set both `pFrameCountIn` and `pFrameCountOut` +// properly. +} + +static ma_node_vtable my_custom_node_vtable = +{ +my_custom_node_process_pcm_frames, // The function that will be called to process your custom node. This is where you'd implement your effect processing. +NULL, // Optional. A callback for calculating the number of input frames that are required to process a specified number of output frames. +2, // 2 input buses. +1, // 1 output bus. +0 // Default flags. +}; + +... + +// Each bus needs to have a channel count specified. To do this you need to specify the channel +// counts in an array and then pass that into the node config. +ma_uint32 inputChannels[2]; // Equal in size to the number of input channels specified in the vtable. +ma_uint32 outputChannels[1]; // Equal in size to the number of output channels specified in the vtable. 
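+
+// NOTE: channelsIn and channelsOut are assumed to be variables defined elsewhere by your
+// application holding the desired channel count for each bus (2 for stereo, for example).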
+ +inputChannels[0] = channelsIn; +inputChannels[1] = channelsIn; +outputChannels[0] = channelsOut; + +ma_node_config nodeConfig = ma_node_config_init(); +nodeConfig.vtable = &my_custom_node_vtable; +nodeConfig.pInputChannels = inputChannels; +nodeConfig.pOutputChannels = outputChannels; + +ma_node_base node; +result = ma_node_init(&nodeGraph, &nodeConfig, NULL, &node); +if (result != MA_SUCCESS) { +// Failed to initialize node. +} +``` + +When initializing a custom node, as in the code above, you'll normally just place your vtable in +static space. The number of input and output buses are specified as part of the vtable. If you need +a variable number of buses on a per-node bases, the vtable should have the relevant bus count set +to `MA_NODE_BUS_COUNT_UNKNOWN`. In this case, the bus count should be set in the node config: + +```c +static ma_node_vtable my_custom_node_vtable = +{ +my_custom_node_process_pcm_frames, // The function that will be called process your custom node. This is where you'd implement your effect processing. +NULL, // Optional. A callback for calculating the number of input frames that are required to process a specified number of output frames. +MA_NODE_BUS_COUNT_UNKNOWN, // The number of input buses is determined on a per-node basis. +1, // 1 output bus. +0 // Default flags. +}; + +... + +ma_node_config nodeConfig = ma_node_config_init(); +nodeConfig.vtable = &my_custom_node_vtable; +nodeConfig.inputBusCount = myBusCount; // <-- Since the vtable specifies MA_NODE_BUS_COUNT_UNKNOWN, the input bus count should be set here. +nodeConfig.pInputChannels = inputChannels; // <-- Make sure there are nodeConfig.inputBusCount elements in this array. +nodeConfig.pOutputChannels = outputChannels; // <-- The vtable specifies 1 output bus, so there must be 1 element in this array. +``` + +In the above example it's important to never set the `inputBusCount` and `outputBusCount` members +to anything other than their defaults if the vtable specifies an explicit count. They can only be +set if the vtable specifies MA_NODE_BUS_COUNT_UNKNOWN in the relevant bus count. + +Most often you'll want to create a structure to encapsulate your node with some extra data. You +need to make sure the `ma_node_base` object is your first member of the structure: + +```c +typedef struct +{ +ma_node_base base; // <-- Make sure this is always the first member. +float someCustomData; +} my_custom_node; +``` + +By doing this, your object will be compatible with all `ma_node` APIs and you can attach it to the +graph just like any other node. + +In the custom processing callback (`my_custom_node_process_pcm_frames()` in the example above), the +number of channels for each bus is what was specified by the config when the node was initialized +with `ma_node_init()`. In addition, all attachments to each of the input buses will have been +pre-mixed by miniaudio. The config allows you to specify different channel counts for each +individual input and output bus. It's up to the effect to handle it appropriate, and if it can't, +return an error in it's initialization routine. + +Custom nodes can be assigned some flags to describe their behaviour. 
These are set via the vtable +and include the following: + ++-----------------------------------------+---------------------------------------------------+ +| Flag Name | Description | ++-----------------------------------------+---------------------------------------------------+ +| MA_NODE_FLAG_PASSTHROUGH | Useful for nodes that do not do any kind of audio | +| | processing, but are instead used for tracking | +| | time, handling events, etc. Also used by the | +| | internal endpoint node. It reads directly from | +| | the input bus to the output bus. Nodes with this | +| | flag must have exactly 1 input bus and 1 output | +| | bus, and both buses must have the same channel | +| | counts. | ++-----------------------------------------+---------------------------------------------------+ +| MA_NODE_FLAG_CONTINUOUS_PROCESSING | Causes the processing callback to be called even | +| | when no data is available to be read from input | +| | attachments. When a node has at least one input | +| | bus, but there are no inputs attached or the | +| | inputs do not deliver any data, the node's | +| | processing callback will not get fired. This flag | +| | will make it so the callback is always fired | +| | regardless of whether or not any input data is | +| | received. This is useful for effects like | +| | echos where there will be a tail of audio data | +| | that still needs to be processed even when the | +| | original data sources have reached their ends. It | +| | may also be useful for nodes that must always | +| | have their processing callback fired when there | +| | are no inputs attached. | ++-----------------------------------------+---------------------------------------------------+ +| MA_NODE_FLAG_ALLOW_NULL_INPUT | Used in conjunction with | +| | `MA_NODE_FLAG_CONTINUOUS_PROCESSING`. When this | +| | is set, the `ppFramesIn` parameter of the | +| | processing callback will be set to NULL when | +| | there are no input frames are available. When | +| | this is unset, silence will be posted to the | +| | processing callback. | ++-----------------------------------------+---------------------------------------------------+ +| MA_NODE_FLAG_DIFFERENT_PROCESSING_RATES | Used to tell miniaudio that input and output | +| | frames are processed at different rates. You | +| | should set this for any nodes that perform | +| | resampling. | ++-----------------------------------------+---------------------------------------------------+ +| MA_NODE_FLAG_SILENT_OUTPUT | Used to tell miniaudio that a node produces only | +| | silent output. This is useful for nodes where you | +| | don't want the output to contribute to the final | +| | mix. An example might be if you want split your | +| | stream and have one branch be output to a file. | +| | When using this flag, you should avoid writing to | +| | the output buffer of the node's processing | +| | callback because miniaudio will ignore it anyway. | ++-----------------------------------------+---------------------------------------------------+ + + +If you need to make a copy of an audio stream for effect processing you can use a splitter node +called `ma_splitter_node`. This takes has 1 input bus and splits the stream into 2 output buses. +You can use it like this: + +```c +ma_splitter_node_config splitterNodeConfig = ma_splitter_node_config_init(channels); + +ma_splitter_node splitterNode; +result = ma_splitter_node_init(&nodeGraph, &splitterNodeConfig, NULL, &splitterNode); +if (result != MA_SUCCESS) { +// Failed to create node. 
+} + +// Attach your output buses to two different input buses (can be on two different nodes). +ma_node_attach_output_bus(&splitterNode, 0, ma_node_graph_get_endpoint(&nodeGraph), 0); // Attach directly to the endpoint. +ma_node_attach_output_bus(&splitterNode, 1, &myEffectNode, 0); // Attach to input bus 0 of some effect node. +``` + +The volume of an output bus can be configured on a per-bus basis: + +```c +ma_node_set_output_bus_volume(&splitterNode, 0, 0.5f); +ma_node_set_output_bus_volume(&splitterNode, 1, 0.5f); +``` + +In the code above we're using the splitter node from before and changing the volume of each of the +copied streams. + +You can start and stop a node with the following: + +```c +ma_node_set_state(&splitterNode, ma_node_state_started); // The default state. +ma_node_set_state(&splitterNode, ma_node_state_stopped); +``` + +By default the node is in a started state, but since it won't be connected to anything won't +actually be invoked by the node graph until it's connected. When you stop a node, data will not be +read from any of it's input connections. You can use this property to stop a group of sounds +atomically. + +You can configure the initial state of a node in it's config: + +```c +nodeConfig.initialState = ma_node_state_stopped; +``` + +Note that for the stock specialized nodes, all of their configs will have a `nodeConfig` member +which is the config to use with the base node. This is where the initial state can be configured +for specialized nodes: + +```c +dataSourceNodeConfig.nodeConfig.initialState = ma_node_state_stopped; +``` + +When using a specialized node like `ma_data_source_node` or `ma_splitter_node`, be sure to not +modify the `vtable` member of the `nodeConfig` object. + + +7.1. Timing +----------- +The node graph supports starting and stopping nodes at scheduled times. This is especially useful +for data source nodes where you want to get the node set up, but only start playback at a specific +time. There are two clocks: local and global. + +A local clock is per-node, whereas the global clock is per graph. Scheduling starts and stops can +only be done based on the global clock because the local clock will not be running while the node +is stopped. The global clocks advances whenever `ma_node_graph_read_pcm_frames()` is called. On the +other hand, the local clock only advances when the node's processing callback is fired, and is +advanced based on the output frame count. + +To retrieve the global time, use `ma_node_graph_get_time()`. The global time can be set with +`ma_node_graph_set_time()` which might be useful if you want to do seeking on a global timeline. +Getting and setting the local time is similar. Use `ma_node_get_time()` to retrieve the local time, +and `ma_node_set_time()` to set the local time. The global and local times will be advanced by the +audio thread, so care should be taken to avoid data races. Ideally you should avoid calling these +outside of the node processing callbacks which are always run on the audio thread. + +There is basic support for scheduling the starting and stopping of nodes. You can only schedule one +start and one stop at a time. This is mainly intended for putting nodes into a started or stopped +state in a frame-exact manner. Without this mechanism, starting and stopping of a node is limited +to the resolution of a call to `ma_node_graph_read_pcm_frames()` which would typically be in blocks +of several milliseconds. 
The following APIs can be used for scheduling node states: + +```c +ma_node_set_state_time() +ma_node_get_state_time() +``` + +The time is absolute and must be based on the global clock. An example is below: + +```c +ma_node_set_state_time(&myNode, ma_node_state_started, sampleRate*1); // Delay starting to 1 second. +ma_node_set_state_time(&myNode, ma_node_state_stopped, sampleRate*5); // Delay stopping to 5 seconds. +``` + +An example for changing the state using a relative time. + +```c +ma_node_set_state_time(&myNode, ma_node_state_started, sampleRate*1 + ma_node_graph_get_time(&myNodeGraph)); +ma_node_set_state_time(&myNode, ma_node_state_stopped, sampleRate*5 + ma_node_graph_get_time(&myNodeGraph)); +``` + +Note that due to the nature of multi-threading the times may not be 100% exact. If this is an +issue, consider scheduling state changes from within a processing callback. An idea might be to +have some kind of passthrough trigger node that is used specifically for tracking time and handling +events. + + + +7.2. Thread Safety and Locking +------------------------------ +When processing audio, it's ideal not to have any kind of locking in the audio thread. Since it's +expected that `ma_node_graph_read_pcm_frames()` would be run on the audio thread, it does so +without the use of any locks. This section discusses the implementation used by miniaudio and goes +over some of the compromises employed by miniaudio to achieve this goal. Note that the current +implementation may not be ideal - feedback and critiques are most welcome. + +The node graph API is not *entirely* lock-free. Only `ma_node_graph_read_pcm_frames()` is expected +to be lock-free. Attachment, detachment and uninitialization of nodes use locks to simplify the +implementation, but are crafted in a way such that such locking is not required when reading audio +data from the graph. Locking in these areas are achieved by means of spinlocks. + +The main complication with keeping `ma_node_graph_read_pcm_frames()` lock-free stems from the fact +that a node can be uninitialized, and it's memory potentially freed, while in the middle of being +processed on the audio thread. There are times when the audio thread will be referencing a node, +which means the uninitialization process of a node needs to make sure it delays returning until the +audio thread is finished so that control is not handed back to the caller thereby giving them a +chance to free the node's memory. + +When the audio thread is processing a node, it does so by reading from each of the output buses of +the node. In order for a node to process data for one of it's output buses, it needs to read from +each of it's input buses, and so on an so forth. It follows that once all output buses of a node +are detached, the node as a whole will be disconnected and no further processing will occur unless +it's output buses are reattached, which won't be happening when the node is being uninitialized. +By having `ma_node_detach_output_bus()` wait until the audio thread is finished with it, we can +simplify a few things, at the expense of making `ma_node_detach_output_bus()` a bit slower. By +doing this, the implementation of `ma_node_uninit()` becomes trivial - just detach all output +nodes, followed by each of the attachments to each of it's input nodes, and then do any final clean +up. + +With the above design, the worst-case scenario is `ma_node_detach_output_bus()` taking as long as +it takes to process the output bus being detached. 
This will happen if it's called at just the +wrong moment where the audio thread has just iterated it and has just started processing. The +caller of `ma_node_detach_output_bus()` will stall until the audio thread is finished, which +includes the cost of recursively processing it's inputs. This is the biggest compromise made with +the approach taken by miniaudio for it's lock-free processing system. The cost of detaching nodes +earlier in the pipeline (data sources, for example) will be cheaper than the cost of detaching +higher level nodes, such as some kind of final post-processing endpoint. If you need to do mass +detachments, detach starting from the lowest level nodes and work your way towards the final +endpoint node (but don't try detaching the node graph's endpoint). If the audio thread is not +running, detachment will be fast and detachment in any order will be the same. The reason nodes +need to wait for their input attachments to complete is due to the potential for desyncs between +data sources. If the node was to terminate processing mid way through processing it's inputs, +there's a chance that some of the underlying data sources will have been read, but then others not. +That will then result in a potential desynchronization when detaching and reattaching higher-level +nodes. A possible solution to this is to have an option when detaching to terminate processing +before processing all input attachments which should be fairly simple. + +Another compromise, albeit less significant, is locking when attaching and detaching nodes. This +locking is achieved by means of a spinlock in order to reduce memory overhead. A lock is present +for each input bus and output bus. When an output bus is connected to an input bus, both the output +bus and input bus is locked. This locking is specifically for attaching and detaching across +different threads and does not affect `ma_node_graph_read_pcm_frames()` in any way. The locking and +unlocking is mostly self-explanatory, but a slightly less intuitive aspect comes into it when +considering that iterating over attachments must not break as a result of attaching or detaching a +node while iteration is occurring. + +Attaching and detaching are both quite simple. When an output bus of a node is attached to an input +bus of another node, it's added to a linked list. Basically, an input bus is a linked list, where +each item in the list is and output bus. We have some intentional (and convenient) restrictions on +what can done with the linked list in order to simplify the implementation. First of all, whenever +something needs to iterate over the list, it must do so in a forward direction. Backwards iteration +is not supported. Also, items can only be added to the start of the list. + +The linked list is a doubly-linked list where each item in the list (an output bus) holds a pointer +to the next item in the list, and another to the previous item. A pointer to the previous item is +only required for fast detachment of the node - it is never used in iteration. This is an +important property because it means from the perspective of iteration, attaching and detaching of +an item can be done with a single atomic assignment. This is exploited by both the attachment and +detachment process. When attaching the node, the first thing that is done is the setting of the +local "next" and "previous" pointers of the node. After that, the item is "attached" to the list +by simply performing an atomic exchange with the head pointer. 
After that, the node is "attached" +to the list from the perspective of iteration. Even though the "previous" pointer of the next item +hasn't yet been set, from the perspective of iteration it's been attached because iteration will +only be happening in a forward direction which means the "previous" pointer won't actually ever get +used. The same general process applies to detachment. See `ma_node_attach_output_bus()` and +`ma_node_detach_output_bus()` for the implementation of this mechanism. + + + +8. Decoding +=========== +The `ma_decoder` API is used for reading audio files. Decoders are completely decoupled from +devices and can be used independently. Built-in support is included for the following formats: + ++---------+ +| Format | ++---------+ +| WAV | +| MP3 | +| FLAC | ++---------+ + +You can disable the built-in decoders by specifying one or more of the following options before the +miniaudio implementation: + +```c +#define MA_NO_WAV +#define MA_NO_MP3 +#define MA_NO_FLAC +``` + +miniaudio supports the ability to plug in custom decoders. See the section below for details on how +to use custom decoders. + +A decoder can be initialized from a file with `ma_decoder_init_file()`, a block of memory with +`ma_decoder_init_memory()`, or from data delivered via callbacks with `ma_decoder_init()`. Here is +an example for loading a decoder from a file: + +```c +ma_decoder decoder; +ma_result result = ma_decoder_init_file("MySong.mp3", NULL, &decoder); +if (result != MA_SUCCESS) { +return false; // An error occurred. +} + +... + +ma_decoder_uninit(&decoder); +``` + +When initializing a decoder, you can optionally pass in a pointer to a `ma_decoder_config` object +(the `NULL` argument in the example above) which allows you to configure the output format, channel +count, sample rate and channel map: + +```c +ma_decoder_config config = ma_decoder_config_init(ma_format_f32, 2, 48000); +``` + +When passing in `NULL` for decoder config in `ma_decoder_init*()`, the output format will be the +same as that defined by the decoding backend. + +Data is read from the decoder as PCM frames. This will output the number of PCM frames actually +read. If this is less than the requested number of PCM frames it means you've reached the end. The +return value will be `MA_AT_END` if no samples have been read and the end has been reached. + +```c +ma_result result = ma_decoder_read_pcm_frames(pDecoder, pFrames, framesToRead, &framesRead); +if (framesRead < framesToRead) { +// Reached the end. +} +``` + +You can also seek to a specific frame like so: + +```c +ma_result result = ma_decoder_seek_to_pcm_frame(pDecoder, targetFrame); +if (result != MA_SUCCESS) { +return false; // An error occurred. +} +``` + +If you want to loop back to the start, you can simply seek back to the first PCM frame: + +```c +ma_decoder_seek_to_pcm_frame(pDecoder, 0); +``` + +When loading a decoder, miniaudio uses a trial and error technique to find the appropriate decoding +backend. This can be unnecessarily inefficient if the type is already known. In this case you can +use `encodingFormat` variable in the device config to specify a specific encoding format you want +to decode: + +```c +decoderConfig.encodingFormat = ma_encoding_format_wav; +``` + +See the `ma_encoding_format` enum for possible encoding formats. + +The `ma_decoder_init_file()` API will try using the file extension to determine which decoding +backend to prefer. + + +8.1. 
Custom Decoders +-------------------- +It's possible to implement a custom decoder and plug it into miniaudio. This is extremely useful +when you want to use the `ma_decoder` API, but need to support an encoding format that's not one of +the stock formats supported by miniaudio. This can be put to particularly good use when using the +`ma_engine` and/or `ma_resource_manager` APIs because they use `ma_decoder` internally. If, for +example, you wanted to support Opus, you can do so with a custom decoder (there if a reference +Opus decoder in the "extras" folder of the miniaudio repository which uses libopus + libopusfile). + +A custom decoder must implement a data source. A vtable called `ma_decoding_backend_vtable` needs +to be implemented which is then passed into the decoder config: + +```c +ma_decoding_backend_vtable* pCustomBackendVTables[] = +{ +&g_ma_decoding_backend_vtable_libvorbis, +&g_ma_decoding_backend_vtable_libopus +}; + +... + +decoderConfig = ma_decoder_config_init_default(); +decoderConfig.pCustomBackendUserData = NULL; +decoderConfig.ppCustomBackendVTables = pCustomBackendVTables; +decoderConfig.customBackendCount = sizeof(pCustomBackendVTables) / sizeof(pCustomBackendVTables[0]); +``` + +The `ma_decoding_backend_vtable` vtable has the following functions: + +``` +onInit +onInitFile +onInitFileW +onInitMemory +onUninit +``` + +There are only two functions that must be implemented - `onInit` and `onUninit`. The other +functions can be implemented for a small optimization for loading from a file path or memory. If +these are not specified, miniaudio will deal with it for you via a generic implementation. + +When you initialize a custom data source (by implementing the `onInit` function in the vtable) you +will need to output a pointer to a `ma_data_source` which implements your custom decoder. See the +section about data sources for details on how to implement this. Alternatively, see the +"custom_decoders" example in the miniaudio repository. + +The `onInit` function takes a pointer to some callbacks for the purpose of reading raw audio data +from some arbitrary source. You'll use these functions to read from the raw data and perform the +decoding. When you call them, you will pass in the `pReadSeekTellUserData` pointer to the relevant +parameter. + +The `pConfig` parameter in `onInit` can be used to configure the backend if appropriate. It's only +used as a hint and can be ignored. However, if any of the properties are relevant to your decoder, +an optimal implementation will handle the relevant properties appropriately. + +If memory allocation is required, it should be done so via the specified allocation callbacks if +possible (the `pAllocationCallbacks` parameter). + +If an error occurs when initializing the decoder, you should leave `ppBackend` unset, or set to +NULL, and make sure everything is cleaned up appropriately and an appropriate result code returned. +When multiple custom backends are specified, miniaudio will cycle through the vtables in the order +they're listed in the array that's passed into the decoder config so it's important that your +initialization routine is clean. + +When a decoder is uninitialized, the `onUninit` callback will be fired which will give you an +opportunity to clean up and internal data. + + + +9. Encoding +=========== +The `ma_encoding` API is used for writing audio files. The only supported output format is WAV. 
+This can be disabled by specifying the following option before the implementation of miniaudio: + +```c +#define MA_NO_WAV +``` + +An encoder can be initialized to write to a file with `ma_encoder_init_file()` or from data +delivered via callbacks with `ma_encoder_init()`. Below is an example for initializing an encoder +to output to a file. + +```c +ma_encoder_config config = ma_encoder_config_init(ma_encoding_format_wav, FORMAT, CHANNELS, SAMPLE_RATE); +ma_encoder encoder; +ma_result result = ma_encoder_init_file("my_file.wav", &config, &encoder); +if (result != MA_SUCCESS) { +// Error +} + +... + +ma_encoder_uninit(&encoder); +``` + +When initializing an encoder you must specify a config which is initialized with +`ma_encoder_config_init()`. Here you must specify the file type, the output sample format, output +channel count and output sample rate. The following file types are supported: + ++------------------------+-------------+ +| Enum | Description | ++------------------------+-------------+ +| ma_encoding_format_wav | WAV | ++------------------------+-------------+ + +If the format, channel count or sample rate is not supported by the output file type an error will +be returned. The encoder will not perform data conversion so you will need to convert it before +outputting any audio data. To output audio data, use `ma_encoder_write_pcm_frames()`, like in the +example below: + +```c +framesWritten = ma_encoder_write_pcm_frames(&encoder, pPCMFramesToWrite, framesToWrite); +``` + +Encoders must be uninitialized with `ma_encoder_uninit()`. + + + +10. Data Conversion +=================== +A data conversion API is included with miniaudio which supports the majority of data conversion +requirements. This supports conversion between sample formats, channel counts (with channel +mapping) and sample rates. + + +10.1. Sample Format Conversion +------------------------------ +Conversion between sample formats is achieved with the `ma_pcm_*_to_*()`, `ma_pcm_convert()` and +`ma_convert_pcm_frames_format()` APIs. Use `ma_pcm_*_to_*()` to convert between two specific +formats. Use `ma_pcm_convert()` to convert based on a `ma_format` variable. Use +`ma_convert_pcm_frames_format()` to convert PCM frames where you want to specify the frame count +and channel count as a variable instead of the total sample count. + + +10.1.1. Dithering +----------------- +Dithering can be set using the ditherMode parameter. + +The different dithering modes include the following, in order of efficiency: + ++-----------+--------------------------+ +| Type | Enum Token | ++-----------+--------------------------+ +| None | ma_dither_mode_none | +| Rectangle | ma_dither_mode_rectangle | +| Triangle | ma_dither_mode_triangle | ++-----------+--------------------------+ + +Note that even if the dither mode is set to something other than `ma_dither_mode_none`, it will be +ignored for conversions where dithering is not needed. Dithering is available for the following +conversions: + +``` +s16 -> u8 +s24 -> u8 +s32 -> u8 +f32 -> u8 +s24 -> s16 +s32 -> s16 +f32 -> s16 +``` + +Note that it is not an error to pass something other than ma_dither_mode_none for conversions where +dither is not used. It will just be ignored. + + + +10.2. Channel Conversion +------------------------ +Channel conversion is used for channel rearrangement and conversion from one channel count to +another. The `ma_channel_converter` API is used for channel conversion. 
Below is an example of +initializing a simple channel converter which converts from mono to stereo. + +```c +ma_channel_converter_config config = ma_channel_converter_config_init( +ma_format, // Sample format +1, // Input channels +NULL, // Input channel map +2, // Output channels +NULL, // Output channel map +ma_channel_mix_mode_default); // The mixing algorithm to use when combining channels. + +result = ma_channel_converter_init(&config, NULL, &converter); +if (result != MA_SUCCESS) { +// Error. +} +``` + +To perform the conversion simply call `ma_channel_converter_process_pcm_frames()` like so: + +```c +ma_result result = ma_channel_converter_process_pcm_frames(&converter, pFramesOut, pFramesIn, frameCount); +if (result != MA_SUCCESS) { +// Error. +} +``` + +It is up to the caller to ensure the output buffer is large enough to accommodate the new PCM +frames. + +Input and output PCM frames are always interleaved. Deinterleaved layouts are not supported. + + +10.2.1. Channel Mapping +----------------------- +In addition to converting from one channel count to another, like the example above, the channel +converter can also be used to rearrange channels. When initializing the channel converter, you can +optionally pass in channel maps for both the input and output frames. If the channel counts are the +same, and each channel map contains the same channel positions with the exception that they're in +a different order, a simple shuffling of the channels will be performed. If, however, there is not +a 1:1 mapping of channel positions, or the channel counts differ, the input channels will be mixed +based on a mixing mode which is specified when initializing the `ma_channel_converter_config` +object. + +When converting from mono to multi-channel, the mono channel is simply copied to each output +channel. When going the other way around, the audio of each output channel is simply averaged and +copied to the mono channel. + +In more complicated cases blending is used. The `ma_channel_mix_mode_simple` mode will drop excess +channels and silence extra channels. For example, converting from 4 to 2 channels, the 3rd and 4th +channels will be dropped, whereas converting from 2 to 4 channels will put silence into the 3rd and +4th channels. + +The `ma_channel_mix_mode_rectangle` mode uses spacial locality based on a rectangle to compute a +simple distribution between input and output. Imagine sitting in the middle of a room, with +speakers on the walls representing channel positions. The `MA_CHANNEL_FRONT_LEFT` position can be +thought of as being in the corner of the front and left walls. + +Finally, the `ma_channel_mix_mode_custom_weights` mode can be used to use custom user-defined +weights. Custom weights can be passed in as the last parameter of +`ma_channel_converter_config_init()`. + +Predefined channel maps can be retrieved with `ma_channel_map_init_standard()`. This takes a +`ma_standard_channel_map` enum as it's first parameter, which can be one of the following: + ++-----------------------------------+-----------------------------------------------------------+ +| Name | Description | ++-----------------------------------+-----------------------------------------------------------+ +| ma_standard_channel_map_default | Default channel map used by miniaudio. See below. | +| ma_standard_channel_map_microsoft | Channel map used by Microsoft's bitfield channel maps. | +| ma_standard_channel_map_alsa | Default ALSA channel map. | +| ma_standard_channel_map_rfc3551 | RFC 3551. Based on AIFF. 
| +| ma_standard_channel_map_flac | FLAC channel map. | +| ma_standard_channel_map_vorbis | Vorbis channel map. | +| ma_standard_channel_map_sound4 | FreeBSD's sound(4). | +| ma_standard_channel_map_sndio | sndio channel map. http://www.sndio.org/tips.html. | +| ma_standard_channel_map_webaudio | https://webaudio.github.io/web-audio-api/#ChannelOrdering | ++-----------------------------------+-----------------------------------------------------------+ + +Below are the channel maps used by default in miniaudio (`ma_standard_channel_map_default`): + ++---------------+---------------------------------+ +| Channel Count | Mapping | ++---------------+---------------------------------+ +| 1 (Mono) | 0: MA_CHANNEL_MONO | ++---------------+---------------------------------+ +| 2 (Stereo) | 0: MA_CHANNEL_FRONT_LEFT
| +| | 1: MA_CHANNEL_FRONT_RIGHT | ++---------------+---------------------------------+ +| 3 | 0: MA_CHANNEL_FRONT_LEFT
| +| | 1: MA_CHANNEL_FRONT_RIGHT
| +| | 2: MA_CHANNEL_FRONT_CENTER | ++---------------+---------------------------------+ +| 4 (Surround) | 0: MA_CHANNEL_FRONT_LEFT
| +| | 1: MA_CHANNEL_FRONT_RIGHT
| +| | 2: MA_CHANNEL_FRONT_CENTER
| +| | 3: MA_CHANNEL_BACK_CENTER | ++---------------+---------------------------------+ +| 5 | 0: MA_CHANNEL_FRONT_LEFT
| +| | 1: MA_CHANNEL_FRONT_RIGHT
| +| | 2: MA_CHANNEL_FRONT_CENTER
| +| | 3: MA_CHANNEL_BACK_LEFT
| +| | 4: MA_CHANNEL_BACK_RIGHT | ++---------------+---------------------------------+ +| 6 (5.1) | 0: MA_CHANNEL_FRONT_LEFT
| +| | 1: MA_CHANNEL_FRONT_RIGHT
| +| | 2: MA_CHANNEL_FRONT_CENTER
| +| | 3: MA_CHANNEL_LFE
| +| | 4: MA_CHANNEL_SIDE_LEFT
| +| | 5: MA_CHANNEL_SIDE_RIGHT | ++---------------+---------------------------------+ +| 7 | 0: MA_CHANNEL_FRONT_LEFT
| +| | 1: MA_CHANNEL_FRONT_RIGHT
| +| | 2: MA_CHANNEL_FRONT_CENTER
| +| | 3: MA_CHANNEL_LFE
| +| | 4: MA_CHANNEL_BACK_CENTER
|
+|               | 5: MA_CHANNEL_SIDE_LEFT         |
+|               | 6: MA_CHANNEL_SIDE_RIGHT        |
++---------------+---------------------------------+
+| 8 (7.1)       | 0: MA_CHANNEL_FRONT_LEFT
| +| | 1: MA_CHANNEL_FRONT_RIGHT
| +| | 2: MA_CHANNEL_FRONT_CENTER
| +| | 3: MA_CHANNEL_LFE
| +| | 4: MA_CHANNEL_BACK_LEFT
| +| | 5: MA_CHANNEL_BACK_RIGHT
| +| | 6: MA_CHANNEL_SIDE_LEFT
| +| | 7: MA_CHANNEL_SIDE_RIGHT | ++---------------+---------------------------------+ +| Other | All channels set to 0. This | +| | is equivalent to the same | +| | mapping as the device. | ++---------------+---------------------------------+ + + + +10.3. Resampling +---------------- +Resampling is achieved with the `ma_resampler` object. To create a resampler object, do something +like the following: + +```c +ma_resampler_config config = ma_resampler_config_init( +ma_format_s16, +channels, +sampleRateIn, +sampleRateOut, +ma_resample_algorithm_linear); + +ma_resampler resampler; +ma_result result = ma_resampler_init(&config, &resampler); +if (result != MA_SUCCESS) { +// An error occurred... +} +``` + +Do the following to uninitialize the resampler: + +```c +ma_resampler_uninit(&resampler); +``` + +The following example shows how data can be processed + +```c +ma_uint64 frameCountIn = 1000; +ma_uint64 frameCountOut = 2000; +ma_result result = ma_resampler_process_pcm_frames(&resampler, pFramesIn, &frameCountIn, pFramesOut, &frameCountOut); +if (result != MA_SUCCESS) { +// An error occurred... +} + +// At this point, frameCountIn contains the number of input frames that were consumed and frameCountOut contains the +// number of output frames written. +``` + +To initialize the resampler you first need to set up a config (`ma_resampler_config`) with +`ma_resampler_config_init()`. You need to specify the sample format you want to use, the number of +channels, the input and output sample rate, and the algorithm. + +The sample format can be either `ma_format_s16` or `ma_format_f32`. If you need a different format +you will need to perform pre- and post-conversions yourself where necessary. Note that the format +is the same for both input and output. The format cannot be changed after initialization. + +The resampler supports multiple channels and is always interleaved (both input and output). The +channel count cannot be changed after initialization. + +The sample rates can be anything other than zero, and are always specified in hertz. They should be +set to something like 44100, etc. The sample rate is the only configuration property that can be +changed after initialization. + +The miniaudio resampler has built-in support for the following algorithms: + ++-----------+------------------------------+ +| Algorithm | Enum Token | ++-----------+------------------------------+ +| Linear | ma_resample_algorithm_linear | +| Custom | ma_resample_algorithm_custom | ++-----------+------------------------------+ + +The algorithm cannot be changed after initialization. + +Processing always happens on a per PCM frame basis and always assumes interleaved input and output. +De-interleaved processing is not supported. To process frames, use +`ma_resampler_process_pcm_frames()`. On input, this function takes the number of output frames you +can fit in the output buffer and the number of input frames contained in the input buffer. On +output these variables contain the number of output frames that were written to the output buffer +and the number of input frames that were consumed in the process. You can pass in NULL for the +input buffer in which case it will be treated as an infinitely large buffer of zeros. The output +buffer can also be NULL, in which case the processing will be treated as seek. + +The sample rate can be changed dynamically on the fly. You can change this with explicit sample +rates with `ma_resampler_set_rate()` and also with a decimal ratio with +`ma_resampler_set_rate_ratio()`. 
The ratio is in/out. + +Sometimes it's useful to know exactly how many input frames will be required to output a specific +number of frames. You can calculate this with `ma_resampler_get_required_input_frame_count()`. +Likewise, it's sometimes useful to know exactly how many frames would be output given a certain +number of input frames. You can do this with `ma_resampler_get_expected_output_frame_count()`. + +Due to the nature of how resampling works, the resampler introduces some latency. This can be +retrieved in terms of both the input rate and the output rate with +`ma_resampler_get_input_latency()` and `ma_resampler_get_output_latency()`. + + +10.3.1. Resampling Algorithms +----------------------------- +The choice of resampling algorithm depends on your situation and requirements. + + +10.3.1.1. Linear Resampling +--------------------------- +The linear resampler is the fastest, but comes at the expense of poorer quality. There is, however, +some control over the quality of the linear resampler which may make it a suitable option depending +on your requirements. + +The linear resampler performs low-pass filtering before or after downsampling or upsampling, +depending on the sample rates you're converting between. When decreasing the sample rate, the +low-pass filter will be applied before downsampling. When increasing the rate it will be performed +after upsampling. By default a fourth order low-pass filter will be applied. This can be configured +via the `lpfOrder` configuration variable. Setting this to 0 will disable filtering. + +The low-pass filter has a cutoff frequency which defaults to half the sample rate of the lowest of +the input and output sample rates (Nyquist Frequency). + +The API for the linear resampler is the same as the main resampler API, only it's called +`ma_linear_resampler`. + + +10.3.2. Custom Resamplers +------------------------- +You can implement a custom resampler by using the `ma_resample_algorithm_custom` resampling +algorithm and setting a vtable in the resampler config: + +```c +ma_resampler_config config = ma_resampler_config_init(..., ma_resample_algorithm_custom); +config.pBackendVTable = &g_customResamplerVTable; +``` + +Custom resamplers are useful if the stock algorithms are not appropriate for your use case. You +need to implement the required functions in `ma_resampling_backend_vtable`. Note that not all +functions in the vtable need to be implemented, but if it's possible to implement, they should be. + +You can use the `ma_linear_resampler` object for an example on how to implement the vtable. The +`onGetHeapSize` callback is used to calculate the size of any internal heap allocation the custom +resampler will need to make given the supplied config. When you initialize the resampler via the +`onInit` callback, you'll be given a pointer to a heap allocation which is where you should store +the heap allocated data. You should not free this data in `onUninit` because miniaudio will manage +it for you. + +The `onProcess` callback is where the actual resampling takes place. On input, `pFrameCountIn` +points to a variable containing the number of frames in the `pFramesIn` buffer and +`pFrameCountOut` points to a variable containing the capacity in frames of the `pFramesOut` buffer. +On output, `pFrameCountIn` should be set to the number of input frames that were fully consumed, +whereas `pFrameCountOut` should be set to the number of frames that were written to `pFramesOut`. 
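+
+To make that bookkeeping concrete, below is a hypothetical helper (it is not part of miniaudio)
+showing the kind of accounting an `onProcess` implementation is expected to do, using a
+passthrough-style backend for simplicity. A real implementation would perform actual resampling
+rather than a straight copy.
+
+```c
+/* Hypothetical helper illustrating onProcess-style bookkeeping for a passthrough backend. */
+static ma_result my_process_passthrough(const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut, ma_format format, ma_uint32 channels)
+{
+    /* Passthrough: every input frame maps to exactly one output frame. */
+    ma_uint64 frameCount = (*pFrameCountIn < *pFrameCountOut) ? *pFrameCountIn : *pFrameCountOut;
+
+    if (pFramesOut != NULL) {
+        if (pFramesIn != NULL) {
+            ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, format, channels);
+        } else {
+            ma_silence_pcm_frames(pFramesOut, frameCount, format, channels);   /* A NULL input is treated as silence. */
+        }
+    }
+
+    /* Report what was actually consumed and written. */
+    *pFrameCountIn  = frameCount;
+    *pFrameCountOut = frameCount;
+
+    return MA_SUCCESS;
+}
+```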
+ +The `onSetRate` callback is optional and is used for dynamically changing the sample rate. If +dynamic rate changes are not supported, you can set this callback to NULL. + +The `onGetInputLatency` and `onGetOutputLatency` functions are used for retrieving the latency in +input and output rates respectively. These can be NULL in which case latency calculations will be +assumed to be NULL. + +The `onGetRequiredInputFrameCount` callback is used to give miniaudio a hint as to how many input +frames are required to be available to produce the given number of output frames. Likewise, the +`onGetExpectedOutputFrameCount` callback is used to determine how many output frames will be +produced given the specified number of input frames. miniaudio will use these as a hint, but they +are optional and can be set to NULL if you're unable to implement them. + + + +10.4. General Data Conversion +----------------------------- +The `ma_data_converter` API can be used to wrap sample format conversion, channel conversion and +resampling into one operation. This is what miniaudio uses internally to convert between the format +requested when the device was initialized and the format of the backend's native device. The API +for general data conversion is very similar to the resampling API. Create a `ma_data_converter` +object like this: + +```c +ma_data_converter_config config = ma_data_converter_config_init( +inputFormat, +outputFormat, +inputChannels, +outputChannels, +inputSampleRate, +outputSampleRate +); + +ma_data_converter converter; +ma_result result = ma_data_converter_init(&config, NULL, &converter); +if (result != MA_SUCCESS) { +// An error occurred... +} +``` + +In the example above we use `ma_data_converter_config_init()` to initialize the config, however +there's many more properties that can be configured, such as channel maps and resampling quality. +Something like the following may be more suitable depending on your requirements: + +```c +ma_data_converter_config config = ma_data_converter_config_init_default(); +config.formatIn = inputFormat; +config.formatOut = outputFormat; +config.channelsIn = inputChannels; +config.channelsOut = outputChannels; +config.sampleRateIn = inputSampleRate; +config.sampleRateOut = outputSampleRate; +ma_channel_map_init_standard(ma_standard_channel_map_flac, config.channelMapIn, sizeof(config.channelMapIn)/sizeof(config.channelMapIn[0]), config.channelCountIn); +config.resampling.linear.lpfOrder = MA_MAX_FILTER_ORDER; +``` + +Do the following to uninitialize the data converter: + +```c +ma_data_converter_uninit(&converter, NULL); +``` + +The following example shows how data can be processed + +```c +ma_uint64 frameCountIn = 1000; +ma_uint64 frameCountOut = 2000; +ma_result result = ma_data_converter_process_pcm_frames(&converter, pFramesIn, &frameCountIn, pFramesOut, &frameCountOut); +if (result != MA_SUCCESS) { +// An error occurred... +} + +// At this point, frameCountIn contains the number of input frames that were consumed and frameCountOut contains the number +// of output frames written. +``` + +The data converter supports multiple channels and is always interleaved (both input and output). +The channel count cannot be changed after initialization. + +Sample rates can be anything other than zero, and are always specified in hertz. They should be set +to something like 44100, etc. 
The sample rate is the only configuration property that can be +changed after initialization, but only if the `resampling.allowDynamicSampleRate` member of +`ma_data_converter_config` is set to `MA_TRUE`. To change the sample rate, use +`ma_data_converter_set_rate()` or `ma_data_converter_set_rate_ratio()`. The ratio must be in/out. +The resampling algorithm cannot be changed after initialization. + +Processing always happens on a per PCM frame basis and always assumes interleaved input and output. +De-interleaved processing is not supported. To process frames, use +`ma_data_converter_process_pcm_frames()`. On input, this function takes the number of output frames +you can fit in the output buffer and the number of input frames contained in the input buffer. On +output these variables contain the number of output frames that were written to the output buffer +and the number of input frames that were consumed in the process. You can pass in NULL for the +input buffer in which case it will be treated as an infinitely large +buffer of zeros. The output buffer can also be NULL, in which case the processing will be treated +as seek. + +Sometimes it's useful to know exactly how many input frames will be required to output a specific +number of frames. You can calculate this with `ma_data_converter_get_required_input_frame_count()`. +Likewise, it's sometimes useful to know exactly how many frames would be output given a certain +number of input frames. You can do this with `ma_data_converter_get_expected_output_frame_count()`. + +Due to the nature of how resampling works, the data converter introduces some latency if resampling +is required. This can be retrieved in terms of both the input rate and the output rate with +`ma_data_converter_get_input_latency()` and `ma_data_converter_get_output_latency()`. + + + +11. Filtering +============= + +11.1. Biquad Filtering +---------------------- +Biquad filtering is achieved with the `ma_biquad` API. Example: + +```c +ma_biquad_config config = ma_biquad_config_init(ma_format_f32, channels, b0, b1, b2, a0, a1, a2); +ma_result result = ma_biquad_init(&config, &biquad); +if (result != MA_SUCCESS) { +// Error. +} + +... + +ma_biquad_process_pcm_frames(&biquad, pFramesOut, pFramesIn, frameCount); +``` + +Biquad filtering is implemented using transposed direct form 2. The numerator coefficients are b0, +b1 and b2, and the denominator coefficients are a0, a1 and a2. The a0 coefficient is required and +coefficients must not be pre-normalized. + +Supported formats are `ma_format_s16` and `ma_format_f32`. If you need to use a different format +you need to convert it yourself beforehand. When using `ma_format_s16` the biquad filter will use +fixed point arithmetic. When using `ma_format_f32`, floating point arithmetic will be used. + +Input and output frames are always interleaved. + +Filtering can be applied in-place by passing in the same pointer for both the input and output +buffers, like so: + +```c +ma_biquad_process_pcm_frames(&biquad, pMyData, pMyData, frameCount); +``` + +If you need to change the values of the coefficients, but maintain the values in the registers you +can do so with `ma_biquad_reinit()`. This is useful if you need to change the properties of the +filter while keeping the values of registers valid to avoid glitching. Do not use +`ma_biquad_init()` for this as it will do a full initialization which involves clearing the +registers to 0. 
Note that changing the format or channel count after initialization is invalid and +will result in an error. + + +11.2. Low-Pass Filtering +------------------------ +Low-pass filtering is achieved with the following APIs: + ++---------+------------------------------------------+ +| API | Description | ++---------+------------------------------------------+ +| ma_lpf1 | First order low-pass filter | +| ma_lpf2 | Second order low-pass filter | +| ma_lpf | High order low-pass filter (Butterworth) | ++---------+------------------------------------------+ + +Low-pass filter example: + +```c +ma_lpf_config config = ma_lpf_config_init(ma_format_f32, channels, sampleRate, cutoffFrequency, order); +ma_result result = ma_lpf_init(&config, &lpf); +if (result != MA_SUCCESS) { +// Error. +} + +... + +ma_lpf_process_pcm_frames(&lpf, pFramesOut, pFramesIn, frameCount); +``` + +Supported formats are `ma_format_s16` and` ma_format_f32`. If you need to use a different format +you need to convert it yourself beforehand. Input and output frames are always interleaved. + +Filtering can be applied in-place by passing in the same pointer for both the input and output +buffers, like so: + +```c +ma_lpf_process_pcm_frames(&lpf, pMyData, pMyData, frameCount); +``` + +The maximum filter order is limited to `MA_MAX_FILTER_ORDER` which is set to 8. If you need more, +you can chain first and second order filters together. + +```c +for (iFilter = 0; iFilter < filterCount; iFilter += 1) { +ma_lpf2_process_pcm_frames(&lpf2[iFilter], pMyData, pMyData, frameCount); +} +``` + +If you need to change the configuration of the filter, but need to maintain the state of internal +registers you can do so with `ma_lpf_reinit()`. This may be useful if you need to change the sample +rate and/or cutoff frequency dynamically while maintaining smooth transitions. Note that changing the +format or channel count after initialization is invalid and will result in an error. + +The `ma_lpf` object supports a configurable order, but if you only need a first order filter you +may want to consider using `ma_lpf1`. Likewise, if you only need a second order filter you can use +`ma_lpf2`. The advantage of this is that they're lighter weight and a bit more efficient. + +If an even filter order is specified, a series of second order filters will be processed in a +chain. If an odd filter order is specified, a first order filter will be applied, followed by a +series of second order filters in a chain. + + +11.3. High-Pass Filtering +------------------------- +High-pass filtering is achieved with the following APIs: + ++---------+-------------------------------------------+ +| API | Description | ++---------+-------------------------------------------+ +| ma_hpf1 | First order high-pass filter | +| ma_hpf2 | Second order high-pass filter | +| ma_hpf | High order high-pass filter (Butterworth) | ++---------+-------------------------------------------+ + +High-pass filters work exactly the same as low-pass filters, only the APIs are called `ma_hpf1`, +`ma_hpf2` and `ma_hpf`. See example code for low-pass filters for example usage. + + +11.4. 
Band-Pass Filtering
+-------------------------
+Band-pass filtering is achieved with the following APIs:
+
++---------+-------------------------------+
+| API     | Description                   |
++---------+-------------------------------+
+| ma_bpf2 | Second order band-pass filter |
+| ma_bpf  | High order band-pass filter   |
++---------+-------------------------------+
+
+Band-pass filters work exactly the same as low-pass filters, only the APIs are called `ma_bpf2` and
+`ma_bpf`. See example code for low-pass filters for example usage. Note that the order for
+band-pass filters must be an even number which means there is no first order band-pass filter,
+unlike low-pass and high-pass filters.
+
+
+11.5. Notch Filtering
+---------------------
+Notch filtering is achieved with the following APIs:
+
++-----------+------------------------------------------+
+| API       | Description                              |
++-----------+------------------------------------------+
+| ma_notch2 | Second order notching filter             |
++-----------+------------------------------------------+
+
+
+11.6. Peaking EQ Filtering
+--------------------------
+Peaking filtering is achieved with the following APIs:
+
++----------+------------------------------------------+
+| API      | Description                              |
++----------+------------------------------------------+
+| ma_peak2 | Second order peaking filter              |
++----------+------------------------------------------+
+
+
+11.7. Low Shelf Filtering
+-------------------------
+Low shelf filtering is achieved with the following APIs:
+
++-------------+------------------------------------------+
+| API         | Description                              |
++-------------+------------------------------------------+
+| ma_loshelf2 | Second order low shelf filter            |
++-------------+------------------------------------------+
+
+Where a high-pass filter is used to eliminate lower frequencies, a low shelf filter can be used to
+just turn them down rather than eliminate them entirely.
+
+
+11.8. High Shelf Filtering
+--------------------------
+High shelf filtering is achieved with the following APIs:
+
++-------------+------------------------------------------+
+| API         | Description                              |
++-------------+------------------------------------------+
+| ma_hishelf2 | Second order high shelf filter           |
++-------------+------------------------------------------+
+
+The high shelf filter has the same API as the low shelf filter, only you would use `ma_hishelf`
+instead of `ma_loshelf`. Where a low shelf filter is used to adjust the volume of low frequencies,
+the high shelf filter does the same thing for high frequencies.
+
+
+
+
+12. Waveform and Noise Generation
+=================================
+
+12.1. Waveforms
+---------------
+miniaudio supports generation of sine, square, triangle and sawtooth waveforms. This is achieved
+with the `ma_waveform` API. Example:
+
+```c
+ma_waveform_config config = ma_waveform_config_init(
+FORMAT,
+CHANNELS,
+SAMPLE_RATE,
+ma_waveform_type_sine,
+amplitude,
+frequency);
+
+ma_waveform waveform;
+ma_result result = ma_waveform_init(&config, &waveform);
+if (result != MA_SUCCESS) {
+// Error.
+}
+
+...
+
+ma_waveform_read_pcm_frames(&waveform, pOutput, frameCount);
+```
+
+The amplitude, frequency, type, and sample rate can be changed dynamically with
+`ma_waveform_set_amplitude()`, `ma_waveform_set_frequency()`, `ma_waveform_set_type()`, and
+`ma_waveform_set_sample_rate()` respectively.
+
+You can invert the waveform by setting the amplitude to a negative value. You can use this to
+control whether or not a sawtooth has a positive or negative ramp, for example.
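+
+As a quick sketch, here is how the waveform might be adjusted on the fly (the values below are
+arbitrary and `waveform` is assumed to have been initialized as in the example above):
+
+```c
+ma_waveform_set_type(&waveform, ma_waveform_type_sawtooth);
+ma_waveform_set_frequency(&waveform, 220);   /* In hertz. */
+ma_waveform_set_amplitude(&waveform, -0.2);  /* A negative amplitude inverts the ramp. */
+```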
+
+Below are the supported waveform types:
+
++---------------------------+
+| Enum Name                 |
++---------------------------+
+| ma_waveform_type_sine     |
+| ma_waveform_type_square   |
+| ma_waveform_type_triangle |
+| ma_waveform_type_sawtooth |
++---------------------------+
+
+
+
+12.2. Noise
+-----------
+miniaudio supports generation of white, pink and Brownian noise via the `ma_noise` API. Example:
+
+```c
+ma_noise_config config = ma_noise_config_init(
+FORMAT,
+CHANNELS,
+ma_noise_type_white,
+SEED,
+amplitude);
+
+ma_noise noise;
+ma_result result = ma_noise_init(&config, &noise);
+if (result != MA_SUCCESS) {
+// Error.
+}
+
+...
+
+ma_noise_read_pcm_frames(&noise, pOutput, frameCount);
+```
+
+The noise API uses simple LCG random number generation. It supports a custom seed which is useful
+for things like automated testing requiring reproducibility. Setting the seed to zero will default
+to `MA_DEFAULT_LCG_SEED`.
+
+The amplitude and seed can be changed dynamically with `ma_noise_set_amplitude()` and
+`ma_noise_set_seed()` respectively.
+
+By default, the noise API will use different values for different channels. So, for example, the
+left side in a stereo stream will be different to the right side. To instead have each channel use
+the same random value, set the `duplicateChannels` member of the noise config to true, like so:
+
+```c
+config.duplicateChannels = MA_TRUE;
+```
+
+Below are the supported noise types:
+
++------------------------+
+| Enum Name              |
++------------------------+
+| ma_noise_type_white    |
+| ma_noise_type_pink     |
+| ma_noise_type_brownian |
++------------------------+
+
+
+
+13. Audio Buffers
+=================
+miniaudio supports reading from a buffer of raw audio data via the `ma_audio_buffer` API. This can
+read from memory that's managed by the application, but can also handle the memory management for
+you internally. Memory management is flexible and should support most use cases.
+
+Audio buffers are initialized using the standard configuration system used everywhere in miniaudio:
+
+```c
+ma_audio_buffer_config config = ma_audio_buffer_config_init(
+format,
+channels,
+sizeInFrames,
+pExistingData,
+&allocationCallbacks);
+
+ma_audio_buffer buffer;
+result = ma_audio_buffer_init(&config, &buffer);
+if (result != MA_SUCCESS) {
+// Error.
+}
+
+...
+
+ma_audio_buffer_uninit(&buffer);
+```
+
+In the example above, the memory pointed to by `pExistingData` will *not* be copied and is how an
+application can do self-managed memory allocation. If you would rather make a copy of the data, use
+`ma_audio_buffer_init_copy()`. To uninitialize the buffer, use `ma_audio_buffer_uninit()`.
+
+Sometimes it can be convenient to allocate the memory for the `ma_audio_buffer` structure and the
+raw audio data in a contiguous block of memory. That is, the raw audio data will be located
+immediately after the `ma_audio_buffer` structure. To do this, use
+`ma_audio_buffer_alloc_and_init()`:
+
+```c
+ma_audio_buffer_config config = ma_audio_buffer_config_init(
+format,
+channels,
+sizeInFrames,
+pExistingData,
+&allocationCallbacks);
+
+ma_audio_buffer* pBuffer;
+result = ma_audio_buffer_alloc_and_init(&config, &pBuffer);
+if (result != MA_SUCCESS) {
+// Error
+}
+
+...
+
+ma_audio_buffer_uninit_and_free(pBuffer);
+```
+
+If you initialize the buffer with `ma_audio_buffer_alloc_and_init()` you should uninitialize it
+with `ma_audio_buffer_uninit_and_free()`.
In the example above, the memory pointed to by +`pExistingData` will be copied into the buffer, which is contrary to the behavior of +`ma_audio_buffer_init()`. + +An audio buffer has a playback cursor just like a decoder. As you read frames from the buffer, the +cursor moves forward. The last parameter (`loop`) can be used to determine if the buffer should +loop. The return value is the number of frames actually read. If this is less than the number of +frames requested it means the end has been reached. This should never happen if the `loop` +parameter is set to true. If you want to manually loop back to the start, you can do so with with +`ma_audio_buffer_seek_to_pcm_frame(pAudioBuffer, 0)`. Below is an example for reading data from an +audio buffer. + +```c +ma_uint64 framesRead = ma_audio_buffer_read_pcm_frames(pAudioBuffer, pFramesOut, desiredFrameCount, isLooping); +if (framesRead < desiredFrameCount) { +// If not looping, this means the end has been reached. This should never happen in looping mode with valid input. +} +``` + +Sometimes you may want to avoid the cost of data movement between the internal buffer and the +output buffer. Instead you can use memory mapping to retrieve a pointer to a segment of data: + +```c +void* pMappedFrames; +ma_uint64 frameCount = frameCountToTryMapping; +ma_result result = ma_audio_buffer_map(pAudioBuffer, &pMappedFrames, &frameCount); +if (result == MA_SUCCESS) { +// Map was successful. The value in frameCount will be how many frames were _actually_ mapped, which may be +// less due to the end of the buffer being reached. +ma_copy_pcm_frames(pFramesOut, pMappedFrames, frameCount, pAudioBuffer->format, pAudioBuffer->channels); + +// You must unmap the buffer. +ma_audio_buffer_unmap(pAudioBuffer, frameCount); +} +``` + +When you use memory mapping, the read cursor is increment by the frame count passed in to +`ma_audio_buffer_unmap()`. If you decide not to process every frame you can pass in a value smaller +than the value returned by `ma_audio_buffer_map()`. The disadvantage to using memory mapping is +that it does not handle looping for you. You can determine if the buffer is at the end for the +purpose of looping with `ma_audio_buffer_at_end()` or by inspecting the return value of +`ma_audio_buffer_unmap()` and checking if it equals `MA_AT_END`. You should not treat `MA_AT_END` +as an error when returned by `ma_audio_buffer_unmap()`. + + + +14. Ring Buffers +================ +miniaudio supports lock free (single producer, single consumer) ring buffers which are exposed via +the `ma_rb` and `ma_pcm_rb` APIs. The `ma_rb` API operates on bytes, whereas the `ma_pcm_rb` +operates on PCM frames. They are otherwise identical as `ma_pcm_rb` is just a wrapper around +`ma_rb`. + +Unlike most other APIs in miniaudio, ring buffers support both interleaved and deinterleaved +streams. The caller can also allocate their own backing memory for the ring buffer to use +internally for added flexibility. Otherwise the ring buffer will manage it's internal memory for +you. + +The examples below use the PCM frame variant of the ring buffer since that's most likely the one +you will want to use. To initialize a ring buffer, do something like the following: + +```c +ma_pcm_rb rb; +ma_result result = ma_pcm_rb_init(FORMAT, CHANNELS, BUFFER_SIZE_IN_FRAMES, NULL, NULL, &rb); +if (result != MA_SUCCESS) { +// Error +} +``` + +The `ma_pcm_rb_init()` function takes the sample format and channel count as parameters because +it's the PCM variant of the ring buffer API. 
For the regular ring buffer that operates on bytes you +would call `ma_rb_init()` which leaves these out and just takes the size of the buffer in bytes +instead of frames. The fourth parameter is an optional pre-allocated buffer and the fifth parameter +is a pointer to a `ma_allocation_callbacks` structure for custom memory allocation routines. +Passing in `NULL` for this results in `MA_MALLOC()` and `MA_FREE()` being used. + +Use `ma_pcm_rb_init_ex()` if you need a deinterleaved buffer. The data for each sub-buffer is +offset from each other based on the stride. To manage your sub-buffers you can use +`ma_pcm_rb_get_subbuffer_stride()`, `ma_pcm_rb_get_subbuffer_offset()` and +`ma_pcm_rb_get_subbuffer_ptr()`. + +Use `ma_pcm_rb_acquire_read()` and `ma_pcm_rb_acquire_write()` to retrieve a pointer to a section +of the ring buffer. You specify the number of frames you need, and on output it will set to what +was actually acquired. If the read or write pointer is positioned such that the number of frames +requested will require a loop, it will be clamped to the end of the buffer. Therefore, the number +of frames you're given may be less than the number you requested. + +After calling `ma_pcm_rb_acquire_read()` or `ma_pcm_rb_acquire_write()`, you do your work on the +buffer and then "commit" it with `ma_pcm_rb_commit_read()` or `ma_pcm_rb_commit_write()`. This is +where the read/write pointers are updated. When you commit you need to pass in the buffer that was +returned by the earlier call to `ma_pcm_rb_acquire_read()` or `ma_pcm_rb_acquire_write()` and is +only used for validation. The number of frames passed to `ma_pcm_rb_commit_read()` and +`ma_pcm_rb_commit_write()` is what's used to increment the pointers, and can be less that what was +originally requested. + +If you want to correct for drift between the write pointer and the read pointer you can use a +combination of `ma_pcm_rb_pointer_distance()`, `ma_pcm_rb_seek_read()` and +`ma_pcm_rb_seek_write()`. Note that you can only move the pointers forward, and you should only +move the read pointer forward via the consumer thread, and the write pointer forward by the +producer thread. If there is too much space between the pointers, move the read pointer forward. If +there is too little space between the pointers, move the write pointer forward. + +You can use a ring buffer at the byte level instead of the PCM frame level by using the `ma_rb` +API. This is exactly the same, only you will use the `ma_rb` functions instead of `ma_pcm_rb` and +instead of frame counts you will pass around byte counts. + +The maximum size of the buffer in bytes is `0x7FFFFFFF-(MA_SIMD_ALIGNMENT-1)` due to the most +significant bit being used to encode a loop flag and the internally managed buffers always being +aligned to `MA_SIMD_ALIGNMENT`. + +Note that the ring buffer is only thread safe when used by a single consumer thread and single +producer thread. + + + +15. Backends +============ +The following backends are supported by miniaudio. These are listed in order of default priority. +When no backend is specified when initializing a context or device, miniaudio will attempt to use +each of these backends in the order listed in the table below. + +Note that backends that are not usable by the build target will not be included in the build. For +example, ALSA, which is specific to Linux, will not be included in the Windows build. 
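+
+If you would rather not rely on the default priority order, you can pass an explicit list of
+backends to try, in your preferred order, when initializing a context. A minimal sketch (the
+backends chosen here are arbitrary):
+
+```c
+ma_backend backends[] = {
+    ma_backend_wasapi,
+    ma_backend_dsound
+};
+
+ma_context context;
+if (ma_context_init(backends, sizeof(backends)/sizeof(backends[0]), NULL, &context) != MA_SUCCESS) {
+    // Failed to initialize a context with any of the requested backends.
+}
+```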
+ ++-------------+-----------------------+--------------------------------------------------------+ +| Name | Enum Name | Supported Operating Systems | ++-------------+-----------------------+--------------------------------------------------------+ +| WASAPI | ma_backend_wasapi | Windows Vista+ | +| DirectSound | ma_backend_dsound | Windows XP+ | +| WinMM | ma_backend_winmm | Windows 95+ | +| Core Audio | ma_backend_coreaudio | macOS, iOS | +| sndio | ma_backend_sndio | OpenBSD | +| audio(4) | ma_backend_audio4 | NetBSD, OpenBSD | +| OSS | ma_backend_oss | FreeBSD | +| PulseAudio | ma_backend_pulseaudio | Cross Platform (disabled on Windows, BSD and Android) | +| ALSA | ma_backend_alsa | Linux | +| JACK | ma_backend_jack | Cross Platform (disabled on BSD and Android) | +| AAudio | ma_backend_aaudio | Android 8+ | +| OpenSL ES | ma_backend_opensl | Android (API level 16+) | +| Web Audio | ma_backend_webaudio | Web (via Emscripten) | +| Custom | ma_backend_custom | Cross Platform | +| Null | ma_backend_null | Cross Platform (not used on Web) | ++-------------+-----------------------+--------------------------------------------------------+ + +Some backends have some nuance details you may want to be aware of. + +15.1. WASAPI +------------ +- Low-latency shared mode will be disabled when using an application-defined sample rate which is +different to the device's native sample rate. To work around this, set `wasapi.noAutoConvertSRC` +to true in the device config. This is due to IAudioClient3_InitializeSharedAudioStream() failing +when the `AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM` flag is specified. Setting wasapi.noAutoConvertSRC +will result in miniaudio's internal resampler being used instead which will in turn enable the +use of low-latency shared mode. + +15.2. PulseAudio +---------------- +- If you experience bad glitching/noise on Arch Linux, consider this fix from the Arch wiki: +https://wiki.archlinux.org/index.php/PulseAudio/Troubleshooting#Glitches,_skips_or_crackling. +Alternatively, consider using a different backend such as ALSA. + +15.3. Android +------------- +- To capture audio on Android, remember to add the RECORD_AUDIO permission to your manifest: +`` +- With OpenSL|ES, only a single ma_context can be active at any given time. This is due to a +limitation with OpenSL|ES. +- With AAudio, only default devices are enumerated. This is due to AAudio not having an enumeration +API (devices are enumerated through Java). You can however perform your own device enumeration +through Java and then set the ID in the ma_device_id structure (ma_device_id.aaudio) and pass it +to ma_device_init(). +- The backend API will perform resampling where possible. The reason for this as opposed to using +miniaudio's built-in resampler is to take advantage of any potential device-specific +optimizations the driver may implement. + +BSD +--- +- The sndio backend is currently only enabled on OpenBSD builds. +- The audio(4) backend is supported on OpenBSD, but you may need to disable sndiod before you can +use it. + +15.4. UWP +--------- +- UWP only supports default playback and capture devices. +- UWP requires the Microphone capability to be enabled in the application's manifest (Package.appxmanifest): + +``` + +... + + + + +``` + +15.5. Web Audio / Emscripten +---------------------------- +- You cannot use `-std=c*` compiler flags, nor `-ansi`. This only applies to the Emscripten build. 
+- The first time a context is initialized it will create a global object called "miniaudio" whose +primary purpose is to act as a factory for device objects. +- Currently the Web Audio backend uses ScriptProcessorNode's, but this may need to change later as +they've been deprecated. +- Google has implemented a policy in their browsers that prevent automatic media output without +first receiving some kind of user input. The following web page has additional details: +https://developers.google.com/web/updates/2017/09/autoplay-policy-changes. Starting the device +may fail if you try to start playback without first handling some kind of user input. + + + +16. Optimization Tips +===================== +See below for some tips on improving performance. + +16.1. Low Level API +------------------- +- In the data callback, if your data is already clipped prior to copying it into the output buffer, +set the `noClip` config option in the device config to true. This will disable miniaudio's built +in clipping function. +- By default, miniaudio will pre-silence the data callback's output buffer. If you know that you +will always write valid data to the output buffer you can disable pre-silencing by setting the +`noPreSilence` config option in the device config to true. + +16.2. High Level API +-------------------- +- If a sound does not require doppler or pitch shifting, consider disabling pitching by +initializing the sound with the `MA_SOUND_FLAG_NO_PITCH` flag. +- If a sound does not require spatialization, disable it by initializing the sound with the +`MA_SOUND_FLAG_NO_SPATIALIZATION` flag. It can be re-enabled again post-initialization with +`ma_sound_set_spatialization_enabled()`. +- If you know all of your sounds will always be the same sample rate, set the engine's sample +rate to match that of the sounds. Likewise, if you're using a self-managed resource manager, +consider setting the decoded sample rate to match your sounds. By configuring everything to +use a consistent sample rate, sample rate conversion can be avoided. + + + +17. Miscellaneous Notes +======================= +- Automatic stream routing is enabled on a per-backend basis. Support is explicitly enabled for +WASAPI and Core Audio, however other backends such as PulseAudio may naturally support it, though +not all have been tested. +- When compiling with VC6 and earlier, decoding is restricted to files less than 2GB in size. This +is due to 64-bit file APIs not being available. +*/ + +#ifndef miniaudio_h +#define miniaudio_h + +#ifdef __cplusplus +extern "C" { +#endif + +#define MA_STRINGIFY(x) #x +#define MA_XSTRINGIFY(x) MA_STRINGIFY(x) + +#define MA_VERSION_MAJOR 0 +#define MA_VERSION_MINOR 11 +#define MA_VERSION_REVISION 18 +#define MA_VERSION_STRING MA_XSTRINGIFY(MA_VERSION_MAJOR) "." MA_XSTRINGIFY(MA_VERSION_MINOR) "." 
MA_XSTRINGIFY(MA_VERSION_REVISION) + +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(push) +#pragma warning(disable:4201) /* nonstandard extension used: nameless struct/union */ +#pragma warning(disable:4214) /* nonstandard extension used: bit field types other than int */ +#pragma warning(disable:4324) /* structure was padded due to alignment specifier */ +#elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpedantic" /* For ISO C99 doesn't support unnamed structs/unions [-Wpedantic] */ +#if defined(__clang__) +#pragma GCC diagnostic ignored "-Wc11-extensions" /* anonymous unions are a C11 extension */ +#endif +#endif + + + +#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(_M_ARM64) || defined(__powerpc64__) +#define MA_SIZEOF_PTR 8 +#else +#define MA_SIZEOF_PTR 4 +#endif + +#include /* For size_t. */ + + /* Sized types. */ +#if defined(MA_USE_STDINT) +#include + typedef int8_t ma_int8; + typedef uint8_t ma_uint8; + typedef int16_t ma_int16; + typedef uint16_t ma_uint16; + typedef int32_t ma_int32; + typedef uint32_t ma_uint32; + typedef int64_t ma_int64; + typedef uint64_t ma_uint64; +#else + typedef signed char ma_int8; + typedef unsigned char ma_uint8; + typedef signed short ma_int16; + typedef unsigned short ma_uint16; + typedef signed int ma_int32; + typedef unsigned int ma_uint32; +#if defined(_MSC_VER) && !defined(__clang__) + typedef signed __int64 ma_int64; + typedef unsigned __int64 ma_uint64; +#else +#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wlong-long" +#if defined(__clang__) +#pragma GCC diagnostic ignored "-Wc++11-long-long" +#endif +#endif + typedef signed long long ma_int64; + typedef unsigned long long ma_uint64; +#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) +#pragma GCC diagnostic pop +#endif +#endif +#endif /* MA_USE_STDINT */ + +#if MA_SIZEOF_PTR == 8 + typedef ma_uint64 ma_uintptr; +#else + typedef ma_uint32 ma_uintptr; +#endif + + typedef ma_uint8 ma_bool8; + typedef ma_uint32 ma_bool32; +#define MA_TRUE 1 +#define MA_FALSE 0 + + /* These float types are not used universally by miniaudio. It's to simplify some macro expansion for atomic types. */ + typedef float ma_float; + typedef double ma_double; + + typedef void* ma_handle; + typedef void* ma_ptr; + + /* + ma_proc is annoying because when compiling with GCC we get pendantic warnings about converting + between `void*` and `void (*)()`. We can't use `void (*)()` with MSVC however, because we'll get + warning C4191 about "type cast between incompatible function types". To work around this I'm going + to use a different data type depending on the compiler. + */ +#if defined(__GNUC__) + typedef void (*ma_proc)(void); +#else + typedef void* ma_proc; +#endif + +#if defined(_MSC_VER) && !defined(_WCHAR_T_DEFINED) + typedef ma_uint16 wchar_t; +#endif + + /* Define NULL for some compilers. */ +#ifndef NULL +#define NULL 0 +#endif + +#if defined(SIZE_MAX) +#define MA_SIZE_MAX SIZE_MAX +#else +#define MA_SIZE_MAX 0xFFFFFFFF /* When SIZE_MAX is not defined by the standard library just default to the maximum 32-bit unsigned integer. */ +#endif + + + /* Platform/backend detection. 
*/ +#if defined(_WIN32) || defined(__COSMOPOLITAN__) +#define MA_WIN32 +#if defined(MA_FORCE_UWP) || (defined(WINAPI_FAMILY) && ((defined(WINAPI_FAMILY_PC_APP) && WINAPI_FAMILY == WINAPI_FAMILY_PC_APP) || (defined(WINAPI_FAMILY_PHONE_APP) && WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP))) +#define MA_WIN32_UWP +#elif defined(WINAPI_FAMILY) && (defined(WINAPI_FAMILY_GAMES) && WINAPI_FAMILY == WINAPI_FAMILY_GAMES) +#define MA_WIN32_GDK +#else +#define MA_WIN32_DESKTOP +#endif +#endif +#if !defined(_WIN32) /* If it's not Win32, assume POSIX. */ +#define MA_POSIX + + /* + Use the MA_NO_PTHREAD_IN_HEADER option at your own risk. This is intentionally undocumented. + You can use this to avoid including pthread.h in the header section. The downside is that it + results in some fixed sized structures being declared for the various types that are used in + miniaudio. The risk here is that these types might be too small for a given platform. This + risk is yours to take and no support will be offered if you enable this option. + */ +#ifndef MA_NO_PTHREAD_IN_HEADER +#include /* Unfortunate #include, but needed for pthread_t, pthread_mutex_t and pthread_cond_t types. */ + typedef pthread_t ma_pthread_t; + typedef pthread_mutex_t ma_pthread_mutex_t; + typedef pthread_cond_t ma_pthread_cond_t; +#else + typedef ma_uintptr ma_pthread_t; + typedef union ma_pthread_mutex_t { char __data[40]; ma_uint64 __alignment; } ma_pthread_mutex_t; + typedef union ma_pthread_cond_t { char __data[48]; ma_uint64 __alignment; } ma_pthread_cond_t; +#endif + +#if defined(__unix__) +#define MA_UNIX +#endif +#if defined(__linux__) +#define MA_LINUX +#endif +#if defined(__APPLE__) +#define MA_APPLE +#endif +#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) +#define MA_BSD +#endif +#if defined(__ANDROID__) +#define MA_ANDROID +#endif +#if defined(__EMSCRIPTEN__) +#define MA_EMSCRIPTEN +#endif +#if defined(__ORBIS__) +#define MA_ORBIS +#endif +#if defined(__PROSPERO__) +#define MA_PROSPERO +#endif +#if defined(__NX__) +#define MA_NX +#endif +#if defined(__BEOS__) || defined(__HAIKU__) +#define MA_BEOS +#endif +#if defined(__HAIKU__) +#define MA_HAIKU +#endif +#endif + +#if defined(__has_c_attribute) +#if __has_c_attribute(fallthrough) +#define MA_FALLTHROUGH [[fallthrough]] +#endif +#endif +#if !defined(MA_FALLTHROUGH) && defined(__has_attribute) && (defined(__clang__) || defined(__GNUC__)) +#if __has_attribute(fallthrough) +#define MA_FALLTHROUGH __attribute__((fallthrough)) +#endif +#endif +#if !defined(MA_FALLTHROUGH) +#define MA_FALLTHROUGH ((void)0) +#endif + +#ifdef _MSC_VER +#define MA_INLINE __forceinline + + /* noinline was introduced in Visual Studio 2005. */ +#if _MSC_VER >= 1400 +#define MA_NO_INLINE __declspec(noinline) +#else +#define MA_NO_INLINE +#endif +#elif defined(__GNUC__) + /* + I've had a bug report where GCC is emitting warnings about functions possibly not being inlineable. This warning happens when + the __attribute__((always_inline)) attribute is defined without an "inline" statement. I think therefore there must be some + case where "__inline__" is not always defined, thus the compiler emitting these warnings. When using -std=c89 or -ansi on the + command line, we cannot use the "inline" keyword and instead need to use "__inline__". In an attempt to work around this issue + I am using "__inline__" only when we're compiling in strict ANSI mode. 
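+
+ To summarize the effect of the definitions below, with a GCC-style compiler the macros resolve
+ roughly as follows when compiling in strict ANSI mode (illustration only):
+
+     MA_INLINE    -> __inline__ __attribute__((always_inline))
+     MA_NO_INLINE -> __attribute__((noinline))
+
+ In non-strict mode, "inline" is used in place of "__inline__".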
+ */ +#if defined(__STRICT_ANSI__) +#define MA_GNUC_INLINE_HINT __inline__ +#else +#define MA_GNUC_INLINE_HINT inline +#endif + +#if (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 2)) || defined(__clang__) +#define MA_INLINE MA_GNUC_INLINE_HINT __attribute__((always_inline)) +#define MA_NO_INLINE __attribute__((noinline)) +#else +#define MA_INLINE MA_GNUC_INLINE_HINT +#define MA_NO_INLINE __attribute__((noinline)) +#endif +#elif defined(__WATCOMC__) +#define MA_INLINE __inline +#define MA_NO_INLINE +#else +#define MA_INLINE +#define MA_NO_INLINE +#endif + + /* MA_DLL is not officially supported. You're on your own if you want to use this. */ +#if defined(MA_DLL) +#if defined(_WIN32) +#define MA_DLL_IMPORT __declspec(dllimport) +#define MA_DLL_EXPORT __declspec(dllexport) +#define MA_DLL_PRIVATE static +#else +#if defined(__GNUC__) && __GNUC__ >= 4 +#define MA_DLL_IMPORT __attribute__((visibility("default"))) +#define MA_DLL_EXPORT __attribute__((visibility("default"))) +#define MA_DLL_PRIVATE __attribute__((visibility("hidden"))) +#else +#define MA_DLL_IMPORT +#define MA_DLL_EXPORT +#define MA_DLL_PRIVATE static +#endif +#endif +#endif + +#if !defined(MA_API) +#if defined(MA_DLL) +#if defined(MINIAUDIO_IMPLEMENTATION) || defined(MA_IMPLEMENTATION) +#define MA_API MA_DLL_EXPORT +#else +#define MA_API MA_DLL_IMPORT +#endif +#else +#define MA_API extern +#endif +#endif + +#if !defined(MA_STATIC) +#if defined(MA_DLL) +#define MA_PRIVATE MA_DLL_PRIVATE +#else +#define MA_PRIVATE static +#endif +#endif + + + /* SIMD alignment in bytes. Currently set to 32 bytes in preparation for future AVX optimizations. */ +#define MA_SIMD_ALIGNMENT 32 + + /* + Special wchar_t type to ensure any structures in the public sections that reference it have a + consistent size across all platforms. + + On Windows, wchar_t is 2 bytes, whereas everywhere else it's 4 bytes. Since Windows likes to use + wchar_t for it's IDs, we need a special explicitly sized wchar type that is always 2 bytes on all + platforms. + */ +#if !defined(MA_POSIX) && defined(MA_WIN32) + typedef wchar_t ma_wchar_win32; +#else + typedef ma_uint16 ma_wchar_win32; +#endif + + + + /* + Logging Levels + ============== + Log levels are only used to give logging callbacks some context as to the severity of a log message + so they can do filtering. All log levels will be posted to registered logging callbacks. If you + don't want to output a certain log level you can discriminate against the log level in the callback. + + MA_LOG_LEVEL_DEBUG + Used for debugging. Useful for debug and test builds, but should be disabled in release builds. + + MA_LOG_LEVEL_INFO + Informational logging. Useful for debugging. This will never be called from within the data + callback. + + MA_LOG_LEVEL_WARNING + Warnings. You should enable this in you development builds and action them when encounted. These + logs usually indicate a potential problem or misconfiguration, but still allow you to keep + running. This will never be called from within the data callback. + + MA_LOG_LEVEL_ERROR + Error logging. This will be fired when an operation fails and is subsequently aborted. This can + be fired from within the data callback, in which case the device will be stopped. You should + always have this log level enabled. 
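+
+ As an illustrative sketch (using only the logging API declared below; the callback and variable
+ names here are hypothetical, and printf assumes <stdio.h> is included), a callback that filters
+ out debug messages might look like this:
+
+     static void my_log_callback(void* pUserData, ma_uint32 level, const char* pMessage)
+     {
+         (void)pUserData;
+
+         if (level == MA_LOG_LEVEL_DEBUG) {
+             return; /* Not interested in debug messages. */
+         }
+
+         printf("miniaudio [level %u]: %s", level, pMessage);
+     }
+
+ You would then register it with `ma_log_register_callback(&log, ma_log_callback_init(my_log_callback, NULL))`.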
+ */ + typedef enum + { + MA_LOG_LEVEL_DEBUG = 4, + MA_LOG_LEVEL_INFO = 3, + MA_LOG_LEVEL_WARNING = 2, + MA_LOG_LEVEL_ERROR = 1 + } ma_log_level; + + /* + Variables needing to be accessed atomically should be declared with this macro for two reasons: + + 1) It allows people who read the code to identify a variable as such; and + 2) It forces alignment on platforms where it's required or optimal. + + Note that for x86/64, alignment is not strictly necessary, but does have some performance + implications. Where supported by the compiler, alignment will be used, but otherwise if the CPU + architecture does not require it, it will simply leave it unaligned. This is the case with old + versions of Visual Studio, which I've confirmed with at least VC6. + */ +#if !defined(_MSC_VER) && defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) +#include +#define MA_ATOMIC(alignment, type) _Alignas(alignment) type +#else +#if defined(__GNUC__) + /* GCC-style compilers. */ +#define MA_ATOMIC(alignment, type) type __attribute__((aligned(alignment))) +#elif defined(_MSC_VER) && _MSC_VER > 1200 /* 1200 = VC6. Alignment not supported, but not necessary because x86 is the only supported target. */ + /* MSVC. */ +#define MA_ATOMIC(alignment, type) __declspec(align(alignment)) type +#else + /* Other compilers. */ +#define MA_ATOMIC(alignment, type) type +#endif +#endif + + typedef struct ma_context ma_context; + typedef struct ma_device ma_device; + + typedef ma_uint8 ma_channel; + typedef enum + { + MA_CHANNEL_NONE = 0, + MA_CHANNEL_MONO = 1, + MA_CHANNEL_FRONT_LEFT = 2, + MA_CHANNEL_FRONT_RIGHT = 3, + MA_CHANNEL_FRONT_CENTER = 4, + MA_CHANNEL_LFE = 5, + MA_CHANNEL_BACK_LEFT = 6, + MA_CHANNEL_BACK_RIGHT = 7, + MA_CHANNEL_FRONT_LEFT_CENTER = 8, + MA_CHANNEL_FRONT_RIGHT_CENTER = 9, + MA_CHANNEL_BACK_CENTER = 10, + MA_CHANNEL_SIDE_LEFT = 11, + MA_CHANNEL_SIDE_RIGHT = 12, + MA_CHANNEL_TOP_CENTER = 13, + MA_CHANNEL_TOP_FRONT_LEFT = 14, + MA_CHANNEL_TOP_FRONT_CENTER = 15, + MA_CHANNEL_TOP_FRONT_RIGHT = 16, + MA_CHANNEL_TOP_BACK_LEFT = 17, + MA_CHANNEL_TOP_BACK_CENTER = 18, + MA_CHANNEL_TOP_BACK_RIGHT = 19, + MA_CHANNEL_AUX_0 = 20, + MA_CHANNEL_AUX_1 = 21, + MA_CHANNEL_AUX_2 = 22, + MA_CHANNEL_AUX_3 = 23, + MA_CHANNEL_AUX_4 = 24, + MA_CHANNEL_AUX_5 = 25, + MA_CHANNEL_AUX_6 = 26, + MA_CHANNEL_AUX_7 = 27, + MA_CHANNEL_AUX_8 = 28, + MA_CHANNEL_AUX_9 = 29, + MA_CHANNEL_AUX_10 = 30, + MA_CHANNEL_AUX_11 = 31, + MA_CHANNEL_AUX_12 = 32, + MA_CHANNEL_AUX_13 = 33, + MA_CHANNEL_AUX_14 = 34, + MA_CHANNEL_AUX_15 = 35, + MA_CHANNEL_AUX_16 = 36, + MA_CHANNEL_AUX_17 = 37, + MA_CHANNEL_AUX_18 = 38, + MA_CHANNEL_AUX_19 = 39, + MA_CHANNEL_AUX_20 = 40, + MA_CHANNEL_AUX_21 = 41, + MA_CHANNEL_AUX_22 = 42, + MA_CHANNEL_AUX_23 = 43, + MA_CHANNEL_AUX_24 = 44, + MA_CHANNEL_AUX_25 = 45, + MA_CHANNEL_AUX_26 = 46, + MA_CHANNEL_AUX_27 = 47, + MA_CHANNEL_AUX_28 = 48, + MA_CHANNEL_AUX_29 = 49, + MA_CHANNEL_AUX_30 = 50, + MA_CHANNEL_AUX_31 = 51, + MA_CHANNEL_LEFT = MA_CHANNEL_FRONT_LEFT, + MA_CHANNEL_RIGHT = MA_CHANNEL_FRONT_RIGHT, + MA_CHANNEL_POSITION_COUNT = (MA_CHANNEL_AUX_31 + 1) + } _ma_channel_position; /* Do not use `_ma_channel_position` directly. Use `ma_channel` instead. */ + + typedef enum + { + MA_SUCCESS = 0, + MA_ERROR = -1, /* A generic error. 
*/ + MA_INVALID_ARGS = -2, + MA_INVALID_OPERATION = -3, + MA_OUT_OF_MEMORY = -4, + MA_OUT_OF_RANGE = -5, + MA_ACCESS_DENIED = -6, + MA_DOES_NOT_EXIST = -7, + MA_ALREADY_EXISTS = -8, + MA_TOO_MANY_OPEN_FILES = -9, + MA_INVALID_FILE = -10, + MA_TOO_BIG = -11, + MA_PATH_TOO_LONG = -12, + MA_NAME_TOO_LONG = -13, + MA_NOT_DIRECTORY = -14, + MA_IS_DIRECTORY = -15, + MA_DIRECTORY_NOT_EMPTY = -16, + MA_AT_END = -17, + MA_NO_SPACE = -18, + MA_BUSY = -19, + MA_IO_ERROR = -20, + MA_INTERRUPT = -21, + MA_UNAVAILABLE = -22, + MA_ALREADY_IN_USE = -23, + MA_BAD_ADDRESS = -24, + MA_BAD_SEEK = -25, + MA_BAD_PIPE = -26, + MA_DEADLOCK = -27, + MA_TOO_MANY_LINKS = -28, + MA_NOT_IMPLEMENTED = -29, + MA_NO_MESSAGE = -30, + MA_BAD_MESSAGE = -31, + MA_NO_DATA_AVAILABLE = -32, + MA_INVALID_DATA = -33, + MA_TIMEOUT = -34, + MA_NO_NETWORK = -35, + MA_NOT_UNIQUE = -36, + MA_NOT_SOCKET = -37, + MA_NO_ADDRESS = -38, + MA_BAD_PROTOCOL = -39, + MA_PROTOCOL_UNAVAILABLE = -40, + MA_PROTOCOL_NOT_SUPPORTED = -41, + MA_PROTOCOL_FAMILY_NOT_SUPPORTED = -42, + MA_ADDRESS_FAMILY_NOT_SUPPORTED = -43, + MA_SOCKET_NOT_SUPPORTED = -44, + MA_CONNECTION_RESET = -45, + MA_ALREADY_CONNECTED = -46, + MA_NOT_CONNECTED = -47, + MA_CONNECTION_REFUSED = -48, + MA_NO_HOST = -49, + MA_IN_PROGRESS = -50, + MA_CANCELLED = -51, + MA_MEMORY_ALREADY_MAPPED = -52, + + /* General non-standard errors. */ + MA_CRC_MISMATCH = -100, + + /* General miniaudio-specific errors. */ + MA_FORMAT_NOT_SUPPORTED = -200, + MA_DEVICE_TYPE_NOT_SUPPORTED = -201, + MA_SHARE_MODE_NOT_SUPPORTED = -202, + MA_NO_BACKEND = -203, + MA_NO_DEVICE = -204, + MA_API_NOT_FOUND = -205, + MA_INVALID_DEVICE_CONFIG = -206, + MA_LOOP = -207, + MA_BACKEND_NOT_ENABLED = -208, + + /* State errors. */ + MA_DEVICE_NOT_INITIALIZED = -300, + MA_DEVICE_ALREADY_INITIALIZED = -301, + MA_DEVICE_NOT_STARTED = -302, + MA_DEVICE_NOT_STOPPED = -303, + + /* Operation errors. */ + MA_FAILED_TO_INIT_BACKEND = -400, + MA_FAILED_TO_OPEN_BACKEND_DEVICE = -401, + MA_FAILED_TO_START_BACKEND_DEVICE = -402, + MA_FAILED_TO_STOP_BACKEND_DEVICE = -403 + } ma_result; + + +#define MA_MIN_CHANNELS 1 +#ifndef MA_MAX_CHANNELS +#define MA_MAX_CHANNELS 254 +#endif + +#ifndef MA_MAX_FILTER_ORDER +#define MA_MAX_FILTER_ORDER 8 +#endif + + typedef enum + { + ma_stream_format_pcm = 0 + } ma_stream_format; + + typedef enum + { + ma_stream_layout_interleaved = 0, + ma_stream_layout_deinterleaved + } ma_stream_layout; + + typedef enum + { + ma_dither_mode_none = 0, + ma_dither_mode_rectangle, + ma_dither_mode_triangle + } ma_dither_mode; + + typedef enum + { + /* + I like to keep these explicitly defined because they're used as a key into a lookup table. When items are + added to this, make sure there are no gaps and that they're added to the lookup table in ma_get_bytes_per_sample(). + */ + ma_format_unknown = 0, /* Mainly used for indicating an error, but also used as the default for the output format for decoders. */ + ma_format_u8 = 1, + ma_format_s16 = 2, /* Seems to be the most widely supported format. */ + ma_format_s24 = 3, /* Tightly packed. 3 bytes per sample. */ + ma_format_s32 = 4, + ma_format_f32 = 5, + ma_format_count + } ma_format; + + typedef enum + { + /* Standard rates need to be in priority order. 
*/
+ ma_standard_sample_rate_48000 = 48000, /* Most common */
+ ma_standard_sample_rate_44100 = 44100,
+
+ ma_standard_sample_rate_32000 = 32000, /* Lows */
+ ma_standard_sample_rate_24000 = 24000,
+ ma_standard_sample_rate_22050 = 22050,
+
+ ma_standard_sample_rate_88200 = 88200, /* Highs */
+ ma_standard_sample_rate_96000 = 96000,
+ ma_standard_sample_rate_176400 = 176400,
+ ma_standard_sample_rate_192000 = 192000,
+
+ ma_standard_sample_rate_16000 = 16000, /* Extreme lows */
+ ma_standard_sample_rate_11025 = 11025,
+ ma_standard_sample_rate_8000 = 8000,
+
+ ma_standard_sample_rate_352800 = 352800, /* Extreme highs */
+ ma_standard_sample_rate_384000 = 384000,
+
+ ma_standard_sample_rate_min = ma_standard_sample_rate_8000,
+ ma_standard_sample_rate_max = ma_standard_sample_rate_384000,
+ ma_standard_sample_rate_count = 14 /* Need to maintain the count manually. Make sure this is updated if items are added to enum. */
+ } ma_standard_sample_rate;
+
+
+ typedef enum
+ {
+ ma_channel_mix_mode_rectangular = 0, /* Simple averaging based on the plane(s) the channel is sitting on. */
+ ma_channel_mix_mode_simple, /* Drop excess channels; zeroed out extra channels. */
+ ma_channel_mix_mode_custom_weights, /* Use custom weights specified in ma_channel_converter_config. */
+ ma_channel_mix_mode_default = ma_channel_mix_mode_rectangular
+ } ma_channel_mix_mode;
+
+ typedef enum
+ {
+ ma_standard_channel_map_microsoft,
+ ma_standard_channel_map_alsa,
+ ma_standard_channel_map_rfc3551, /* Based off AIFF. */
+ ma_standard_channel_map_flac,
+ ma_standard_channel_map_vorbis,
+ ma_standard_channel_map_sound4, /* FreeBSD's sound(4). */
+ ma_standard_channel_map_sndio, /* www.sndio.org/tips.html */
+ ma_standard_channel_map_webaudio = ma_standard_channel_map_flac, /* https://webaudio.github.io/web-audio-api/#ChannelOrdering. Only 1, 2, 4 and 6 channels are defined, but can fill in the gaps with logical assumptions. */
+ ma_standard_channel_map_default = ma_standard_channel_map_microsoft
+ } ma_standard_channel_map;
+
+ typedef enum
+ {
+ ma_performance_profile_low_latency = 0,
+ ma_performance_profile_conservative
+ } ma_performance_profile;
+
+
+ typedef struct
+ {
+ void* pUserData;
+ void* (* onMalloc)(size_t sz, void* pUserData);
+ void* (* onRealloc)(void* p, size_t sz, void* pUserData);
+ void (* onFree)(void* p, void* pUserData);
+ } ma_allocation_callbacks;
+
+ typedef struct
+ {
+ ma_int32 state;
+ } ma_lcg;
+
+
+ /*
+ Atomics.
+
+ These are typesafe structures to prevent errors as a result of forgetting to reference variables atomically. It's too
+ easy to introduce subtle bugs where you accidentally do a regular assignment instead of an atomic load/store, etc. By
+ using a struct we can enforce the use of atomics at compile time.
+
+ These types are declared in the header section because we need to reference them in structs below, but functions for
+ using them are only exposed in the implementation section. I do not want these to be part of the public API.
+
+ There are a few downsides to this system. The first is that you need to declare a new struct for each type. Below are
+ some macros to help with the declarations. They will be named like so:
+
+ ma_atomic_uint32 - atomic ma_uint32
+ ma_atomic_int32 - atomic ma_int32
+ ma_atomic_uint64 - atomic ma_uint64
+ ma_atomic_float - atomic float
+ ma_atomic_bool32 - atomic ma_bool32
+
+ The other downside is that atomic pointers are extremely messy. You need to declare a new struct for each specific
+ type of pointer you need to make atomic.
For example, an atomic ma_node* will look like this: + + MA_ATOMIC_SAFE_TYPE_IMPL_PTR(node) + + Which will declare a type struct that's named like so: + + ma_atomic_ptr_node + + Functions to use the atomic types are declared in the implementation section. All atomic functions are prefixed with + the name of the struct. For example: + + ma_atomic_uint32_set() - Atomic store of ma_uint32 + ma_atomic_uint32_get() - Atomic load of ma_uint32 + etc. + + For pointer types it's the same, which makes them a bit messy to use due to the length of each function name, but in + return you get type safety and enforcement of atomic operations. + */ +#define MA_ATOMIC_SAFE_TYPE_DECL(c89TypeExtension, typeSize, type) \ + typedef struct \ + { \ + MA_ATOMIC(typeSize, ma_##type) value; \ + } ma_atomic_##type; \ + +#define MA_ATOMIC_SAFE_TYPE_DECL_PTR(type) \ + typedef struct \ + { \ + MA_ATOMIC(MA_SIZEOF_PTR, ma_##type*) value; \ + } ma_atomic_ptr_##type; \ + + MA_ATOMIC_SAFE_TYPE_DECL(32, 4, uint32) + MA_ATOMIC_SAFE_TYPE_DECL(i32, 4, int32) + MA_ATOMIC_SAFE_TYPE_DECL(64, 8, uint64) + MA_ATOMIC_SAFE_TYPE_DECL(f32, 4, float) + MA_ATOMIC_SAFE_TYPE_DECL(32, 4, bool32) + + + /* Spinlocks are 32-bit for compatibility reasons. */ + typedef ma_uint32 ma_spinlock; + +#ifndef MA_NO_THREADING + /* Thread priorities should be ordered such that the default priority of the worker thread is 0. */ + typedef enum + { + ma_thread_priority_idle = -5, + ma_thread_priority_lowest = -4, + ma_thread_priority_low = -3, + ma_thread_priority_normal = -2, + ma_thread_priority_high = -1, + ma_thread_priority_highest = 0, + ma_thread_priority_realtime = 1, + ma_thread_priority_default = 0 + } ma_thread_priority; + +#if defined(MA_POSIX) + typedef ma_pthread_t ma_thread; +#elif defined(MA_WIN32) + typedef ma_handle ma_thread; +#endif + +#if defined(MA_POSIX) + typedef ma_pthread_mutex_t ma_mutex; +#elif defined(MA_WIN32) + typedef ma_handle ma_mutex; +#endif + +#if defined(MA_POSIX) + typedef struct + { + ma_uint32 value; + ma_pthread_mutex_t lock; + ma_pthread_cond_t cond; + } ma_event; +#elif defined(MA_WIN32) + typedef ma_handle ma_event; +#endif + +#if defined(MA_POSIX) + typedef struct + { + int value; + ma_pthread_mutex_t lock; + ma_pthread_cond_t cond; + } ma_semaphore; +#elif defined(MA_WIN32) + typedef ma_handle ma_semaphore; +#endif +#else + /* MA_NO_THREADING is set which means threading is disabled. Threading is required by some API families. If any of these are enabled we need to throw an error. */ +#ifndef MA_NO_DEVICE_IO +#error "MA_NO_THREADING cannot be used without MA_NO_DEVICE_IO"; +#endif +#endif /* MA_NO_THREADING */ + + + /* + Retrieves the version of miniaudio as separated integers. Each component can be NULL if it's not required. + */ + MA_API void ma_version(ma_uint32* pMajor, ma_uint32* pMinor, ma_uint32* pRevision); + + /* + Retrieves the version of miniaudio as a string which can be useful for logging purposes. + */ + MA_API const char* ma_version_string(void); + + + /************************************************************************************************************************************************************** + + Logging + + **************************************************************************************************************************************************************/ +#include /* For va_list. 
*/ + +#if defined(__has_attribute) +#if __has_attribute(format) +#define MA_ATTRIBUTE_FORMAT(fmt, va) __attribute__((format(printf, fmt, va))) +#endif +#endif +#ifndef MA_ATTRIBUTE_FORMAT +#define MA_ATTRIBUTE_FORMAT(fmt, va) +#endif + +#ifndef MA_MAX_LOG_CALLBACKS +#define MA_MAX_LOG_CALLBACKS 4 +#endif + + + /* + The callback for handling log messages. + + + Parameters + ---------- + pUserData (in) + The user data pointer that was passed into ma_log_register_callback(). + + logLevel (in) + The log level. This can be one of the following: + + +----------------------+ + | Log Level | + +----------------------+ + | MA_LOG_LEVEL_DEBUG | + | MA_LOG_LEVEL_INFO | + | MA_LOG_LEVEL_WARNING | + | MA_LOG_LEVEL_ERROR | + +----------------------+ + + pMessage (in) + The log message. + */ + typedef void (* ma_log_callback_proc)(void* pUserData, ma_uint32 level, const char* pMessage); + + typedef struct + { + ma_log_callback_proc onLog; + void* pUserData; + } ma_log_callback; + + MA_API ma_log_callback ma_log_callback_init(ma_log_callback_proc onLog, void* pUserData); + + + typedef struct + { + ma_log_callback callbacks[MA_MAX_LOG_CALLBACKS]; + ma_uint32 callbackCount; + ma_allocation_callbacks allocationCallbacks; /* Need to store these persistently because ma_log_postv() might need to allocate a buffer on the heap. */ +#ifndef MA_NO_THREADING + ma_mutex lock; /* For thread safety just to make it easier and safer for the logging implementation. */ +#endif + } ma_log; + + MA_API ma_result ma_log_init(const ma_allocation_callbacks* pAllocationCallbacks, ma_log* pLog); + MA_API void ma_log_uninit(ma_log* pLog); + MA_API ma_result ma_log_register_callback(ma_log* pLog, ma_log_callback callback); + MA_API ma_result ma_log_unregister_callback(ma_log* pLog, ma_log_callback callback); + MA_API ma_result ma_log_post(ma_log* pLog, ma_uint32 level, const char* pMessage); + MA_API ma_result ma_log_postv(ma_log* pLog, ma_uint32 level, const char* pFormat, va_list args); + MA_API ma_result ma_log_postf(ma_log* pLog, ma_uint32 level, const char* pFormat, ...) MA_ATTRIBUTE_FORMAT(3, 4); + + + /************************************************************************************************************************************************************** + + Biquad Filtering + + **************************************************************************************************************************************************************/ + typedef union + { + float f32; + ma_int32 s32; + } ma_biquad_coefficient; + + typedef struct + { + ma_format format; + ma_uint32 channels; + double b0; + double b1; + double b2; + double a0; + double a1; + double a2; + } ma_biquad_config; + + MA_API ma_biquad_config ma_biquad_config_init(ma_format format, ma_uint32 channels, double b0, double b1, double b2, double a0, double a1, double a2); + + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_biquad_coefficient b0; + ma_biquad_coefficient b1; + ma_biquad_coefficient b2; + ma_biquad_coefficient a1; + ma_biquad_coefficient a2; + ma_biquad_coefficient* pR1; + ma_biquad_coefficient* pR2; + + /* Memory management. 
*/ + void* _pHeap; + ma_bool32 _ownsHeap; + } ma_biquad; + + MA_API ma_result ma_biquad_get_heap_size(const ma_biquad_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_biquad_init_preallocated(const ma_biquad_config* pConfig, void* pHeap, ma_biquad* pBQ); + MA_API ma_result ma_biquad_init(const ma_biquad_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_biquad* pBQ); + MA_API void ma_biquad_uninit(ma_biquad* pBQ, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_biquad_reinit(const ma_biquad_config* pConfig, ma_biquad* pBQ); + MA_API ma_result ma_biquad_clear_cache(ma_biquad* pBQ); + MA_API ma_result ma_biquad_process_pcm_frames(ma_biquad* pBQ, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + MA_API ma_uint32 ma_biquad_get_latency(const ma_biquad* pBQ); + + + /************************************************************************************************************************************************************** + + Low-Pass Filtering + + **************************************************************************************************************************************************************/ + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + double q; + } ma_lpf1_config, ma_lpf2_config; + + MA_API ma_lpf1_config ma_lpf1_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency); + MA_API ma_lpf2_config ma_lpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q); + + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_biquad_coefficient a; + ma_biquad_coefficient* pR1; + + /* Memory management. */ + void* _pHeap; + ma_bool32 _ownsHeap; + } ma_lpf1; + + MA_API ma_result ma_lpf1_get_heap_size(const ma_lpf1_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_lpf1_init_preallocated(const ma_lpf1_config* pConfig, void* pHeap, ma_lpf1* pLPF); + MA_API ma_result ma_lpf1_init(const ma_lpf1_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_lpf1* pLPF); + MA_API void ma_lpf1_uninit(ma_lpf1* pLPF, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_lpf1_reinit(const ma_lpf1_config* pConfig, ma_lpf1* pLPF); + MA_API ma_result ma_lpf1_clear_cache(ma_lpf1* pLPF); + MA_API ma_result ma_lpf1_process_pcm_frames(ma_lpf1* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + MA_API ma_uint32 ma_lpf1_get_latency(const ma_lpf1* pLPF); + + typedef struct + { + ma_biquad bq; /* The second order low-pass filter is implemented as a biquad filter. 
*/ + } ma_lpf2; + + MA_API ma_result ma_lpf2_get_heap_size(const ma_lpf2_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_lpf2_init_preallocated(const ma_lpf2_config* pConfig, void* pHeap, ma_lpf2* pHPF); + MA_API ma_result ma_lpf2_init(const ma_lpf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_lpf2* pLPF); + MA_API void ma_lpf2_uninit(ma_lpf2* pLPF, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_lpf2_reinit(const ma_lpf2_config* pConfig, ma_lpf2* pLPF); + MA_API ma_result ma_lpf2_clear_cache(ma_lpf2* pLPF); + MA_API ma_result ma_lpf2_process_pcm_frames(ma_lpf2* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + MA_API ma_uint32 ma_lpf2_get_latency(const ma_lpf2* pLPF); + + + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + ma_uint32 order; /* If set to 0, will be treated as a passthrough (no filtering will be applied). */ + } ma_lpf_config; + + MA_API ma_lpf_config ma_lpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order); + + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_uint32 lpf1Count; + ma_uint32 lpf2Count; + ma_lpf1* pLPF1; + ma_lpf2* pLPF2; + + /* Memory management. */ + void* _pHeap; + ma_bool32 _ownsHeap; + } ma_lpf; + + MA_API ma_result ma_lpf_get_heap_size(const ma_lpf_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_lpf_init_preallocated(const ma_lpf_config* pConfig, void* pHeap, ma_lpf* pLPF); + MA_API ma_result ma_lpf_init(const ma_lpf_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_lpf* pLPF); + MA_API void ma_lpf_uninit(ma_lpf* pLPF, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_lpf_reinit(const ma_lpf_config* pConfig, ma_lpf* pLPF); + MA_API ma_result ma_lpf_clear_cache(ma_lpf* pLPF); + MA_API ma_result ma_lpf_process_pcm_frames(ma_lpf* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + MA_API ma_uint32 ma_lpf_get_latency(const ma_lpf* pLPF); + + + /************************************************************************************************************************************************************** + + High-Pass Filtering + + **************************************************************************************************************************************************************/ + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + double q; + } ma_hpf1_config, ma_hpf2_config; + + MA_API ma_hpf1_config ma_hpf1_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency); + MA_API ma_hpf2_config ma_hpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q); + + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_biquad_coefficient a; + ma_biquad_coefficient* pR1; + + /* Memory management. 
*/ + void* _pHeap; + ma_bool32 _ownsHeap; + } ma_hpf1; + + MA_API ma_result ma_hpf1_get_heap_size(const ma_hpf1_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_hpf1_init_preallocated(const ma_hpf1_config* pConfig, void* pHeap, ma_hpf1* pLPF); + MA_API ma_result ma_hpf1_init(const ma_hpf1_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hpf1* pHPF); + MA_API void ma_hpf1_uninit(ma_hpf1* pHPF, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_hpf1_reinit(const ma_hpf1_config* pConfig, ma_hpf1* pHPF); + MA_API ma_result ma_hpf1_process_pcm_frames(ma_hpf1* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + MA_API ma_uint32 ma_hpf1_get_latency(const ma_hpf1* pHPF); + + typedef struct + { + ma_biquad bq; /* The second order high-pass filter is implemented as a biquad filter. */ + } ma_hpf2; + + MA_API ma_result ma_hpf2_get_heap_size(const ma_hpf2_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_hpf2_init_preallocated(const ma_hpf2_config* pConfig, void* pHeap, ma_hpf2* pHPF); + MA_API ma_result ma_hpf2_init(const ma_hpf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hpf2* pHPF); + MA_API void ma_hpf2_uninit(ma_hpf2* pHPF, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_hpf2_reinit(const ma_hpf2_config* pConfig, ma_hpf2* pHPF); + MA_API ma_result ma_hpf2_process_pcm_frames(ma_hpf2* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + MA_API ma_uint32 ma_hpf2_get_latency(const ma_hpf2* pHPF); + + + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + ma_uint32 order; /* If set to 0, will be treated as a passthrough (no filtering will be applied). */ + } ma_hpf_config; + + MA_API ma_hpf_config ma_hpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order); + + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_uint32 hpf1Count; + ma_uint32 hpf2Count; + ma_hpf1* pHPF1; + ma_hpf2* pHPF2; + + /* Memory management. 
*/ + void* _pHeap; + ma_bool32 _ownsHeap; + } ma_hpf; + + MA_API ma_result ma_hpf_get_heap_size(const ma_hpf_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_hpf_init_preallocated(const ma_hpf_config* pConfig, void* pHeap, ma_hpf* pLPF); + MA_API ma_result ma_hpf_init(const ma_hpf_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hpf* pHPF); + MA_API void ma_hpf_uninit(ma_hpf* pHPF, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_hpf_reinit(const ma_hpf_config* pConfig, ma_hpf* pHPF); + MA_API ma_result ma_hpf_process_pcm_frames(ma_hpf* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + MA_API ma_uint32 ma_hpf_get_latency(const ma_hpf* pHPF); + + + /************************************************************************************************************************************************************** + + Band-Pass Filtering + + **************************************************************************************************************************************************************/ + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + double q; + } ma_bpf2_config; + + MA_API ma_bpf2_config ma_bpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q); + + typedef struct + { + ma_biquad bq; /* The second order band-pass filter is implemented as a biquad filter. */ + } ma_bpf2; + + MA_API ma_result ma_bpf2_get_heap_size(const ma_bpf2_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_bpf2_init_preallocated(const ma_bpf2_config* pConfig, void* pHeap, ma_bpf2* pBPF); + MA_API ma_result ma_bpf2_init(const ma_bpf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_bpf2* pBPF); + MA_API void ma_bpf2_uninit(ma_bpf2* pBPF, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_bpf2_reinit(const ma_bpf2_config* pConfig, ma_bpf2* pBPF); + MA_API ma_result ma_bpf2_process_pcm_frames(ma_bpf2* pBPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + MA_API ma_uint32 ma_bpf2_get_latency(const ma_bpf2* pBPF); + + + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + ma_uint32 order; /* If set to 0, will be treated as a passthrough (no filtering will be applied). */ + } ma_bpf_config; + + MA_API ma_bpf_config ma_bpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order); + + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 bpf2Count; + ma_bpf2* pBPF2; + + /* Memory management. 
*/ + void* _pHeap; + ma_bool32 _ownsHeap; + } ma_bpf; + + MA_API ma_result ma_bpf_get_heap_size(const ma_bpf_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_bpf_init_preallocated(const ma_bpf_config* pConfig, void* pHeap, ma_bpf* pBPF); + MA_API ma_result ma_bpf_init(const ma_bpf_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_bpf* pBPF); + MA_API void ma_bpf_uninit(ma_bpf* pBPF, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_bpf_reinit(const ma_bpf_config* pConfig, ma_bpf* pBPF); + MA_API ma_result ma_bpf_process_pcm_frames(ma_bpf* pBPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + MA_API ma_uint32 ma_bpf_get_latency(const ma_bpf* pBPF); + + + /************************************************************************************************************************************************************** + + Notching Filter + + **************************************************************************************************************************************************************/ + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double q; + double frequency; + } ma_notch2_config, ma_notch_config; + + MA_API ma_notch2_config ma_notch2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double q, double frequency); + + typedef struct + { + ma_biquad bq; + } ma_notch2; + + MA_API ma_result ma_notch2_get_heap_size(const ma_notch2_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_notch2_init_preallocated(const ma_notch2_config* pConfig, void* pHeap, ma_notch2* pFilter); + MA_API ma_result ma_notch2_init(const ma_notch2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_notch2* pFilter); + MA_API void ma_notch2_uninit(ma_notch2* pFilter, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_notch2_reinit(const ma_notch2_config* pConfig, ma_notch2* pFilter); + MA_API ma_result ma_notch2_process_pcm_frames(ma_notch2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + MA_API ma_uint32 ma_notch2_get_latency(const ma_notch2* pFilter); + + + /************************************************************************************************************************************************************** + + Peaking EQ Filter + + **************************************************************************************************************************************************************/ + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double gainDB; + double q; + double frequency; + } ma_peak2_config, ma_peak_config; + + MA_API ma_peak2_config ma_peak2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency); + + typedef struct + { + ma_biquad bq; + } ma_peak2; + + MA_API ma_result ma_peak2_get_heap_size(const ma_peak2_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_peak2_init_preallocated(const ma_peak2_config* pConfig, void* pHeap, ma_peak2* pFilter); + MA_API ma_result ma_peak2_init(const ma_peak2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_peak2* pFilter); + MA_API void ma_peak2_uninit(ma_peak2* pFilter, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_peak2_reinit(const ma_peak2_config* pConfig, ma_peak2* pFilter); + MA_API ma_result ma_peak2_process_pcm_frames(ma_peak2* pFilter, void* pFramesOut, 
const void* pFramesIn, ma_uint64 frameCount); + MA_API ma_uint32 ma_peak2_get_latency(const ma_peak2* pFilter); + + + /************************************************************************************************************************************************************** + + Low Shelf Filter + + **************************************************************************************************************************************************************/ + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double gainDB; + double shelfSlope; + double frequency; + } ma_loshelf2_config, ma_loshelf_config; + + MA_API ma_loshelf2_config ma_loshelf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double shelfSlope, double frequency); + + typedef struct + { + ma_biquad bq; + } ma_loshelf2; + + MA_API ma_result ma_loshelf2_get_heap_size(const ma_loshelf2_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_loshelf2_init_preallocated(const ma_loshelf2_config* pConfig, void* pHeap, ma_loshelf2* pFilter); + MA_API ma_result ma_loshelf2_init(const ma_loshelf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_loshelf2* pFilter); + MA_API void ma_loshelf2_uninit(ma_loshelf2* pFilter, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_loshelf2_reinit(const ma_loshelf2_config* pConfig, ma_loshelf2* pFilter); + MA_API ma_result ma_loshelf2_process_pcm_frames(ma_loshelf2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + MA_API ma_uint32 ma_loshelf2_get_latency(const ma_loshelf2* pFilter); + + + /************************************************************************************************************************************************************** + + High Shelf Filter + + **************************************************************************************************************************************************************/ + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double gainDB; + double shelfSlope; + double frequency; + } ma_hishelf2_config, ma_hishelf_config; + + MA_API ma_hishelf2_config ma_hishelf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double shelfSlope, double frequency); + + typedef struct + { + ma_biquad bq; + } ma_hishelf2; + + MA_API ma_result ma_hishelf2_get_heap_size(const ma_hishelf2_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_hishelf2_init_preallocated(const ma_hishelf2_config* pConfig, void* pHeap, ma_hishelf2* pFilter); + MA_API ma_result ma_hishelf2_init(const ma_hishelf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hishelf2* pFilter); + MA_API void ma_hishelf2_uninit(ma_hishelf2* pFilter, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_hishelf2_reinit(const ma_hishelf2_config* pConfig, ma_hishelf2* pFilter); + MA_API ma_result ma_hishelf2_process_pcm_frames(ma_hishelf2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + MA_API ma_uint32 ma_hishelf2_get_latency(const ma_hishelf2* pFilter); + + + + /* + Delay + */ + typedef struct + { + ma_uint32 channels; + ma_uint32 sampleRate; + ma_uint32 delayInFrames; + ma_bool32 delayStart; /* Set to true to delay the start of the output; false otherwise. */ + float wet; /* 0..1. Default = 1. */ + float dry; /* 0..1. Default = 1. */ + float decay; /* 0..1. Default = 0 (no feedback). 
Feedback decay. Use this for echo. */ + } ma_delay_config; + + MA_API ma_delay_config ma_delay_config_init(ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 delayInFrames, float decay); + + + typedef struct + { + ma_delay_config config; + ma_uint32 cursor; /* Feedback is written to this cursor. Always equal or in front of the read cursor. */ + ma_uint32 bufferSizeInFrames; + float* pBuffer; + } ma_delay; + + MA_API ma_result ma_delay_init(const ma_delay_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_delay* pDelay); + MA_API void ma_delay_uninit(ma_delay* pDelay, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_delay_process_pcm_frames(ma_delay* pDelay, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount); + MA_API void ma_delay_set_wet(ma_delay* pDelay, float value); + MA_API float ma_delay_get_wet(const ma_delay* pDelay); + MA_API void ma_delay_set_dry(ma_delay* pDelay, float value); + MA_API float ma_delay_get_dry(const ma_delay* pDelay); + MA_API void ma_delay_set_decay(ma_delay* pDelay, float value); + MA_API float ma_delay_get_decay(const ma_delay* pDelay); + + + /* Gainer for smooth volume changes. */ + typedef struct + { + ma_uint32 channels; + ma_uint32 smoothTimeInFrames; + } ma_gainer_config; + + MA_API ma_gainer_config ma_gainer_config_init(ma_uint32 channels, ma_uint32 smoothTimeInFrames); + + + typedef struct + { + ma_gainer_config config; + ma_uint32 t; + float masterVolume; + float* pOldGains; + float* pNewGains; + + /* Memory management. */ + void* _pHeap; + ma_bool32 _ownsHeap; + } ma_gainer; + + MA_API ma_result ma_gainer_get_heap_size(const ma_gainer_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_gainer_init_preallocated(const ma_gainer_config* pConfig, void* pHeap, ma_gainer* pGainer); + MA_API ma_result ma_gainer_init(const ma_gainer_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_gainer* pGainer); + MA_API void ma_gainer_uninit(ma_gainer* pGainer, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_gainer_process_pcm_frames(ma_gainer* pGainer, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + MA_API ma_result ma_gainer_set_gain(ma_gainer* pGainer, float newGain); + MA_API ma_result ma_gainer_set_gains(ma_gainer* pGainer, float* pNewGains); + MA_API ma_result ma_gainer_set_master_volume(ma_gainer* pGainer, float volume); + MA_API ma_result ma_gainer_get_master_volume(const ma_gainer* pGainer, float* pVolume); + + + + /* Stereo panner. */ + typedef enum + { + ma_pan_mode_balance = 0, /* Does not blend one side with the other. Technically just a balance. Compatible with other popular audio engines and therefore the default. */ + ma_pan_mode_pan /* A true pan. The sound from one side will "move" to the other side and blend with it. */ + } ma_pan_mode; + + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_pan_mode mode; + float pan; + } ma_panner_config; + + MA_API ma_panner_config ma_panner_config_init(ma_format format, ma_uint32 channels); + + + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_pan_mode mode; + float pan; /* -1..1 where 0 is no pan, -1 is left side, +1 is right side. Defaults to 0. 
*/ + } ma_panner; + + MA_API ma_result ma_panner_init(const ma_panner_config* pConfig, ma_panner* pPanner); + MA_API ma_result ma_panner_process_pcm_frames(ma_panner* pPanner, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + MA_API void ma_panner_set_mode(ma_panner* pPanner, ma_pan_mode mode); + MA_API ma_pan_mode ma_panner_get_mode(const ma_panner* pPanner); + MA_API void ma_panner_set_pan(ma_panner* pPanner, float pan); + MA_API float ma_panner_get_pan(const ma_panner* pPanner); + + + + /* Fader. */ + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + } ma_fader_config; + + MA_API ma_fader_config ma_fader_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate); + + typedef struct + { + ma_fader_config config; + float volumeBeg; /* If volumeBeg and volumeEnd is equal to 1, no fading happens (ma_fader_process_pcm_frames() will run as a passthrough). */ + float volumeEnd; + ma_uint64 lengthInFrames; /* The total length of the fade. */ + ma_int64 cursorInFrames; /* The current time in frames. Incremented by ma_fader_process_pcm_frames(). Signed because it'll be offset by startOffsetInFrames in set_fade_ex(). */ + } ma_fader; + + MA_API ma_result ma_fader_init(const ma_fader_config* pConfig, ma_fader* pFader); + MA_API ma_result ma_fader_process_pcm_frames(ma_fader* pFader, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + MA_API void ma_fader_get_data_format(const ma_fader* pFader, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate); + MA_API void ma_fader_set_fade(ma_fader* pFader, float volumeBeg, float volumeEnd, ma_uint64 lengthInFrames); + MA_API void ma_fader_set_fade_ex(ma_fader* pFader, float volumeBeg, float volumeEnd, ma_uint64 lengthInFrames, ma_int64 startOffsetInFrames); + MA_API float ma_fader_get_current_volume(const ma_fader* pFader); + + + + /* Spatializer. */ + typedef struct + { + float x; + float y; + float z; + } ma_vec3f; + + typedef struct + { + ma_vec3f v; + ma_spinlock lock; + } ma_atomic_vec3f; + + typedef enum + { + ma_attenuation_model_none, /* No distance attenuation and no spatialization. */ + ma_attenuation_model_inverse, /* Equivalent to OpenAL's AL_INVERSE_DISTANCE_CLAMPED. */ + ma_attenuation_model_linear, /* Linear attenuation. Equivalent to OpenAL's AL_LINEAR_DISTANCE_CLAMPED. */ + ma_attenuation_model_exponential /* Exponential attenuation. Equivalent to OpenAL's AL_EXPONENT_DISTANCE_CLAMPED. */ + } ma_attenuation_model; + + typedef enum + { + ma_positioning_absolute, + ma_positioning_relative + } ma_positioning; + + typedef enum + { + ma_handedness_right, + ma_handedness_left + } ma_handedness; + + + typedef struct + { + ma_uint32 channelsOut; + ma_channel* pChannelMapOut; + ma_handedness handedness; /* Defaults to right. Forward is -1 on the Z axis. In a left handed system, forward is +1 on the Z axis. */ + float coneInnerAngleInRadians; + float coneOuterAngleInRadians; + float coneOuterGain; + float speedOfSound; + ma_vec3f worldUp; + } ma_spatializer_listener_config; + + MA_API ma_spatializer_listener_config ma_spatializer_listener_config_init(ma_uint32 channelsOut); + + + typedef struct + { + ma_spatializer_listener_config config; + ma_atomic_vec3f position; /* The absolute position of the listener. */ + ma_atomic_vec3f direction; /* The direction the listener is facing. The world up vector is config.worldUp. */ + ma_atomic_vec3f velocity; + ma_bool32 isEnabled; + + /* Memory management. 
*/ + ma_bool32 _ownsHeap; + void* _pHeap; + } ma_spatializer_listener; + + MA_API ma_result ma_spatializer_listener_get_heap_size(const ma_spatializer_listener_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_spatializer_listener_init_preallocated(const ma_spatializer_listener_config* pConfig, void* pHeap, ma_spatializer_listener* pListener); + MA_API ma_result ma_spatializer_listener_init(const ma_spatializer_listener_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_spatializer_listener* pListener); + MA_API void ma_spatializer_listener_uninit(ma_spatializer_listener* pListener, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_channel* ma_spatializer_listener_get_channel_map(ma_spatializer_listener* pListener); + MA_API void ma_spatializer_listener_set_cone(ma_spatializer_listener* pListener, float innerAngleInRadians, float outerAngleInRadians, float outerGain); + MA_API void ma_spatializer_listener_get_cone(const ma_spatializer_listener* pListener, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain); + MA_API void ma_spatializer_listener_set_position(ma_spatializer_listener* pListener, float x, float y, float z); + MA_API ma_vec3f ma_spatializer_listener_get_position(const ma_spatializer_listener* pListener); + MA_API void ma_spatializer_listener_set_direction(ma_spatializer_listener* pListener, float x, float y, float z); + MA_API ma_vec3f ma_spatializer_listener_get_direction(const ma_spatializer_listener* pListener); + MA_API void ma_spatializer_listener_set_velocity(ma_spatializer_listener* pListener, float x, float y, float z); + MA_API ma_vec3f ma_spatializer_listener_get_velocity(const ma_spatializer_listener* pListener); + MA_API void ma_spatializer_listener_set_speed_of_sound(ma_spatializer_listener* pListener, float speedOfSound); + MA_API float ma_spatializer_listener_get_speed_of_sound(const ma_spatializer_listener* pListener); + MA_API void ma_spatializer_listener_set_world_up(ma_spatializer_listener* pListener, float x, float y, float z); + MA_API ma_vec3f ma_spatializer_listener_get_world_up(const ma_spatializer_listener* pListener); + MA_API void ma_spatializer_listener_set_enabled(ma_spatializer_listener* pListener, ma_bool32 isEnabled); + MA_API ma_bool32 ma_spatializer_listener_is_enabled(const ma_spatializer_listener* pListener); + + + typedef struct + { + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_channel* pChannelMapIn; + ma_attenuation_model attenuationModel; + ma_positioning positioning; + ma_handedness handedness; /* Defaults to right. Forward is -1 on the Z axis. In a left handed system, forward is +1 on the Z axis. */ + float minGain; + float maxGain; + float minDistance; + float maxDistance; + float rolloff; + float coneInnerAngleInRadians; + float coneOuterAngleInRadians; + float coneOuterGain; + float dopplerFactor; /* Set to 0 to disable doppler effect. */ + float directionalAttenuationFactor; /* Set to 0 to disable directional attenuation. */ + float minSpatializationChannelGain; /* The minimal scaling factor to apply to channel gains when accounting for the direction of the sound relative to the listener. Must be in the range of 0..1. Smaller values means more aggressive directional panning, larger values means more subtle directional panning. */ + ma_uint32 gainSmoothTimeInFrames; /* When the gain of a channel changes during spatialization, the transition will be linearly interpolated over this number of frames. 
*/ + } ma_spatializer_config; + + MA_API ma_spatializer_config ma_spatializer_config_init(ma_uint32 channelsIn, ma_uint32 channelsOut); + + + typedef struct + { + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_channel* pChannelMapIn; + ma_attenuation_model attenuationModel; + ma_positioning positioning; + ma_handedness handedness; /* Defaults to right. Forward is -1 on the Z axis. In a left handed system, forward is +1 on the Z axis. */ + float minGain; + float maxGain; + float minDistance; + float maxDistance; + float rolloff; + float coneInnerAngleInRadians; + float coneOuterAngleInRadians; + float coneOuterGain; + float dopplerFactor; /* Set to 0 to disable doppler effect. */ + float directionalAttenuationFactor; /* Set to 0 to disable directional attenuation. */ + ma_uint32 gainSmoothTimeInFrames; /* When the gain of a channel changes during spatialization, the transition will be linearly interpolated over this number of frames. */ + ma_atomic_vec3f position; + ma_atomic_vec3f direction; + ma_atomic_vec3f velocity; /* For doppler effect. */ + float dopplerPitch; /* Will be updated by ma_spatializer_process_pcm_frames() and can be used by higher level functions to apply a pitch shift for doppler effect. */ + float minSpatializationChannelGain; + ma_gainer gainer; /* For smooth gain transitions. */ + float* pNewChannelGainsOut; /* An offset of _pHeap. Used by ma_spatializer_process_pcm_frames() to store new channel gains. The number of elements in this array is equal to config.channelsOut. */ + + /* Memory management. */ + void* _pHeap; + ma_bool32 _ownsHeap; + } ma_spatializer; + + MA_API ma_result ma_spatializer_get_heap_size(const ma_spatializer_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_spatializer_init_preallocated(const ma_spatializer_config* pConfig, void* pHeap, ma_spatializer* pSpatializer); + MA_API ma_result ma_spatializer_init(const ma_spatializer_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_spatializer* pSpatializer); + MA_API void ma_spatializer_uninit(ma_spatializer* pSpatializer, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_spatializer_process_pcm_frames(ma_spatializer* pSpatializer, ma_spatializer_listener* pListener, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + MA_API ma_result ma_spatializer_set_master_volume(ma_spatializer* pSpatializer, float volume); + MA_API ma_result ma_spatializer_get_master_volume(const ma_spatializer* pSpatializer, float* pVolume); + MA_API ma_uint32 ma_spatializer_get_input_channels(const ma_spatializer* pSpatializer); + MA_API ma_uint32 ma_spatializer_get_output_channels(const ma_spatializer* pSpatializer); + MA_API void ma_spatializer_set_attenuation_model(ma_spatializer* pSpatializer, ma_attenuation_model attenuationModel); + MA_API ma_attenuation_model ma_spatializer_get_attenuation_model(const ma_spatializer* pSpatializer); + MA_API void ma_spatializer_set_positioning(ma_spatializer* pSpatializer, ma_positioning positioning); + MA_API ma_positioning ma_spatializer_get_positioning(const ma_spatializer* pSpatializer); + MA_API void ma_spatializer_set_rolloff(ma_spatializer* pSpatializer, float rolloff); + MA_API float ma_spatializer_get_rolloff(const ma_spatializer* pSpatializer); + MA_API void ma_spatializer_set_min_gain(ma_spatializer* pSpatializer, float minGain); + MA_API float ma_spatializer_get_min_gain(const ma_spatializer* pSpatializer); + MA_API void ma_spatializer_set_max_gain(ma_spatializer* pSpatializer, float maxGain); + 
MA_API float ma_spatializer_get_max_gain(const ma_spatializer* pSpatializer); + MA_API void ma_spatializer_set_min_distance(ma_spatializer* pSpatializer, float minDistance); + MA_API float ma_spatializer_get_min_distance(const ma_spatializer* pSpatializer); + MA_API void ma_spatializer_set_max_distance(ma_spatializer* pSpatializer, float maxDistance); + MA_API float ma_spatializer_get_max_distance(const ma_spatializer* pSpatializer); + MA_API void ma_spatializer_set_cone(ma_spatializer* pSpatializer, float innerAngleInRadians, float outerAngleInRadians, float outerGain); + MA_API void ma_spatializer_get_cone(const ma_spatializer* pSpatializer, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain); + MA_API void ma_spatializer_set_doppler_factor(ma_spatializer* pSpatializer, float dopplerFactor); + MA_API float ma_spatializer_get_doppler_factor(const ma_spatializer* pSpatializer); + MA_API void ma_spatializer_set_directional_attenuation_factor(ma_spatializer* pSpatializer, float directionalAttenuationFactor); + MA_API float ma_spatializer_get_directional_attenuation_factor(const ma_spatializer* pSpatializer); + MA_API void ma_spatializer_set_position(ma_spatializer* pSpatializer, float x, float y, float z); + MA_API ma_vec3f ma_spatializer_get_position(const ma_spatializer* pSpatializer); + MA_API void ma_spatializer_set_direction(ma_spatializer* pSpatializer, float x, float y, float z); + MA_API ma_vec3f ma_spatializer_get_direction(const ma_spatializer* pSpatializer); + MA_API void ma_spatializer_set_velocity(ma_spatializer* pSpatializer, float x, float y, float z); + MA_API ma_vec3f ma_spatializer_get_velocity(const ma_spatializer* pSpatializer); + MA_API void ma_spatializer_get_relative_position_and_direction(const ma_spatializer* pSpatializer, const ma_spatializer_listener* pListener, ma_vec3f* pRelativePos, ma_vec3f* pRelativeDir); + + + + /************************************************************************************************************************************************************ + ************************************************************************************************************************************************************* + + DATA CONVERSION + =============== + + This section contains the APIs for data conversion. You will find everything here for channel mapping, sample format conversion, resampling, etc. + + ************************************************************************************************************************************************************* + ************************************************************************************************************************************************************/ + + /************************************************************************************************************************************************************** + + Resampling + + **************************************************************************************************************************************************************/ + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRateIn; + ma_uint32 sampleRateOut; + ma_uint32 lpfOrder; /* The low-pass filter order. Setting this to 0 will disable low-pass filtering. */ + double lpfNyquistFactor; /* 0..1. Defaults to 1. 1 = Half the sampling frequency (Nyquist Frequency), 0.5 = Quarter the sampling frequency (half Nyquest Frequency), etc. 
*/ + } ma_linear_resampler_config; + + MA_API ma_linear_resampler_config ma_linear_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); + + typedef struct + { + ma_linear_resampler_config config; + ma_uint32 inAdvanceInt; + ma_uint32 inAdvanceFrac; + ma_uint32 inTimeInt; + ma_uint32 inTimeFrac; + union + { + float* f32; + ma_int16* s16; + } x0; /* The previous input frame. */ + union + { + float* f32; + ma_int16* s16; + } x1; /* The next input frame. */ + ma_lpf lpf; + + /* Memory management. */ + void* _pHeap; + ma_bool32 _ownsHeap; + } ma_linear_resampler; + + MA_API ma_result ma_linear_resampler_get_heap_size(const ma_linear_resampler_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_linear_resampler_init_preallocated(const ma_linear_resampler_config* pConfig, void* pHeap, ma_linear_resampler* pResampler); + MA_API ma_result ma_linear_resampler_init(const ma_linear_resampler_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_linear_resampler* pResampler); + MA_API void ma_linear_resampler_uninit(ma_linear_resampler* pResampler, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_linear_resampler_process_pcm_frames(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut); + MA_API ma_result ma_linear_resampler_set_rate(ma_linear_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); + MA_API ma_result ma_linear_resampler_set_rate_ratio(ma_linear_resampler* pResampler, float ratioInOut); + MA_API ma_uint64 ma_linear_resampler_get_input_latency(const ma_linear_resampler* pResampler); + MA_API ma_uint64 ma_linear_resampler_get_output_latency(const ma_linear_resampler* pResampler); + MA_API ma_result ma_linear_resampler_get_required_input_frame_count(const ma_linear_resampler* pResampler, ma_uint64 outputFrameCount, ma_uint64* pInputFrameCount); + MA_API ma_result ma_linear_resampler_get_expected_output_frame_count(const ma_linear_resampler* pResampler, ma_uint64 inputFrameCount, ma_uint64* pOutputFrameCount); + MA_API ma_result ma_linear_resampler_reset(ma_linear_resampler* pResampler); + + + typedef struct ma_resampler_config ma_resampler_config; + + typedef void ma_resampling_backend; + typedef struct + { + ma_result (* onGetHeapSize )(void* pUserData, const ma_resampler_config* pConfig, size_t* pHeapSizeInBytes); + ma_result (* onInit )(void* pUserData, const ma_resampler_config* pConfig, void* pHeap, ma_resampling_backend** ppBackend); + void (* onUninit )(void* pUserData, ma_resampling_backend* pBackend, const ma_allocation_callbacks* pAllocationCallbacks); + ma_result (* onProcess )(void* pUserData, ma_resampling_backend* pBackend, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut); + ma_result (* onSetRate )(void* pUserData, ma_resampling_backend* pBackend, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); /* Optional. Rate changes will be disabled. */ + ma_uint64 (* onGetInputLatency )(void* pUserData, const ma_resampling_backend* pBackend); /* Optional. Latency will be reported as 0. */ + ma_uint64 (* onGetOutputLatency )(void* pUserData, const ma_resampling_backend* pBackend); /* Optional. Latency will be reported as 0. */ + ma_result (* onGetRequiredInputFrameCount )(void* pUserData, const ma_resampling_backend* pBackend, ma_uint64 outputFrameCount, ma_uint64* pInputFrameCount); /* Optional. Latency mitigation will be disabled. 
*/ + ma_result (* onGetExpectedOutputFrameCount)(void* pUserData, const ma_resampling_backend* pBackend, ma_uint64 inputFrameCount, ma_uint64* pOutputFrameCount); /* Optional. Latency mitigation will be disabled. */ + ma_result (* onReset )(void* pUserData, ma_resampling_backend* pBackend); + } ma_resampling_backend_vtable; + + typedef enum + { + ma_resample_algorithm_linear = 0, /* Fastest, lowest quality. Optional low-pass filtering. Default. */ + ma_resample_algorithm_custom, + } ma_resample_algorithm; + + struct ma_resampler_config + { + ma_format format; /* Must be either ma_format_f32 or ma_format_s16. */ + ma_uint32 channels; + ma_uint32 sampleRateIn; + ma_uint32 sampleRateOut; + ma_resample_algorithm algorithm; /* When set to ma_resample_algorithm_custom, pBackendVTable will be used. */ + ma_resampling_backend_vtable* pBackendVTable; + void* pBackendUserData; + struct + { + ma_uint32 lpfOrder; + } linear; + }; + + MA_API ma_resampler_config ma_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_resample_algorithm algorithm); + + typedef struct + { + ma_resampling_backend* pBackend; + ma_resampling_backend_vtable* pBackendVTable; + void* pBackendUserData; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRateIn; + ma_uint32 sampleRateOut; + union + { + ma_linear_resampler linear; + } state; /* State for stock resamplers so we can avoid a malloc. For stock resamplers, pBackend will point here. */ + + /* Memory management. */ + void* _pHeap; + ma_bool32 _ownsHeap; + } ma_resampler; + + MA_API ma_result ma_resampler_get_heap_size(const ma_resampler_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_resampler_init_preallocated(const ma_resampler_config* pConfig, void* pHeap, ma_resampler* pResampler); + + /* + Initializes a new resampler object from a config. + */ + MA_API ma_result ma_resampler_init(const ma_resampler_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_resampler* pResampler); + + /* + Uninitializes a resampler. + */ + MA_API void ma_resampler_uninit(ma_resampler* pResampler, const ma_allocation_callbacks* pAllocationCallbacks); + + /* + Converts the given input data. + + Both the input and output frames must be in the format specified in the config when the resampler was initialized. + + On input, [pFrameCountOut] contains the number of output frames to process. On output it contains the number of output frames that + were actually processed, which may be less than the requested amount, which will happen if there's not enough input data. You can use + ma_resampler_get_expected_output_frame_count() to know how many output frames will be processed for a given number of input frames. + + On input, [pFrameCountIn] contains the number of input frames contained in [pFramesIn]. On output it contains the number of whole + input frames that were actually processed. You can use ma_resampler_get_required_input_frame_count() to know how many input frames + you should provide for a given number of output frames. [pFramesIn] can be NULL, in which case zeroes will be used instead. + + If [pFramesOut] is NULL, a seek is performed. In this case, if [pFrameCountOut] is not NULL it will seek by the specified number of + output frames. Otherwise, if [pFrameCountOut] is NULL and [pFrameCountIn] is not NULL, it will seek by the specified number of input + frames. 
When seeking, [pFramesIn] is allowed to be NULL, in which case the internal timing state will be updated, but no input will be + processed. In this case, any internal filter state will be updated as if zeroes were passed in. + + It is an error for [pFramesOut] to be non-NULL and [pFrameCountOut] to be NULL. + + It is an error for both [pFrameCountOut] and [pFrameCountIn] to be NULL. + */ + MA_API ma_result ma_resampler_process_pcm_frames(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut); + + + /* + Sets the input and output sample rate. + */ + MA_API ma_result ma_resampler_set_rate(ma_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); + + /* + Sets the input and output sample rate as a ratio. + + The ratio is in/out. + */ + MA_API ma_result ma_resampler_set_rate_ratio(ma_resampler* pResampler, float ratio); + + /* + Retrieves the latency introduced by the resampler in input frames. + */ + MA_API ma_uint64 ma_resampler_get_input_latency(const ma_resampler* pResampler); + + /* + Retrieves the latency introduced by the resampler in output frames. + */ + MA_API ma_uint64 ma_resampler_get_output_latency(const ma_resampler* pResampler); + + /* + Calculates the number of whole input frames that would need to be read from the client in order to output the specified + number of output frames. + + The returned value does not include cached input frames. It only returns the number of extra frames that would need to be + read from the input buffer in order to output the specified number of output frames. + */ + MA_API ma_result ma_resampler_get_required_input_frame_count(const ma_resampler* pResampler, ma_uint64 outputFrameCount, ma_uint64* pInputFrameCount); + + /* + Calculates the number of whole output frames that would be output after fully reading and consuming the specified number of + input frames. + */ + MA_API ma_result ma_resampler_get_expected_output_frame_count(const ma_resampler* pResampler, ma_uint64 inputFrameCount, ma_uint64* pOutputFrameCount); + + /* + Resets the resampler's timer and clears its internal cache. + */ + MA_API ma_result ma_resampler_reset(ma_resampler* pResampler); + + + /************************************************************************************************************************************************************** + + Channel Conversion + + **************************************************************************************************************************************************************/ + typedef enum + { + ma_channel_conversion_path_unknown, + ma_channel_conversion_path_passthrough, + ma_channel_conversion_path_mono_out, /* Converting to mono. */ + ma_channel_conversion_path_mono_in, /* Converting from mono. */ + ma_channel_conversion_path_shuffle, /* Simple shuffle. Will use this when all channels are present in both input and output channel maps, but just in a different order. */ + ma_channel_conversion_path_weights /* Blended based on weights. */ + } ma_channel_conversion_path; + + typedef enum + { + ma_mono_expansion_mode_duplicate = 0, /* The default. */ + ma_mono_expansion_mode_average, /* Average the mono channel across all channels. */ + ma_mono_expansion_mode_stereo_only, /* Duplicate to the left and right channels only and ignore the others. 
*/ + ma_mono_expansion_mode_default = ma_mono_expansion_mode_duplicate + } ma_mono_expansion_mode; + + typedef struct + { + ma_format format; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + const ma_channel* pChannelMapIn; + const ma_channel* pChannelMapOut; + ma_channel_mix_mode mixingMode; + ma_bool32 calculateLFEFromSpatialChannels; /* When an output LFE channel is present, but no input LFE, set to true to set the output LFE to the average of all spatial channels (LR, FR, etc.). Ignored when an input LFE is present. */ + float** ppWeights; /* [in][out]. Only used when mixingMode is set to ma_channel_mix_mode_custom_weights. */ + } ma_channel_converter_config; + + MA_API ma_channel_converter_config ma_channel_converter_config_init(ma_format format, ma_uint32 channelsIn, const ma_channel* pChannelMapIn, ma_uint32 channelsOut, const ma_channel* pChannelMapOut, ma_channel_mix_mode mixingMode); + + typedef struct + { + ma_format format; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_channel_mix_mode mixingMode; + ma_channel_conversion_path conversionPath; + ma_channel* pChannelMapIn; + ma_channel* pChannelMapOut; + ma_uint8* pShuffleTable; /* Indexed by output channel index. */ + union + { + float** f32; + ma_int32** s16; + } weights; /* [in][out] */ + + /* Memory management. */ + void* _pHeap; + ma_bool32 _ownsHeap; + } ma_channel_converter; + + MA_API ma_result ma_channel_converter_get_heap_size(const ma_channel_converter_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_channel_converter_init_preallocated(const ma_channel_converter_config* pConfig, void* pHeap, ma_channel_converter* pConverter); + MA_API ma_result ma_channel_converter_init(const ma_channel_converter_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_channel_converter* pConverter); + MA_API void ma_channel_converter_uninit(ma_channel_converter* pConverter, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_channel_converter_process_pcm_frames(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + MA_API ma_result ma_channel_converter_get_input_channel_map(const ma_channel_converter* pConverter, ma_channel* pChannelMap, size_t channelMapCap); + MA_API ma_result ma_channel_converter_get_output_channel_map(const ma_channel_converter* pConverter, ma_channel* pChannelMap, size_t channelMapCap); + + + /************************************************************************************************************************************************************** + + Data Conversion + + **************************************************************************************************************************************************************/ + typedef struct + { + ma_format formatIn; + ma_format formatOut; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_uint32 sampleRateIn; + ma_uint32 sampleRateOut; + ma_channel* pChannelMapIn; + ma_channel* pChannelMapOut; + ma_dither_mode ditherMode; + ma_channel_mix_mode channelMixMode; + ma_bool32 calculateLFEFromSpatialChannels; /* When an output LFE channel is present, but no input LFE, set to true to set the output LFE to the average of all spatial channels (LR, FR, etc.). Ignored when an input LFE is present. */ + float** ppChannelWeights; /* [in][out]. Only used when mixingMode is set to ma_channel_mix_mode_custom_weights. 
*/ + ma_bool32 allowDynamicSampleRate; + ma_resampler_config resampling; + } ma_data_converter_config; + + MA_API ma_data_converter_config ma_data_converter_config_init_default(void); + MA_API ma_data_converter_config ma_data_converter_config_init(ma_format formatIn, ma_format formatOut, ma_uint32 channelsIn, ma_uint32 channelsOut, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); + + + typedef enum + { + ma_data_converter_execution_path_passthrough, /* No conversion. */ + ma_data_converter_execution_path_format_only, /* Only format conversion. */ + ma_data_converter_execution_path_channels_only, /* Only channel conversion. */ + ma_data_converter_execution_path_resample_only, /* Only resampling. */ + ma_data_converter_execution_path_resample_first, /* All conversions, but resample as the first step. */ + ma_data_converter_execution_path_channels_first /* All conversions, but channels as the first step. */ + } ma_data_converter_execution_path; + + typedef struct + { + ma_format formatIn; + ma_format formatOut; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_uint32 sampleRateIn; + ma_uint32 sampleRateOut; + ma_dither_mode ditherMode; + ma_data_converter_execution_path executionPath; /* The execution path the data converter will follow when processing. */ + ma_channel_converter channelConverter; + ma_resampler resampler; + ma_bool8 hasPreFormatConversion; + ma_bool8 hasPostFormatConversion; + ma_bool8 hasChannelConverter; + ma_bool8 hasResampler; + ma_bool8 isPassthrough; + + /* Memory management. */ + ma_bool8 _ownsHeap; + void* _pHeap; + } ma_data_converter; + + MA_API ma_result ma_data_converter_get_heap_size(const ma_data_converter_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_data_converter_init_preallocated(const ma_data_converter_config* pConfig, void* pHeap, ma_data_converter* pConverter); + MA_API ma_result ma_data_converter_init(const ma_data_converter_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_converter* pConverter); + MA_API void ma_data_converter_uninit(ma_data_converter* pConverter, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_data_converter_process_pcm_frames(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut); + MA_API ma_result ma_data_converter_set_rate(ma_data_converter* pConverter, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); + MA_API ma_result ma_data_converter_set_rate_ratio(ma_data_converter* pConverter, float ratioInOut); + MA_API ma_uint64 ma_data_converter_get_input_latency(const ma_data_converter* pConverter); + MA_API ma_uint64 ma_data_converter_get_output_latency(const ma_data_converter* pConverter); + MA_API ma_result ma_data_converter_get_required_input_frame_count(const ma_data_converter* pConverter, ma_uint64 outputFrameCount, ma_uint64* pInputFrameCount); + MA_API ma_result ma_data_converter_get_expected_output_frame_count(const ma_data_converter* pConverter, ma_uint64 inputFrameCount, ma_uint64* pOutputFrameCount); + MA_API ma_result ma_data_converter_get_input_channel_map(const ma_data_converter* pConverter, ma_channel* pChannelMap, size_t channelMapCap); + MA_API ma_result ma_data_converter_get_output_channel_map(const ma_data_converter* pConverter, ma_channel* pChannelMap, size_t channelMapCap); + MA_API ma_result ma_data_converter_reset(ma_data_converter* pConverter); + + + 
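+ /*
+ As a quick illustration of the data conversion API above, the following is a minimal, hypothetical sketch of converting
+ interleaved s16/stereo/44100 audio to f32/stereo/48000 with ma_data_converter. The buffer names (pInputS16, pOutputF32)
+ and the frame counts are assumptions for illustration only and are not part of the API.
+
+     ma_data_converter_config config = ma_data_converter_config_init(ma_format_s16, ma_format_f32, 2, 2, 44100, 48000);
+     ma_data_converter converter;
+     if (ma_data_converter_init(&config, NULL, &converter) == MA_SUCCESS) {
+         ma_uint64 frameCountIn  = 44100;   // Hypothetical number of input frames available in pInputS16.
+         ma_uint64 frameCountOut = 48000;   // Capacity, in frames, of the hypothetical pOutputF32 buffer.
+         ma_data_converter_process_pcm_frames(&converter, pInputS16, &frameCountIn, pOutputF32, &frameCountOut);
+         // On return, frameCountIn and frameCountOut hold the number of frames actually consumed and produced.
+         ma_data_converter_uninit(&converter, NULL);
+     }
+ */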
/************************************************************************************************************************************************************ + + Format Conversion + + ************************************************************************************************************************************************************/ + MA_API void ma_pcm_u8_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_u8_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_u8_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_u8_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_s16_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_s16_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_s16_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_s16_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_s24_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_s24_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_s24_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_s24_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_s32_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_s32_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_s32_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_s32_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_f32_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_f32_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_f32_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_f32_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); + MA_API void ma_pcm_convert(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 sampleCount, ma_dither_mode ditherMode); + MA_API void ma_convert_pcm_frames_format(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 frameCount, ma_uint32 channels, ma_dither_mode ditherMode); + + /* + Deinterleaves an interleaved buffer. + */ + MA_API void ma_deinterleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void* pInterleavedPCMFrames, void** ppDeinterleavedPCMFrames); + + /* + Interleaves a group of deinterleaved buffers. 
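+
+ As a hypothetical sketch (pLeft, pRight, pInterleaved and frameCount are illustrative names, not part of
+ this API), splitting a stereo f32 buffer into per-channel buffers and recombining it might look like:
+
+     float* ppChannels[2] = { pLeft, pRight };   // One buffer per channel, each with room for frameCount samples.
+     ma_deinterleave_pcm_frames(ma_format_f32, 2, frameCount, pInterleaved, (void**)ppChannels);
+     ma_interleave_pcm_frames(ma_format_f32, 2, frameCount, (const void**)ppChannels, pInterleaved);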
+ */ + MA_API void ma_interleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void** ppDeinterleavedPCMFrames, void* pInterleavedPCMFrames); + + + /************************************************************************************************************************************************************ + + Channel Maps + + ************************************************************************************************************************************************************/ + /* + This is used in the shuffle table to indicate that the channel index is undefined and should be ignored. + */ +#define MA_CHANNEL_INDEX_NULL 255 + + /* + Retrieves the channel position of the specified channel in the given channel map. + + The pChannelMap parameter can be null, in which case miniaudio's default channel map will be assumed. + */ + MA_API ma_channel ma_channel_map_get_channel(const ma_channel* pChannelMap, ma_uint32 channelCount, ma_uint32 channelIndex); + + /* + Initializes a blank channel map. + + When a blank channel map is specified anywhere it indicates that the native channel map should be used. + */ + MA_API void ma_channel_map_init_blank(ma_channel* pChannelMap, ma_uint32 channels); + + /* + Helper for retrieving a standard channel map. + + The output channel map buffer must have a capacity of at least `channelMapCap`. + */ + MA_API void ma_channel_map_init_standard(ma_standard_channel_map standardChannelMap, ma_channel* pChannelMap, size_t channelMapCap, ma_uint32 channels); + + /* + Copies a channel map. + + Both input and output channel map buffers must have a capacity of at at least `channels`. + */ + MA_API void ma_channel_map_copy(ma_channel* pOut, const ma_channel* pIn, ma_uint32 channels); + + /* + Copies a channel map if one is specified, otherwise copies the default channel map. + + The output buffer must have a capacity of at least `channels`. If not NULL, the input channel map must also have a capacity of at least `channels`. + */ + MA_API void ma_channel_map_copy_or_default(ma_channel* pOut, size_t channelMapCapOut, const ma_channel* pIn, ma_uint32 channels); + + + /* + Determines whether or not a channel map is valid. + + A blank channel map is valid (all channels set to MA_CHANNEL_NONE). The way a blank channel map is handled is context specific, but + is usually treated as a passthrough. + + Invalid channel maps: + - A channel map with no channels + - A channel map with more than one channel and a mono channel + + The channel map buffer must have a capacity of at least `channels`. + */ + MA_API ma_bool32 ma_channel_map_is_valid(const ma_channel* pChannelMap, ma_uint32 channels); + + /* + Helper for comparing two channel maps for equality. + + This assumes the channel count is the same between the two. + + Both channels map buffers must have a capacity of at least `channels`. + */ + MA_API ma_bool32 ma_channel_map_is_equal(const ma_channel* pChannelMapA, const ma_channel* pChannelMapB, ma_uint32 channels); + + /* + Helper for determining if a channel map is blank (all channels set to MA_CHANNEL_NONE). + + The channel map buffer must have a capacity of at least `channels`. + */ + MA_API ma_bool32 ma_channel_map_is_blank(const ma_channel* pChannelMap, ma_uint32 channels); + + /* + Helper for determining whether or not a channel is present in the given channel map. + + The channel map buffer must have a capacity of at least `channels`. 
+ */ + MA_API ma_bool32 ma_channel_map_contains_channel_position(ma_uint32 channels, const ma_channel* pChannelMap, ma_channel channelPosition); + + /* + Find a channel position in the given channel map. Returns MA_TRUE if the channel is found; MA_FALSE otherwise. The + index of the channel is output to `pChannelIndex`. + + The channel map buffer must have a capacity of at least `channels`. + */ + MA_API ma_bool32 ma_channel_map_find_channel_position(ma_uint32 channels, const ma_channel* pChannelMap, ma_channel channelPosition, ma_uint32* pChannelIndex); + + /* + Generates a string representing the given channel map. + + This is for printing and debugging purposes, not serialization/deserialization. + + Returns the length of the string, not including the null terminator. + */ + MA_API size_t ma_channel_map_to_string(const ma_channel* pChannelMap, ma_uint32 channels, char* pBufferOut, size_t bufferCap); + + /* + Retrieves a human readable version of a channel position. + */ + MA_API const char* ma_channel_position_to_string(ma_channel channel); + + + /************************************************************************************************************************************************************ + + Conversion Helpers + + ************************************************************************************************************************************************************/ + + /* + High-level helper for doing a full format conversion in one go. Returns the number of output frames. Call this with pOut set to NULL to + determine the required size of the output buffer. frameCountOut should be set to the capacity of pOut. If pOut is NULL, frameCountOut is + ignored. + + A return value of 0 indicates an error. + + This function is useful for one-off bulk conversions, but if you're streaming data you should use the ma_data_converter APIs instead. 
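+
+ As a rough, hypothetical sketch of the two-call pattern described above (pInputS16, inFrameCount and
+ pOutputF32 are illustrative names, not part of this API):
+
+     ma_uint64 outFrameCount = ma_convert_frames(NULL, 0, ma_format_f32, 2, 48000, pInputS16, inFrameCount, ma_format_s16, 2, 44100);
+     // ...allocate pOutputF32 with room for outFrameCount f32 stereo frames, then...
+     ma_convert_frames(pOutputF32, outFrameCount, ma_format_f32, 2, 48000, pInputS16, inFrameCount, ma_format_s16, 2, 44100);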
+ */ + MA_API ma_uint64 ma_convert_frames(void* pOut, ma_uint64 frameCountOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, const void* pIn, ma_uint64 frameCountIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn); + MA_API ma_uint64 ma_convert_frames_ex(void* pOut, ma_uint64 frameCountOut, const void* pIn, ma_uint64 frameCountIn, const ma_data_converter_config* pConfig); + + + /************************************************************************************************************************************************************ + + Data Source + + ************************************************************************************************************************************************************/ + typedef void ma_data_source; + +#define MA_DATA_SOURCE_SELF_MANAGED_RANGE_AND_LOOP_POINT 0x00000001 + + typedef struct + { + ma_result (* onRead)(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); + ma_result (* onSeek)(ma_data_source* pDataSource, ma_uint64 frameIndex); + ma_result (* onGetDataFormat)(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); + ma_result (* onGetCursor)(ma_data_source* pDataSource, ma_uint64* pCursor); + ma_result (* onGetLength)(ma_data_source* pDataSource, ma_uint64* pLength); + ma_result (* onSetLooping)(ma_data_source* pDataSource, ma_bool32 isLooping); + ma_uint32 flags; + } ma_data_source_vtable; + + typedef ma_data_source* (* ma_data_source_get_next_proc)(ma_data_source* pDataSource); + + typedef struct + { + const ma_data_source_vtable* vtable; + } ma_data_source_config; + + MA_API ma_data_source_config ma_data_source_config_init(void); + + + typedef struct + { + const ma_data_source_vtable* vtable; + ma_uint64 rangeBegInFrames; + ma_uint64 rangeEndInFrames; /* Set to -1 for unranged (default). */ + ma_uint64 loopBegInFrames; /* Relative to rangeBegInFrames. */ + ma_uint64 loopEndInFrames; /* Relative to rangeBegInFrames. Set to -1 for the end of the range. */ + ma_data_source* pCurrent; /* When non-NULL, the data source being initialized will act as a proxy and will route all operations to pCurrent. Used in conjunction with pNext/onGetNext for seamless chaining. */ + ma_data_source* pNext; /* When set to NULL, onGetNext will be used. */ + ma_data_source_get_next_proc onGetNext; /* Will be used when pNext is NULL. If both are NULL, no next will be used. */ + MA_ATOMIC(4, ma_bool32) isLooping; + } ma_data_source_base; + + MA_API ma_result ma_data_source_init(const ma_data_source_config* pConfig, ma_data_source* pDataSource); + MA_API void ma_data_source_uninit(ma_data_source* pDataSource); + MA_API ma_result ma_data_source_read_pcm_frames(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); /* Must support pFramesOut = NULL in which case a forward seek should be performed. */ + MA_API ma_result ma_data_source_seek_pcm_frames(ma_data_source* pDataSource, ma_uint64 frameCount, ma_uint64* pFramesSeeked); /* Can only seek forward. 
Equivalent to ma_data_source_read_pcm_frames(pDataSource, NULL, frameCount, &framesRead); */ + MA_API ma_result ma_data_source_seek_to_pcm_frame(ma_data_source* pDataSource, ma_uint64 frameIndex); + MA_API ma_result ma_data_source_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); + MA_API ma_result ma_data_source_get_cursor_in_pcm_frames(ma_data_source* pDataSource, ma_uint64* pCursor); + MA_API ma_result ma_data_source_get_length_in_pcm_frames(ma_data_source* pDataSource, ma_uint64* pLength); /* Returns MA_NOT_IMPLEMENTED if the length is unknown or cannot be determined. Decoders can return this. */ + MA_API ma_result ma_data_source_get_cursor_in_seconds(ma_data_source* pDataSource, float* pCursor); + MA_API ma_result ma_data_source_get_length_in_seconds(ma_data_source* pDataSource, float* pLength); + MA_API ma_result ma_data_source_set_looping(ma_data_source* pDataSource, ma_bool32 isLooping); + MA_API ma_bool32 ma_data_source_is_looping(const ma_data_source* pDataSource); + MA_API ma_result ma_data_source_set_range_in_pcm_frames(ma_data_source* pDataSource, ma_uint64 rangeBegInFrames, ma_uint64 rangeEndInFrames); + MA_API void ma_data_source_get_range_in_pcm_frames(const ma_data_source* pDataSource, ma_uint64* pRangeBegInFrames, ma_uint64* pRangeEndInFrames); + MA_API ma_result ma_data_source_set_loop_point_in_pcm_frames(ma_data_source* pDataSource, ma_uint64 loopBegInFrames, ma_uint64 loopEndInFrames); + MA_API void ma_data_source_get_loop_point_in_pcm_frames(const ma_data_source* pDataSource, ma_uint64* pLoopBegInFrames, ma_uint64* pLoopEndInFrames); + MA_API ma_result ma_data_source_set_current(ma_data_source* pDataSource, ma_data_source* pCurrentDataSource); + MA_API ma_data_source* ma_data_source_get_current(const ma_data_source* pDataSource); + MA_API ma_result ma_data_source_set_next(ma_data_source* pDataSource, ma_data_source* pNextDataSource); + MA_API ma_data_source* ma_data_source_get_next(const ma_data_source* pDataSource); + MA_API ma_result ma_data_source_set_next_callback(ma_data_source* pDataSource, ma_data_source_get_next_proc onGetNext); + MA_API ma_data_source_get_next_proc ma_data_source_get_next_callback(const ma_data_source* pDataSource); + + + typedef struct + { + ma_data_source_base ds; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_uint64 cursor; + ma_uint64 sizeInFrames; + const void* pData; + } ma_audio_buffer_ref; + + MA_API ma_result ma_audio_buffer_ref_init(ma_format format, ma_uint32 channels, const void* pData, ma_uint64 sizeInFrames, ma_audio_buffer_ref* pAudioBufferRef); + MA_API void ma_audio_buffer_ref_uninit(ma_audio_buffer_ref* pAudioBufferRef); + MA_API ma_result ma_audio_buffer_ref_set_data(ma_audio_buffer_ref* pAudioBufferRef, const void* pData, ma_uint64 sizeInFrames); + MA_API ma_uint64 ma_audio_buffer_ref_read_pcm_frames(ma_audio_buffer_ref* pAudioBufferRef, void* pFramesOut, ma_uint64 frameCount, ma_bool32 loop); + MA_API ma_result ma_audio_buffer_ref_seek_to_pcm_frame(ma_audio_buffer_ref* pAudioBufferRef, ma_uint64 frameIndex); + MA_API ma_result ma_audio_buffer_ref_map(ma_audio_buffer_ref* pAudioBufferRef, void** ppFramesOut, ma_uint64* pFrameCount); + MA_API ma_result ma_audio_buffer_ref_unmap(ma_audio_buffer_ref* pAudioBufferRef, ma_uint64 frameCount); /* Returns MA_AT_END if the end has been reached. This should be considered successful. 
*/ + MA_API ma_bool32 ma_audio_buffer_ref_at_end(const ma_audio_buffer_ref* pAudioBufferRef); + MA_API ma_result ma_audio_buffer_ref_get_cursor_in_pcm_frames(const ma_audio_buffer_ref* pAudioBufferRef, ma_uint64* pCursor); + MA_API ma_result ma_audio_buffer_ref_get_length_in_pcm_frames(const ma_audio_buffer_ref* pAudioBufferRef, ma_uint64* pLength); + MA_API ma_result ma_audio_buffer_ref_get_available_frames(const ma_audio_buffer_ref* pAudioBufferRef, ma_uint64* pAvailableFrames); + + + + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_uint64 sizeInFrames; + const void* pData; /* If set to NULL, will allocate a block of memory for you. */ + ma_allocation_callbacks allocationCallbacks; + } ma_audio_buffer_config; + + MA_API ma_audio_buffer_config ma_audio_buffer_config_init(ma_format format, ma_uint32 channels, ma_uint64 sizeInFrames, const void* pData, const ma_allocation_callbacks* pAllocationCallbacks); + + typedef struct + { + ma_audio_buffer_ref ref; + ma_allocation_callbacks allocationCallbacks; + ma_bool32 ownsData; /* Used to control whether or not miniaudio owns the data buffer. If set to true, pData will be freed in ma_audio_buffer_uninit(). */ + ma_uint8 _pExtraData[1]; /* For allocating a buffer with the memory located directly after the other memory of the structure. */ + } ma_audio_buffer; + + MA_API ma_result ma_audio_buffer_init(const ma_audio_buffer_config* pConfig, ma_audio_buffer* pAudioBuffer); + MA_API ma_result ma_audio_buffer_init_copy(const ma_audio_buffer_config* pConfig, ma_audio_buffer* pAudioBuffer); + MA_API ma_result ma_audio_buffer_alloc_and_init(const ma_audio_buffer_config* pConfig, ma_audio_buffer** ppAudioBuffer); /* Always copies the data. Doesn't make sense to use this otherwise. Use ma_audio_buffer_uninit_and_free() to uninit. */ + MA_API void ma_audio_buffer_uninit(ma_audio_buffer* pAudioBuffer); + MA_API void ma_audio_buffer_uninit_and_free(ma_audio_buffer* pAudioBuffer); + MA_API ma_uint64 ma_audio_buffer_read_pcm_frames(ma_audio_buffer* pAudioBuffer, void* pFramesOut, ma_uint64 frameCount, ma_bool32 loop); + MA_API ma_result ma_audio_buffer_seek_to_pcm_frame(ma_audio_buffer* pAudioBuffer, ma_uint64 frameIndex); + MA_API ma_result ma_audio_buffer_map(ma_audio_buffer* pAudioBuffer, void** ppFramesOut, ma_uint64* pFrameCount); + MA_API ma_result ma_audio_buffer_unmap(ma_audio_buffer* pAudioBuffer, ma_uint64 frameCount); /* Returns MA_AT_END if the end has been reached. This should be considered successful. */ + MA_API ma_bool32 ma_audio_buffer_at_end(const ma_audio_buffer* pAudioBuffer); + MA_API ma_result ma_audio_buffer_get_cursor_in_pcm_frames(const ma_audio_buffer* pAudioBuffer, ma_uint64* pCursor); + MA_API ma_result ma_audio_buffer_get_length_in_pcm_frames(const ma_audio_buffer* pAudioBuffer, ma_uint64* pLength); + MA_API ma_result ma_audio_buffer_get_available_frames(const ma_audio_buffer* pAudioBuffer, ma_uint64* pAvailableFrames); + + + /* + Paged Audio Buffer + ================== + A paged audio buffer is made up of a linked list of pages. It's expandable, but not shrinkable. It + can be used for cases where audio data is streamed in asynchronously while allowing data to be read + at the same time. + + This is lock-free, but not 100% thread safe. You can append a page and read from the buffer + simultaneously across different threads, however only one thread at a time can append, and only one + thread at a time can read and seek. 
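+
+ As a rough, hypothetical sketch of that producer/consumer pattern (pPageAudio, pageSizeInFrames,
+ pFramesOut and frameCount are illustrative names, not part of this API):
+
+     ma_paged_audio_buffer_data data;
+     ma_paged_audio_buffer_data_init(ma_format_f32, 2, &data);
+
+     // Streaming thread: append a page as audio arrives.
+     ma_paged_audio_buffer_data_allocate_and_append_page(&data, pageSizeInFrames, pPageAudio, NULL);
+
+     // Reading thread: read through the pages via a ma_paged_audio_buffer data source.
+     ma_paged_audio_buffer_config config = ma_paged_audio_buffer_config_init(&data);
+     ma_paged_audio_buffer buffer;
+     ma_paged_audio_buffer_init(&config, &buffer);
+     ma_uint64 framesRead;
+     ma_paged_audio_buffer_read_pcm_frames(&buffer, pFramesOut, frameCount, &framesRead);   // Returns MA_AT_END when no more pages are available.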
+ */ + typedef struct ma_paged_audio_buffer_page ma_paged_audio_buffer_page; + struct ma_paged_audio_buffer_page + { + MA_ATOMIC(MA_SIZEOF_PTR, ma_paged_audio_buffer_page*) pNext; + ma_uint64 sizeInFrames; + ma_uint8 pAudioData[1]; + }; + + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_paged_audio_buffer_page head; /* Dummy head for the lock-free algorithm. Always has a size of 0. */ + MA_ATOMIC(MA_SIZEOF_PTR, ma_paged_audio_buffer_page*) pTail; /* Never null. Initially set to &head. */ + } ma_paged_audio_buffer_data; + + MA_API ma_result ma_paged_audio_buffer_data_init(ma_format format, ma_uint32 channels, ma_paged_audio_buffer_data* pData); + MA_API void ma_paged_audio_buffer_data_uninit(ma_paged_audio_buffer_data* pData, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_paged_audio_buffer_page* ma_paged_audio_buffer_data_get_head(ma_paged_audio_buffer_data* pData); + MA_API ma_paged_audio_buffer_page* ma_paged_audio_buffer_data_get_tail(ma_paged_audio_buffer_data* pData); + MA_API ma_result ma_paged_audio_buffer_data_get_length_in_pcm_frames(ma_paged_audio_buffer_data* pData, ma_uint64* pLength); + MA_API ma_result ma_paged_audio_buffer_data_allocate_page(ma_paged_audio_buffer_data* pData, ma_uint64 pageSizeInFrames, const void* pInitialData, const ma_allocation_callbacks* pAllocationCallbacks, ma_paged_audio_buffer_page** ppPage); + MA_API ma_result ma_paged_audio_buffer_data_free_page(ma_paged_audio_buffer_data* pData, ma_paged_audio_buffer_page* pPage, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_paged_audio_buffer_data_append_page(ma_paged_audio_buffer_data* pData, ma_paged_audio_buffer_page* pPage); + MA_API ma_result ma_paged_audio_buffer_data_allocate_and_append_page(ma_paged_audio_buffer_data* pData, ma_uint32 pageSizeInFrames, const void* pInitialData, const ma_allocation_callbacks* pAllocationCallbacks); + + + typedef struct + { + ma_paged_audio_buffer_data* pData; /* Must not be null. */ + } ma_paged_audio_buffer_config; + + MA_API ma_paged_audio_buffer_config ma_paged_audio_buffer_config_init(ma_paged_audio_buffer_data* pData); + + + typedef struct + { + ma_data_source_base ds; + ma_paged_audio_buffer_data* pData; /* Audio data is read from here. Cannot be null. */ + ma_paged_audio_buffer_page* pCurrent; + ma_uint64 relativeCursor; /* Relative to the current page. */ + ma_uint64 absoluteCursor; + } ma_paged_audio_buffer; + + MA_API ma_result ma_paged_audio_buffer_init(const ma_paged_audio_buffer_config* pConfig, ma_paged_audio_buffer* pPagedAudioBuffer); + MA_API void ma_paged_audio_buffer_uninit(ma_paged_audio_buffer* pPagedAudioBuffer); + MA_API ma_result ma_paged_audio_buffer_read_pcm_frames(ma_paged_audio_buffer* pPagedAudioBuffer, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); /* Returns MA_AT_END if no more pages available. 
*/ + MA_API ma_result ma_paged_audio_buffer_seek_to_pcm_frame(ma_paged_audio_buffer* pPagedAudioBuffer, ma_uint64 frameIndex); + MA_API ma_result ma_paged_audio_buffer_get_cursor_in_pcm_frames(ma_paged_audio_buffer* pPagedAudioBuffer, ma_uint64* pCursor); + MA_API ma_result ma_paged_audio_buffer_get_length_in_pcm_frames(ma_paged_audio_buffer* pPagedAudioBuffer, ma_uint64* pLength); + + + + /************************************************************************************************************************************************************ + + Ring Buffer + + ************************************************************************************************************************************************************/ + typedef struct + { + void* pBuffer; + ma_uint32 subbufferSizeInBytes; + ma_uint32 subbufferCount; + ma_uint32 subbufferStrideInBytes; + MA_ATOMIC(4, ma_uint32) encodedReadOffset; /* Most significant bit is the loop flag. Lower 31 bits contains the actual offset in bytes. Must be used atomically. */ + MA_ATOMIC(4, ma_uint32) encodedWriteOffset; /* Most significant bit is the loop flag. Lower 31 bits contains the actual offset in bytes. Must be used atomically. */ + ma_bool8 ownsBuffer; /* Used to know whether or not miniaudio is responsible for free()-ing the buffer. */ + ma_bool8 clearOnWriteAcquire; /* When set, clears the acquired write buffer before returning from ma_rb_acquire_write(). */ + ma_allocation_callbacks allocationCallbacks; + } ma_rb; + + MA_API ma_result ma_rb_init_ex(size_t subbufferSizeInBytes, size_t subbufferCount, size_t subbufferStrideInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB); + MA_API ma_result ma_rb_init(size_t bufferSizeInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB); + MA_API void ma_rb_uninit(ma_rb* pRB); + MA_API void ma_rb_reset(ma_rb* pRB); + MA_API ma_result ma_rb_acquire_read(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut); + MA_API ma_result ma_rb_commit_read(ma_rb* pRB, size_t sizeInBytes); + MA_API ma_result ma_rb_acquire_write(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut); + MA_API ma_result ma_rb_commit_write(ma_rb* pRB, size_t sizeInBytes); + MA_API ma_result ma_rb_seek_read(ma_rb* pRB, size_t offsetInBytes); + MA_API ma_result ma_rb_seek_write(ma_rb* pRB, size_t offsetInBytes); + MA_API ma_int32 ma_rb_pointer_distance(ma_rb* pRB); /* Returns the distance between the write pointer and the read pointer. Should never be negative for a correct program. Will return the number of bytes that can be read before the read pointer hits the write pointer. */ + MA_API ma_uint32 ma_rb_available_read(ma_rb* pRB); + MA_API ma_uint32 ma_rb_available_write(ma_rb* pRB); + MA_API size_t ma_rb_get_subbuffer_size(ma_rb* pRB); + MA_API size_t ma_rb_get_subbuffer_stride(ma_rb* pRB); + MA_API size_t ma_rb_get_subbuffer_offset(ma_rb* pRB, size_t subbufferIndex); + MA_API void* ma_rb_get_subbuffer_ptr(ma_rb* pRB, size_t subbufferIndex, void* pBuffer); + + + typedef struct + { + ma_data_source_base ds; + ma_rb rb; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; /* Not required for the ring buffer itself, but useful for associating the data with some sample rate, particularly for data sources. 
*/ + } ma_pcm_rb; + + MA_API ma_result ma_pcm_rb_init_ex(ma_format format, ma_uint32 channels, ma_uint32 subbufferSizeInFrames, ma_uint32 subbufferCount, ma_uint32 subbufferStrideInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB); + MA_API ma_result ma_pcm_rb_init(ma_format format, ma_uint32 channels, ma_uint32 bufferSizeInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB); + MA_API void ma_pcm_rb_uninit(ma_pcm_rb* pRB); + MA_API void ma_pcm_rb_reset(ma_pcm_rb* pRB); + MA_API ma_result ma_pcm_rb_acquire_read(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut); + MA_API ma_result ma_pcm_rb_commit_read(ma_pcm_rb* pRB, ma_uint32 sizeInFrames); + MA_API ma_result ma_pcm_rb_acquire_write(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut); + MA_API ma_result ma_pcm_rb_commit_write(ma_pcm_rb* pRB, ma_uint32 sizeInFrames); + MA_API ma_result ma_pcm_rb_seek_read(ma_pcm_rb* pRB, ma_uint32 offsetInFrames); + MA_API ma_result ma_pcm_rb_seek_write(ma_pcm_rb* pRB, ma_uint32 offsetInFrames); + MA_API ma_int32 ma_pcm_rb_pointer_distance(ma_pcm_rb* pRB); /* Return value is in frames. */ + MA_API ma_uint32 ma_pcm_rb_available_read(ma_pcm_rb* pRB); + MA_API ma_uint32 ma_pcm_rb_available_write(ma_pcm_rb* pRB); + MA_API ma_uint32 ma_pcm_rb_get_subbuffer_size(ma_pcm_rb* pRB); + MA_API ma_uint32 ma_pcm_rb_get_subbuffer_stride(ma_pcm_rb* pRB); + MA_API ma_uint32 ma_pcm_rb_get_subbuffer_offset(ma_pcm_rb* pRB, ma_uint32 subbufferIndex); + MA_API void* ma_pcm_rb_get_subbuffer_ptr(ma_pcm_rb* pRB, ma_uint32 subbufferIndex, void* pBuffer); + MA_API ma_format ma_pcm_rb_get_format(const ma_pcm_rb* pRB); + MA_API ma_uint32 ma_pcm_rb_get_channels(const ma_pcm_rb* pRB); + MA_API ma_uint32 ma_pcm_rb_get_sample_rate(const ma_pcm_rb* pRB); + MA_API void ma_pcm_rb_set_sample_rate(ma_pcm_rb* pRB, ma_uint32 sampleRate); + + + /* + The idea of the duplex ring buffer is to act as the intermediary buffer when running two asynchronous devices in a duplex set up. The + capture device writes to it, and then a playback device reads from it. + + At the moment this is just a simple naive implementation, but in the future I want to implement some dynamic resampling to seamlessly + handle desyncs. Note that the API is work in progress and may change at any time in any version. + + The size of the buffer is based on the capture side since that's what'll be written to the buffer. It is based on the capture period size + in frames. The internal sample rate of the capture device is also needed in order to calculate the size. + */ + typedef struct + { + ma_pcm_rb rb; + } ma_duplex_rb; + + MA_API ma_result ma_duplex_rb_init(ma_format captureFormat, ma_uint32 captureChannels, ma_uint32 sampleRate, ma_uint32 captureInternalSampleRate, ma_uint32 captureInternalPeriodSizeInFrames, const ma_allocation_callbacks* pAllocationCallbacks, ma_duplex_rb* pRB); + MA_API ma_result ma_duplex_rb_uninit(ma_duplex_rb* pRB); + + + /************************************************************************************************************************************************************ + + Miscellaneous Helpers + + ************************************************************************************************************************************************************/ + /* + Retrieves a human readable description of the given result code. 
+ */ + MA_API const char* ma_result_description(ma_result result); + + /* + malloc() + */ + MA_API void* ma_malloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks); + + /* + calloc() + */ + MA_API void* ma_calloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks); + + /* + realloc() + */ + MA_API void* ma_realloc(void* p, size_t sz, const ma_allocation_callbacks* pAllocationCallbacks); + + /* + free() + */ + MA_API void ma_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks); + + /* + Performs an aligned malloc, with the assumption that the alignment is a power of 2. + */ + MA_API void* ma_aligned_malloc(size_t sz, size_t alignment, const ma_allocation_callbacks* pAllocationCallbacks); + + /* + Free's an aligned malloc'd buffer. + */ + MA_API void ma_aligned_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks); + + /* + Retrieves a friendly name for a format. + */ + MA_API const char* ma_get_format_name(ma_format format); + + /* + Blends two frames in floating point format. + */ + MA_API void ma_blend_f32(float* pOut, float* pInA, float* pInB, float factor, ma_uint32 channels); + + /* + Retrieves the size of a sample in bytes for the given format. + + This API is efficient and is implemented using a lookup table. + + Thread Safety: SAFE + This API is pure. + */ + MA_API ma_uint32 ma_get_bytes_per_sample(ma_format format); + static MA_INLINE ma_uint32 ma_get_bytes_per_frame(ma_format format, ma_uint32 channels) { return ma_get_bytes_per_sample(format) * channels; } + + /* + Converts a log level to a string. + */ + MA_API const char* ma_log_level_to_string(ma_uint32 logLevel); + + + + + /************************************************************************************************************************************************************ + + Synchronization + + ************************************************************************************************************************************************************/ + /* + Locks a spinlock. + */ + MA_API ma_result ma_spinlock_lock(volatile ma_spinlock* pSpinlock); + + /* + Locks a spinlock, but does not yield() when looping. + */ + MA_API ma_result ma_spinlock_lock_noyield(volatile ma_spinlock* pSpinlock); + + /* + Unlocks a spinlock. + */ + MA_API ma_result ma_spinlock_unlock(volatile ma_spinlock* pSpinlock); + + +#ifndef MA_NO_THREADING + + /* + Creates a mutex. + + A mutex must be created from a valid context. A mutex is initially unlocked. + */ + MA_API ma_result ma_mutex_init(ma_mutex* pMutex); + + /* + Deletes a mutex. + */ + MA_API void ma_mutex_uninit(ma_mutex* pMutex); + + /* + Locks a mutex with an infinite timeout. + */ + MA_API void ma_mutex_lock(ma_mutex* pMutex); + + /* + Unlocks a mutex. + */ + MA_API void ma_mutex_unlock(ma_mutex* pMutex); + + + /* + Initializes an auto-reset event. + */ + MA_API ma_result ma_event_init(ma_event* pEvent); + + /* + Uninitializes an auto-reset event. + */ + MA_API void ma_event_uninit(ma_event* pEvent); + + /* + Waits for the specified auto-reset event to become signalled. + */ + MA_API ma_result ma_event_wait(ma_event* pEvent); + + /* + Signals the specified auto-reset event. + */ + MA_API ma_result ma_event_signal(ma_event* pEvent); +#endif /* MA_NO_THREADING */ + + + /* + Fence + ===== + This locks while the counter is larger than 0. Counter can be incremented and decremented by any + thread, but care needs to be taken when waiting. 
It is possible for one thread to acquire the + fence just as another thread returns from ma_fence_wait(). + + The idea behind a fence is to allow you to wait for a group of operations to complete. When an + operation starts, the counter is incremented which locks the fence. When the operation completes, + the fence will be released which decrements the counter. ma_fence_wait() will block until the + counter hits zero. + + If threading is disabled, ma_fence_wait() will spin on the counter. + */ + typedef struct + { +#ifndef MA_NO_THREADING + ma_event e; +#endif + ma_uint32 counter; + } ma_fence; + + MA_API ma_result ma_fence_init(ma_fence* pFence); + MA_API void ma_fence_uninit(ma_fence* pFence); + MA_API ma_result ma_fence_acquire(ma_fence* pFence); /* Increment counter. */ + MA_API ma_result ma_fence_release(ma_fence* pFence); /* Decrement counter. */ + MA_API ma_result ma_fence_wait(ma_fence* pFence); /* Wait for counter to reach 0. */ + + + + /* + Notification callback for asynchronous operations. + */ + typedef void ma_async_notification; + + typedef struct + { + void (* onSignal)(ma_async_notification* pNotification); + } ma_async_notification_callbacks; + + MA_API ma_result ma_async_notification_signal(ma_async_notification* pNotification); + + + /* + Simple polling notification. + + This just sets a variable when the notification has been signalled which is then polled with ma_async_notification_poll_is_signalled() + */ + typedef struct + { + ma_async_notification_callbacks cb; + ma_bool32 signalled; + } ma_async_notification_poll; + + MA_API ma_result ma_async_notification_poll_init(ma_async_notification_poll* pNotificationPoll); + MA_API ma_bool32 ma_async_notification_poll_is_signalled(const ma_async_notification_poll* pNotificationPoll); + + + /* + Event Notification + + This uses an ma_event. If threading is disabled (MA_NO_THREADING), initialization will fail. + */ + typedef struct + { + ma_async_notification_callbacks cb; +#ifndef MA_NO_THREADING + ma_event e; +#endif + } ma_async_notification_event; + + MA_API ma_result ma_async_notification_event_init(ma_async_notification_event* pNotificationEvent); + MA_API ma_result ma_async_notification_event_uninit(ma_async_notification_event* pNotificationEvent); + MA_API ma_result ma_async_notification_event_wait(ma_async_notification_event* pNotificationEvent); + MA_API ma_result ma_async_notification_event_signal(ma_async_notification_event* pNotificationEvent); + + + + + /************************************************************************************************************************************************************ + + Job Queue + + ************************************************************************************************************************************************************/ + + /* + Slot Allocator + -------------- + The idea of the slot allocator is for it to be used in conjunction with a fixed sized buffer. You use the slot allocator to allocator an index that can be used + as the insertion point for an object. + + Slots are reference counted to help mitigate the ABA problem in the lock-free queue we use for tracking jobs. + + The slot index is stored in the low 32 bits. The reference counter is stored in the high 32 bits: + + +-----------------+-----------------+ + | 32 Bits | 32 Bits | + +-----------------+-----------------+ + | Reference Count | Slot Index | + +-----------------+-----------------+ + */ + typedef struct + { + ma_uint32 capacity; /* The number of slots to make available. 
*/ + } ma_slot_allocator_config; + + MA_API ma_slot_allocator_config ma_slot_allocator_config_init(ma_uint32 capacity); + + + typedef struct + { + MA_ATOMIC(4, ma_uint32) bitfield; /* Must be used atomically because the allocation and freeing routines need to make copies of this which must never be optimized away by the compiler. */ + } ma_slot_allocator_group; + + typedef struct + { + ma_slot_allocator_group* pGroups; /* Slots are grouped in chunks of 32. */ + ma_uint32* pSlots; /* 32 bits for reference counting for ABA mitigation. */ + ma_uint32 count; /* Allocation count. */ + ma_uint32 capacity; + + /* Memory management. */ + ma_bool32 _ownsHeap; + void* _pHeap; + } ma_slot_allocator; + + MA_API ma_result ma_slot_allocator_get_heap_size(const ma_slot_allocator_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_slot_allocator_init_preallocated(const ma_slot_allocator_config* pConfig, void* pHeap, ma_slot_allocator* pAllocator); + MA_API ma_result ma_slot_allocator_init(const ma_slot_allocator_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_slot_allocator* pAllocator); + MA_API void ma_slot_allocator_uninit(ma_slot_allocator* pAllocator, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_slot_allocator_alloc(ma_slot_allocator* pAllocator, ma_uint64* pSlot); + MA_API ma_result ma_slot_allocator_free(ma_slot_allocator* pAllocator, ma_uint64 slot); + + + typedef struct ma_job ma_job; + + /* + Callback for processing a job. Each job type will have their own processing callback which will be + called by ma_job_process(). + */ + typedef ma_result (* ma_job_proc)(ma_job* pJob); + + /* When a job type is added here an callback needs to be added go "g_jobVTable" in the implementation section. */ + typedef enum + { + /* Miscellaneous. */ + MA_JOB_TYPE_QUIT = 0, + MA_JOB_TYPE_CUSTOM, + + /* Resource Manager. */ + MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER_NODE, + MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER_NODE, + MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_BUFFER_NODE, + MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER, + MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER, + MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_STREAM, + MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_STREAM, + MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_STREAM, + MA_JOB_TYPE_RESOURCE_MANAGER_SEEK_DATA_STREAM, + + /* Device. */ + MA_JOB_TYPE_DEVICE_AAUDIO_REROUTE, + + /* Count. Must always be last. */ + MA_JOB_TYPE_COUNT + } ma_job_type; + + struct ma_job + { + union + { + struct + { + ma_uint16 code; /* Job type. */ + ma_uint16 slot; /* Index into a ma_slot_allocator. */ + ma_uint32 refcount; + } breakup; + ma_uint64 allocation; + } toc; /* 8 bytes. We encode the job code into the slot allocation data to save space. */ + MA_ATOMIC(8, ma_uint64) next; /* refcount + slot for the next item. Does not include the job code. */ + ma_uint32 order; /* Execution order. Used to create a data dependency and ensure a job is executed in order. Usage is contextual depending on the job type. */ + + union + { + /* Miscellaneous. */ + struct + { + ma_job_proc proc; + ma_uintptr data0; + ma_uintptr data1; + } custom; + + /* Resource Manager */ + union + { + struct + { + /*ma_resource_manager**/ void* pResourceManager; + /*ma_resource_manager_data_buffer_node**/ void* pDataBufferNode; + char* pFilePath; + wchar_t* pFilePathW; + ma_uint32 flags; /* Resource manager data source flags that were used when initializing the data buffer. 
*/ + ma_async_notification* pInitNotification; /* Signalled when the data buffer has been initialized and the format/channels/rate can be retrieved. */ + ma_async_notification* pDoneNotification; /* Signalled when the data buffer has been fully decoded. Will be passed through to MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_BUFFER_NODE when decoding. */ + ma_fence* pInitFence; /* Released when initialization of the decoder is complete. */ + ma_fence* pDoneFence; /* Released if initialization of the decoder fails. Passed through to PAGE_DATA_BUFFER_NODE untouched if init is successful. */ + } loadDataBufferNode; + struct + { + /*ma_resource_manager**/ void* pResourceManager; + /*ma_resource_manager_data_buffer_node**/ void* pDataBufferNode; + ma_async_notification* pDoneNotification; + ma_fence* pDoneFence; + } freeDataBufferNode; + struct + { + /*ma_resource_manager**/ void* pResourceManager; + /*ma_resource_manager_data_buffer_node**/ void* pDataBufferNode; + /*ma_decoder**/ void* pDecoder; + ma_async_notification* pDoneNotification; /* Signalled when the data buffer has been fully decoded. */ + ma_fence* pDoneFence; /* Passed through from LOAD_DATA_BUFFER_NODE and released when the data buffer completes decoding or an error occurs. */ + } pageDataBufferNode; + + struct + { + /*ma_resource_manager_data_buffer**/ void* pDataBuffer; + ma_async_notification* pInitNotification; /* Signalled when the data buffer has been initialized and the format/channels/rate can be retrieved. */ + ma_async_notification* pDoneNotification; /* Signalled when the data buffer has been fully decoded. */ + ma_fence* pInitFence; /* Released when the data buffer has been initialized and the format/channels/rate can be retrieved. */ + ma_fence* pDoneFence; /* Released when the data buffer has been fully decoded. */ + ma_uint64 rangeBegInPCMFrames; + ma_uint64 rangeEndInPCMFrames; + ma_uint64 loopPointBegInPCMFrames; + ma_uint64 loopPointEndInPCMFrames; + ma_uint32 isLooping; + } loadDataBuffer; + struct + { + /*ma_resource_manager_data_buffer**/ void* pDataBuffer; + ma_async_notification* pDoneNotification; + ma_fence* pDoneFence; + } freeDataBuffer; + + struct + { + /*ma_resource_manager_data_stream**/ void* pDataStream; + char* pFilePath; /* Allocated when the job is posted, freed by the job thread after loading. */ + wchar_t* pFilePathW; /* ^ As above ^. Only used if pFilePath is NULL. */ + ma_uint64 initialSeekPoint; + ma_async_notification* pInitNotification; /* Signalled after the first two pages have been decoded and frames can be read from the stream. */ + ma_fence* pInitFence; + } loadDataStream; + struct + { + /*ma_resource_manager_data_stream**/ void* pDataStream; + ma_async_notification* pDoneNotification; + ma_fence* pDoneFence; + } freeDataStream; + struct + { + /*ma_resource_manager_data_stream**/ void* pDataStream; + ma_uint32 pageIndex; /* The index of the page to decode into. */ + } pageDataStream; + struct + { + /*ma_resource_manager_data_stream**/ void* pDataStream; + ma_uint64 frameIndex; + } seekDataStream; + } resourceManager; + + /* Device. */ + union + { + union + { + struct + { + /*ma_device**/ void* pDevice; + /*ma_device_type*/ ma_uint32 deviceType; + } reroute; + } aaudio; + } device; + } data; + }; + + MA_API ma_job ma_job_init(ma_uint16 code); + MA_API ma_result ma_job_process(ma_job* pJob); + + + /* + When set, ma_job_queue_next() will not wait and no semaphore will be signaled in + ma_job_queue_post(). ma_job_queue_next() will return MA_NO_DATA_AVAILABLE if nothing is available. 
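+
+    For illustration, a minimal sketch of a consumer polling a non-blocking queue might
+    look like the following (error handling is omitted and the capacity of 64 is an
+    arbitrary choice; the APIs used are declared just below):
+
+        ma_job_queue_config config = ma_job_queue_config_init(MA_JOB_QUEUE_FLAG_NON_BLOCKING, 64);
+        ma_job_queue queue;
+        ma_job job;
+
+        ma_job_queue_init(&config, NULL, &queue);    // NULL = default allocation callbacks.
+
+        // Somewhere on the thread that drains the queue, typically once per update.
+        while (ma_job_queue_next(&queue, &job) == MA_SUCCESS) {
+            ma_job_process(&job);
+        }
+
+        // ma_job_queue_next() returns MA_NO_DATA_AVAILABLE when the queue is empty
+        // instead of blocking, or MA_CANCELLED when a quit job is dequeued.
+
+        ma_job_queue_uninit(&queue, NULL);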
+ + This flag should always be used for platforms that do not support multithreading. + */ + typedef enum + { + MA_JOB_QUEUE_FLAG_NON_BLOCKING = 0x00000001 + } ma_job_queue_flags; + + typedef struct + { + ma_uint32 flags; + ma_uint32 capacity; /* The maximum number of jobs that can fit in the queue at a time. */ + } ma_job_queue_config; + + MA_API ma_job_queue_config ma_job_queue_config_init(ma_uint32 flags, ma_uint32 capacity); + + + typedef struct + { + ma_uint32 flags; /* Flags passed in at initialization time. */ + ma_uint32 capacity; /* The maximum number of jobs that can fit in the queue at a time. Set by the config. */ + MA_ATOMIC(8, ma_uint64) head; /* The first item in the list. Required for removing from the top of the list. */ + MA_ATOMIC(8, ma_uint64) tail; /* The last item in the list. Required for appending to the end of the list. */ +#ifndef MA_NO_THREADING + ma_semaphore sem; /* Only used when MA_JOB_QUEUE_FLAG_NON_BLOCKING is unset. */ +#endif + ma_slot_allocator allocator; + ma_job* pJobs; +#ifndef MA_USE_EXPERIMENTAL_LOCK_FREE_JOB_QUEUE + ma_spinlock lock; +#endif + + /* Memory management. */ + void* _pHeap; + ma_bool32 _ownsHeap; + } ma_job_queue; + + MA_API ma_result ma_job_queue_get_heap_size(const ma_job_queue_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_job_queue_init_preallocated(const ma_job_queue_config* pConfig, void* pHeap, ma_job_queue* pQueue); + MA_API ma_result ma_job_queue_init(const ma_job_queue_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_job_queue* pQueue); + MA_API void ma_job_queue_uninit(ma_job_queue* pQueue, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_job_queue_post(ma_job_queue* pQueue, const ma_job* pJob); + MA_API ma_result ma_job_queue_next(ma_job_queue* pQueue, ma_job* pJob); /* Returns MA_CANCELLED if the next job is a quit job. */ + + + + /************************************************************************************************************************************************************ + ************************************************************************************************************************************************************* + + DEVICE I/O + ========== + + This section contains the APIs for device playback and capture. Here is where you'll find ma_device_init(), etc. + + ************************************************************************************************************************************************************* + ************************************************************************************************************************************************************/ +#ifndef MA_NO_DEVICE_IO + /* Some backends are only supported on certain platforms. */ +#if defined(MA_WIN32) +#define MA_SUPPORT_WASAPI + +#if defined(MA_WIN32_DESKTOP) /* DirectSound and WinMM backends are only supported on desktops. */ +#define MA_SUPPORT_DSOUND +#define MA_SUPPORT_WINMM + + /* Don't enable JACK here if compiling with Cosmopolitan. It'll be enabled in the Linux section below. */ +#if !defined(__COSMOPOLITAN__) +#define MA_SUPPORT_JACK /* JACK is technically supported on Windows, but I don't know how many people use it in practice... */ +#endif +#endif +#endif +#if defined(MA_UNIX) && !defined(MA_ORBIS) && !defined(MA_PROSPERO) +#if defined(MA_LINUX) +#if !defined(MA_ANDROID) && !defined(__COSMOPOLITAN__) /* ALSA is not supported on Android. 
*/ +#define MA_SUPPORT_ALSA +#endif +#endif +#if !defined(MA_BSD) && !defined(MA_ANDROID) && !defined(MA_EMSCRIPTEN) +#define MA_SUPPORT_PULSEAUDIO +#define MA_SUPPORT_JACK +#endif +#if defined(__OpenBSD__) /* <-- Change this to "#if defined(MA_BSD)" to enable sndio on all BSD flavors. */ +#define MA_SUPPORT_SNDIO /* sndio is only supported on OpenBSD for now. May be expanded later if there's demand. */ +#endif +#if defined(__NetBSD__) || defined(__OpenBSD__) +#define MA_SUPPORT_AUDIO4 /* Only support audio(4) on platforms with known support. */ +#endif +#if defined(__FreeBSD__) || defined(__DragonFly__) +#define MA_SUPPORT_OSS /* Only support OSS on specific platforms with known support. */ +#endif +#endif +#if defined(MA_ANDROID) +#define MA_SUPPORT_AAUDIO +#define MA_SUPPORT_OPENSL +#endif +#if defined(MA_APPLE) +#define MA_SUPPORT_COREAUDIO +#endif +#if defined(MA_EMSCRIPTEN) +#define MA_SUPPORT_WEBAUDIO +#endif + + /* All platforms should support custom backends. */ +#define MA_SUPPORT_CUSTOM + + /* Explicitly disable the Null backend for Emscripten because it uses a background thread which is not properly supported right now. */ +#if !defined(MA_EMSCRIPTEN) +#define MA_SUPPORT_NULL +#endif + + +#if defined(MA_SUPPORT_WASAPI) && !defined(MA_NO_WASAPI) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_WASAPI)) +#define MA_HAS_WASAPI +#endif +#if defined(MA_SUPPORT_DSOUND) && !defined(MA_NO_DSOUND) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_DSOUND)) +#define MA_HAS_DSOUND +#endif +#if defined(MA_SUPPORT_WINMM) && !defined(MA_NO_WINMM) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_WINMM)) +#define MA_HAS_WINMM +#endif +#if defined(MA_SUPPORT_ALSA) && !defined(MA_NO_ALSA) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_ALSA)) +#define MA_HAS_ALSA +#endif +#if defined(MA_SUPPORT_PULSEAUDIO) && !defined(MA_NO_PULSEAUDIO) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_PULSEAUDIO)) +#define MA_HAS_PULSEAUDIO +#endif +#if defined(MA_SUPPORT_JACK) && !defined(MA_NO_JACK) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_JACK)) +#define MA_HAS_JACK +#endif +#if defined(MA_SUPPORT_COREAUDIO) && !defined(MA_NO_COREAUDIO) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_COREAUDIO)) +#define MA_HAS_COREAUDIO +#endif +#if defined(MA_SUPPORT_SNDIO) && !defined(MA_NO_SNDIO) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_SNDIO)) +#define MA_HAS_SNDIO +#endif +#if defined(MA_SUPPORT_AUDIO4) && !defined(MA_NO_AUDIO4) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_AUDIO4)) +#define MA_HAS_AUDIO4 +#endif +#if defined(MA_SUPPORT_OSS) && !defined(MA_NO_OSS) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_OSS)) +#define MA_HAS_OSS +#endif +#if defined(MA_SUPPORT_AAUDIO) && !defined(MA_NO_AAUDIO) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_AAUDIO)) +#define MA_HAS_AAUDIO +#endif +#if defined(MA_SUPPORT_OPENSL) && !defined(MA_NO_OPENSL) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_OPENSL)) +#define MA_HAS_OPENSL +#endif +#if defined(MA_SUPPORT_WEBAUDIO) && !defined(MA_NO_WEBAUDIO) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_WEBAUDIO)) +#define MA_HAS_WEBAUDIO +#endif +#if defined(MA_SUPPORT_CUSTOM) && !defined(MA_NO_CUSTOM) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_CUSTOM)) +#define MA_HAS_CUSTOM +#endif +#if 
defined(MA_SUPPORT_NULL) && !defined(MA_NO_NULL) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_NULL)) +#define MA_HAS_NULL +#endif + + typedef enum + { + ma_device_state_uninitialized = 0, + ma_device_state_stopped = 1, /* The device's default state after initialization. */ + ma_device_state_started = 2, /* The device is started and is requesting and/or delivering audio data. */ + ma_device_state_starting = 3, /* Transitioning from a stopped state to started. */ + ma_device_state_stopping = 4 /* Transitioning from a started state to stopped. */ + } ma_device_state; + + MA_ATOMIC_SAFE_TYPE_DECL(i32, 4, device_state) + + +#ifdef MA_SUPPORT_WASAPI + /* We need a IMMNotificationClient object for WASAPI. */ + typedef struct + { + void* lpVtbl; + ma_uint32 counter; + ma_device* pDevice; + } ma_IMMNotificationClient; +#endif + + /* Backend enums must be in priority order. */ + typedef enum + { + ma_backend_wasapi, + ma_backend_dsound, + ma_backend_winmm, + ma_backend_coreaudio, + ma_backend_sndio, + ma_backend_audio4, + ma_backend_oss, + ma_backend_pulseaudio, + ma_backend_alsa, + ma_backend_jack, + ma_backend_aaudio, + ma_backend_opensl, + ma_backend_webaudio, + ma_backend_custom, /* <-- Custom backend, with callbacks defined by the context config. */ + ma_backend_null /* <-- Must always be the last item. Lowest priority, and used as the terminator for backend enumeration. */ + } ma_backend; + +#define MA_BACKEND_COUNT (ma_backend_null+1) + + + /* + Device job thread. This is used by backends that require asynchronous processing of certain + operations. It is not used by all backends. + + The device job thread is made up of a thread and a job queue. You can post a job to the thread with + ma_device_job_thread_post(). The thread will do the processing of the job. + */ + typedef struct + { + ma_bool32 noThread; /* Set this to true if you want to process jobs yourself. */ + ma_uint32 jobQueueCapacity; + ma_uint32 jobQueueFlags; + } ma_device_job_thread_config; + + MA_API ma_device_job_thread_config ma_device_job_thread_config_init(void); + + typedef struct + { + ma_thread thread; + ma_job_queue jobQueue; + ma_bool32 _hasThread; + } ma_device_job_thread; + + MA_API ma_result ma_device_job_thread_init(const ma_device_job_thread_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_device_job_thread* pJobThread); + MA_API void ma_device_job_thread_uninit(ma_device_job_thread* pJobThread, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_device_job_thread_post(ma_device_job_thread* pJobThread, const ma_job* pJob); + MA_API ma_result ma_device_job_thread_next(ma_device_job_thread* pJobThread, ma_job* pJob); + + + + /* Device notification types. */ + typedef enum + { + ma_device_notification_type_started, + ma_device_notification_type_stopped, + ma_device_notification_type_rerouted, + ma_device_notification_type_interruption_began, + ma_device_notification_type_interruption_ended + } ma_device_notification_type; + + typedef struct + { + ma_device* pDevice; + ma_device_notification_type type; + union + { + struct + { + int _unused; + } started; + struct + { + int _unused; + } stopped; + struct + { + int _unused; + } rerouted; + struct + { + int _unused; + } interruption; + } data; + } ma_device_notification; + + /* + The notification callback for when the application should be notified of a change to the device. 
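+
+    For illustration, a minimal sketch of such a callback might look like the following
+    (the function name is hypothetical and each branch would normally do something more
+    useful than nothing):
+
+        static void my_notification_callback(const ma_device_notification* pNotification)
+        {
+            switch (pNotification->type)
+            {
+                case ma_device_notification_type_started:
+                    // pNotification->pDevice has started and is delivering audio.
+                    break;
+                case ma_device_notification_type_stopped:
+                    // The device has stopped, either explicitly or due to an external event.
+                    break;
+                case ma_device_notification_type_rerouted:
+                    // The backend has switched to a different physical device.
+                    break;
+                default:
+                    break;
+            }
+        }
+
+    The callback is hooked up through the notificationCallback member of the device config.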
+ + This callback is used for notifying the application of changes such as when the device has started, + stopped, rerouted or an interruption has occurred. Note that not all backends will post all + notification types. For example, some backends will perform automatic stream routing without any + kind of notification to the host program which means miniaudio will never know about it and will + never be able to fire the rerouted notification. You should keep this in mind when designing your + program. + + The stopped notification will *not* get fired when a device is rerouted. + + + Parameters + ---------- + pNotification (in) + A pointer to a structure containing information about the event. Use the `pDevice` member of + this object to retrieve the relevant device. The `type` member can be used to discriminate + against each of the notification types. + + + Remarks + ------- + Do not restart or uninitialize the device from the callback. + + Not all notifications will be triggered by all backends, however the started and stopped events + should be reliable for all backends. Some backends do not have a good way to detect device + stoppages due to unplugging the device which may result in the stopped callback not getting + fired. This has been observed with at least one BSD variant. + + The rerouted notification is fired *after* the reroute has occurred. The stopped notification will + *not* get fired when a device is rerouted. The following backends are known to do automatic stream + rerouting, but do not have a way to be notified of the change: + + * DirectSound + + The interruption notifications are used on mobile platforms for detecting when audio is interrupted + due to things like an incoming phone call. Currently this is only implemented on iOS. None of the + Android backends will report this notification. + */ + typedef void (* ma_device_notification_proc)(const ma_device_notification* pNotification); + + + /* + The callback for processing audio data from the device. + + The data callback is fired by miniaudio whenever the device needs to have more data delivered to a playback device, or when a capture device has some data + available. This is called as soon as the backend asks for more data which means it may be called with inconsistent frame counts. You cannot assume the + callback will be fired with a consistent frame count. + + + Parameters + ---------- + pDevice (in) + A pointer to the relevant device. + + pOutput (out) + A pointer to the output buffer that will receive audio data that will later be played back through the speakers. This will be non-null for a playback or + full-duplex device and null for a capture and loopback device. + + pInput (in) + A pointer to the buffer containing input data from a recording device. This will be non-null for a capture, full-duplex or loopback device and null for a + playback device. + + frameCount (in) + The number of PCM frames to process. Note that this will not necessarily be equal to what you requested when you initialized the device. The + `periodSizeInFrames` and `periodSizeInMilliseconds` members of the device config are just hints, and are not necessarily exactly what you'll get. You must + not assume this will always be the same value each time the callback is fired. + + + Remarks + ------- + You cannot stop and start the device from inside the callback or else you'll get a deadlock. You must also not uninitialize the device from inside the + callback. 
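+
+    For illustration, a minimal sketch of a data callback for a full-duplex device that
+    simply forwards captured frames to the playback buffer might look like the following
+    (the function name is hypothetical, <string.h> is assumed to be available for memcpy,
+    and both sides are assumed to use the same format and channel count):
+
+        static void my_data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
+        {
+            // For a duplex device both pOutput and pInput are valid, so a straight copy works.
+            memcpy(pOutput, pInput, frameCount * ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels));
+        }
+
+    The callback is assigned to the dataCallback member of the device config before the
+    device is initialized.
+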
The following APIs cannot be called from inside the callback: + + ma_device_init() + ma_device_init_ex() + ma_device_uninit() + ma_device_start() + ma_device_stop() + + The proper way to stop the device is to call `ma_device_stop()` from a different thread, normally the main application thread. + */ + typedef void (* ma_device_data_proc)(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount); + + + + + /* + DEPRECATED. Use ma_device_notification_proc instead. + + The callback for when the device has been stopped. + + This will be called when the device is stopped explicitly with `ma_device_stop()` and also called implicitly when the device is stopped through external forces + such as being unplugged or an internal error occurring. + + + Parameters + ---------- + pDevice (in) + A pointer to the device that has just stopped. + + + Remarks + ------- + Do not restart or uninitialize the device from the callback. + */ + typedef void (* ma_stop_proc)(ma_device* pDevice); /* DEPRECATED. Use ma_device_notification_proc instead. */ + + typedef enum + { + ma_device_type_playback = 1, + ma_device_type_capture = 2, + ma_device_type_duplex = ma_device_type_playback | ma_device_type_capture, /* 3 */ + ma_device_type_loopback = 4 + } ma_device_type; + + typedef enum + { + ma_share_mode_shared = 0, + ma_share_mode_exclusive + } ma_share_mode; + + /* iOS/tvOS/watchOS session categories. */ + typedef enum + { + ma_ios_session_category_default = 0, /* AVAudioSessionCategoryPlayAndRecord. */ + ma_ios_session_category_none, /* Leave the session category unchanged. */ + ma_ios_session_category_ambient, /* AVAudioSessionCategoryAmbient */ + ma_ios_session_category_solo_ambient, /* AVAudioSessionCategorySoloAmbient */ + ma_ios_session_category_playback, /* AVAudioSessionCategoryPlayback */ + ma_ios_session_category_record, /* AVAudioSessionCategoryRecord */ + ma_ios_session_category_play_and_record, /* AVAudioSessionCategoryPlayAndRecord */ + ma_ios_session_category_multi_route /* AVAudioSessionCategoryMultiRoute */ + } ma_ios_session_category; + + /* iOS/tvOS/watchOS session category options */ + typedef enum + { + ma_ios_session_category_option_mix_with_others = 0x01, /* AVAudioSessionCategoryOptionMixWithOthers */ + ma_ios_session_category_option_duck_others = 0x02, /* AVAudioSessionCategoryOptionDuckOthers */ + ma_ios_session_category_option_allow_bluetooth = 0x04, /* AVAudioSessionCategoryOptionAllowBluetooth */ + ma_ios_session_category_option_default_to_speaker = 0x08, /* AVAudioSessionCategoryOptionDefaultToSpeaker */ + ma_ios_session_category_option_interrupt_spoken_audio_and_mix_with_others = 0x11, /* AVAudioSessionCategoryOptionInterruptSpokenAudioAndMixWithOthers */ + ma_ios_session_category_option_allow_bluetooth_a2dp = 0x20, /* AVAudioSessionCategoryOptionAllowBluetoothA2DP */ + ma_ios_session_category_option_allow_air_play = 0x40, /* AVAudioSessionCategoryOptionAllowAirPlay */ + } ma_ios_session_category_option; + + /* OpenSL stream types. */ + typedef enum + { + ma_opensl_stream_type_default = 0, /* Leaves the stream type unset. */ + ma_opensl_stream_type_voice, /* SL_ANDROID_STREAM_VOICE */ + ma_opensl_stream_type_system, /* SL_ANDROID_STREAM_SYSTEM */ + ma_opensl_stream_type_ring, /* SL_ANDROID_STREAM_RING */ + ma_opensl_stream_type_media, /* SL_ANDROID_STREAM_MEDIA */ + ma_opensl_stream_type_alarm, /* SL_ANDROID_STREAM_ALARM */ + ma_opensl_stream_type_notification /* SL_ANDROID_STREAM_NOTIFICATION */ + } ma_opensl_stream_type; + + /* OpenSL recording presets. 
*/ + typedef enum + { + ma_opensl_recording_preset_default = 0, /* Leaves the input preset unset. */ + ma_opensl_recording_preset_generic, /* SL_ANDROID_RECORDING_PRESET_GENERIC */ + ma_opensl_recording_preset_camcorder, /* SL_ANDROID_RECORDING_PRESET_CAMCORDER */ + ma_opensl_recording_preset_voice_recognition, /* SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION */ + ma_opensl_recording_preset_voice_communication, /* SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION */ + ma_opensl_recording_preset_voice_unprocessed /* SL_ANDROID_RECORDING_PRESET_UNPROCESSED */ + } ma_opensl_recording_preset; + + /* WASAPI audio thread priority characteristics. */ + typedef enum + { + ma_wasapi_usage_default = 0, + ma_wasapi_usage_games, + ma_wasapi_usage_pro_audio, + } ma_wasapi_usage; + + /* AAudio usage types. */ + typedef enum + { + ma_aaudio_usage_default = 0, /* Leaves the usage type unset. */ + ma_aaudio_usage_media, /* AAUDIO_USAGE_MEDIA */ + ma_aaudio_usage_voice_communication, /* AAUDIO_USAGE_VOICE_COMMUNICATION */ + ma_aaudio_usage_voice_communication_signalling, /* AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING */ + ma_aaudio_usage_alarm, /* AAUDIO_USAGE_ALARM */ + ma_aaudio_usage_notification, /* AAUDIO_USAGE_NOTIFICATION */ + ma_aaudio_usage_notification_ringtone, /* AAUDIO_USAGE_NOTIFICATION_RINGTONE */ + ma_aaudio_usage_notification_event, /* AAUDIO_USAGE_NOTIFICATION_EVENT */ + ma_aaudio_usage_assistance_accessibility, /* AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY */ + ma_aaudio_usage_assistance_navigation_guidance, /* AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE */ + ma_aaudio_usage_assistance_sonification, /* AAUDIO_USAGE_ASSISTANCE_SONIFICATION */ + ma_aaudio_usage_game, /* AAUDIO_USAGE_GAME */ + ma_aaudio_usage_assitant, /* AAUDIO_USAGE_ASSISTANT */ + ma_aaudio_usage_emergency, /* AAUDIO_SYSTEM_USAGE_EMERGENCY */ + ma_aaudio_usage_safety, /* AAUDIO_SYSTEM_USAGE_SAFETY */ + ma_aaudio_usage_vehicle_status, /* AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS */ + ma_aaudio_usage_announcement /* AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT */ + } ma_aaudio_usage; + + /* AAudio content types. */ + typedef enum + { + ma_aaudio_content_type_default = 0, /* Leaves the content type unset. */ + ma_aaudio_content_type_speech, /* AAUDIO_CONTENT_TYPE_SPEECH */ + ma_aaudio_content_type_music, /* AAUDIO_CONTENT_TYPE_MUSIC */ + ma_aaudio_content_type_movie, /* AAUDIO_CONTENT_TYPE_MOVIE */ + ma_aaudio_content_type_sonification /* AAUDIO_CONTENT_TYPE_SONIFICATION */ + } ma_aaudio_content_type; + + /* AAudio input presets. */ + typedef enum + { + ma_aaudio_input_preset_default = 0, /* Leaves the input preset unset. */ + ma_aaudio_input_preset_generic, /* AAUDIO_INPUT_PRESET_GENERIC */ + ma_aaudio_input_preset_camcorder, /* AAUDIO_INPUT_PRESET_CAMCORDER */ + ma_aaudio_input_preset_voice_recognition, /* AAUDIO_INPUT_PRESET_VOICE_RECOGNITION */ + ma_aaudio_input_preset_voice_communication, /* AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION */ + ma_aaudio_input_preset_unprocessed, /* AAUDIO_INPUT_PRESET_UNPROCESSED */ + ma_aaudio_input_preset_voice_performance /* AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE */ + } ma_aaudio_input_preset; + + typedef enum + { + ma_aaudio_allow_capture_default = 0, /* Leaves the allowed capture policy unset. 
*/ + ma_aaudio_allow_capture_by_all, /* AAUDIO_ALLOW_CAPTURE_BY_ALL */ + ma_aaudio_allow_capture_by_system, /* AAUDIO_ALLOW_CAPTURE_BY_SYSTEM */ + ma_aaudio_allow_capture_by_none /* AAUDIO_ALLOW_CAPTURE_BY_NONE */ + } ma_aaudio_allowed_capture_policy; + + typedef union + { + ma_int64 counter; + double counterD; + } ma_timer; + + typedef union + { + ma_wchar_win32 wasapi[64]; /* WASAPI uses a wchar_t string for identification. */ + ma_uint8 dsound[16]; /* DirectSound uses a GUID for identification. */ + /*UINT_PTR*/ ma_uint32 winmm; /* When creating a device, WinMM expects a Win32 UINT_PTR for device identification. In practice it's actually just a UINT. */ + char alsa[256]; /* ALSA uses a name string for identification. */ + char pulse[256]; /* PulseAudio uses a name string for identification. */ + int jack; /* JACK always uses default devices. */ + char coreaudio[256]; /* Core Audio uses a string for identification. */ + char sndio[256]; /* "snd/0", etc. */ + char audio4[256]; /* "/dev/audio", etc. */ + char oss[64]; /* "dev/dsp0", etc. "dev/dsp" for the default device. */ + ma_int32 aaudio; /* AAudio uses a 32-bit integer for identification. */ + ma_uint32 opensl; /* OpenSL|ES uses a 32-bit unsigned integer for identification. */ + char webaudio[32]; /* Web Audio always uses default devices for now, but if this changes it'll be a GUID. */ + union + { + int i; + char s[256]; + void* p; + } custom; /* The custom backend could be anything. Give them a few options. */ + int nullbackend; /* The null backend uses an integer for device IDs. */ + } ma_device_id; + + + typedef struct ma_context_config ma_context_config; + typedef struct ma_device_config ma_device_config; + typedef struct ma_backend_callbacks ma_backend_callbacks; + +#define MA_DATA_FORMAT_FLAG_EXCLUSIVE_MODE (1U << 1) /* If set, this is supported in exclusive mode. Otherwise not natively supported by exclusive mode. */ + +#ifndef MA_MAX_DEVICE_NAME_LENGTH +#define MA_MAX_DEVICE_NAME_LENGTH 255 +#endif + + typedef struct + { + /* Basic info. This is the only information guaranteed to be filled in during device enumeration. */ + ma_device_id id; + char name[MA_MAX_DEVICE_NAME_LENGTH + 1]; /* +1 for null terminator. */ + ma_bool32 isDefault; + + ma_uint32 nativeDataFormatCount; + struct + { + ma_format format; /* Sample format. If set to ma_format_unknown, all sample formats are supported. */ + ma_uint32 channels; /* If set to 0, all channels are supported. */ + ma_uint32 sampleRate; /* If set to 0, all sample rates are supported. */ + ma_uint32 flags; /* A combination of MA_DATA_FORMAT_FLAG_* flags. */ + } nativeDataFormats[/*ma_format_count * ma_standard_sample_rate_count * MA_MAX_CHANNELS*/ 64]; /* Not sure how big to make this. There can be *many* permutations for virtual devices which can support anything. */ + } ma_device_info; + + struct ma_device_config + { + ma_device_type deviceType; + ma_uint32 sampleRate; + ma_uint32 periodSizeInFrames; + ma_uint32 periodSizeInMilliseconds; + ma_uint32 periods; + ma_performance_profile performanceProfile; + ma_bool8 noPreSilencedOutputBuffer; /* When set to true, the contents of the output buffer passed into the data callback will be left undefined rather than initialized to silence. */ + ma_bool8 noClip; /* When set to true, the contents of the output buffer passed into the data callback will be clipped after returning. Only applies when the playback sample format is f32. */ + ma_bool8 noDisableDenormals; /* Do not disable denormals when firing the data callback. 
*/ + ma_bool8 noFixedSizedCallback; /* Disables strict fixed-sized data callbacks. Setting this to true will result in the period size being treated only as a hint to the backend. This is an optimization for those who don't need fixed sized callbacks. */ + ma_device_data_proc dataCallback; + ma_device_notification_proc notificationCallback; + ma_stop_proc stopCallback; + void* pUserData; + ma_resampler_config resampling; + struct + { + const ma_device_id* pDeviceID; + ma_format format; + ma_uint32 channels; + ma_channel* pChannelMap; + ma_channel_mix_mode channelMixMode; + ma_bool32 calculateLFEFromSpatialChannels; /* When an output LFE channel is present, but no input LFE, set to true to set the output LFE to the average of all spatial channels (LR, FR, etc.). Ignored when an input LFE is present. */ + ma_share_mode shareMode; + } playback; + struct + { + const ma_device_id* pDeviceID; + ma_format format; + ma_uint32 channels; + ma_channel* pChannelMap; + ma_channel_mix_mode channelMixMode; + ma_bool32 calculateLFEFromSpatialChannels; /* When an output LFE channel is present, but no input LFE, set to true to set the output LFE to the average of all spatial channels (LR, FR, etc.). Ignored when an input LFE is present. */ + ma_share_mode shareMode; + } capture; + + struct + { + ma_wasapi_usage usage; /* When configured, uses Avrt APIs to set the thread characteristics. */ + ma_bool8 noAutoConvertSRC; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM. */ + ma_bool8 noDefaultQualitySRC; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY. */ + ma_bool8 noAutoStreamRouting; /* Disables automatic stream routing. */ + ma_bool8 noHardwareOffloading; /* Disables WASAPI's hardware offloading feature. */ + ma_uint32 loopbackProcessID; /* The process ID to include or exclude for loopback mode. Set to 0 to capture audio from all processes. Ignored when an explicit device ID is specified. */ + ma_bool8 loopbackProcessExclude; /* When set to true, excludes the process specified by loopbackProcessID. By default, the process will be included. */ + } wasapi; + struct + { + ma_bool32 noMMap; /* Disables MMap mode. */ + ma_bool32 noAutoFormat; /* Opens the ALSA device with SND_PCM_NO_AUTO_FORMAT. */ + ma_bool32 noAutoChannels; /* Opens the ALSA device with SND_PCM_NO_AUTO_CHANNELS. */ + ma_bool32 noAutoResample; /* Opens the ALSA device with SND_PCM_NO_AUTO_RESAMPLE. */ + } alsa; + struct + { + const char* pStreamNamePlayback; + const char* pStreamNameCapture; + } pulse; + struct + { + ma_bool32 allowNominalSampleRateChange; /* Desktop only. When enabled, allows changing of the sample rate at the operating system level. */ + } coreaudio; + struct + { + ma_opensl_stream_type streamType; + ma_opensl_recording_preset recordingPreset; + ma_bool32 enableCompatibilityWorkarounds; + } opensl; + struct + { + ma_aaudio_usage usage; + ma_aaudio_content_type contentType; + ma_aaudio_input_preset inputPreset; + ma_aaudio_allowed_capture_policy allowedCapturePolicy; + ma_bool32 noAutoStartAfterReroute; + ma_bool32 enableCompatibilityWorkarounds; + } aaudio; + }; + + + /* + The callback for handling device enumeration. This is fired from `ma_context_enumerate_devices()`. + + + Parameters + ---------- + pContext (in) + A pointer to the context performing the enumeration. + + deviceType (in) + The type of the device being enumerated. This will always be either `ma_device_type_playback` or `ma_device_type_capture`. 
+ + pInfo (in) + A pointer to a `ma_device_info` containing the ID and name of the enumerated device. Note that this will not include detailed information about the device, + only basic information (ID and name). The reason for this is that it would otherwise require opening the backend device to probe for the information which + is too inefficient. + + pUserData (in) + The user data pointer passed into `ma_context_enumerate_devices()`. + */ + typedef ma_bool32 (* ma_enum_devices_callback_proc)(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData); + + + /* + Describes some basic details about a playback or capture device. + */ + typedef struct + { + const ma_device_id* pDeviceID; + ma_share_mode shareMode; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_channel channelMap[MA_MAX_CHANNELS]; + ma_uint32 periodSizeInFrames; + ma_uint32 periodSizeInMilliseconds; + ma_uint32 periodCount; + } ma_device_descriptor; + + /* + These are the callbacks required to be implemented for a backend. These callbacks are grouped into two parts: context and device. There is one context + to many devices. A device is created from a context. + + The general flow goes like this: + + 1) A context is created with `onContextInit()` + 1a) Available devices can be enumerated with `onContextEnumerateDevices()` if required. + 1b) Detailed information about a device can be queried with `onContextGetDeviceInfo()` if required. + 2) A device is created from the context that was created in the first step using `onDeviceInit()`, and optionally a device ID that was + selected from device enumeration via `onContextEnumerateDevices()`. + 3) A device is started or stopped with `onDeviceStart()` / `onDeviceStop()` + 4) Data is delivered to and from the device by the backend. This is always done based on the native format returned by the prior call + to `onDeviceInit()`. Conversion between the device's native format and the format requested by the application will be handled by + miniaudio internally. + + Initialization of the context is quite simple. You need to do any necessary initialization of internal objects and then output the + callbacks defined in this structure. + + Once the context has been initialized you can initialize a device. Before doing so, however, the application may want to know which + physical devices are available. This is where `onContextEnumerateDevices()` comes in. This is fairly simple. For each device, fire the + given callback with, at a minimum, the basic information filled out in `ma_device_info`. When the callback returns `MA_FALSE`, enumeration + needs to stop and the `onContextEnumerateDevices()` function returns with a success code. + + Detailed device information can be retrieved from a device ID using `onContextGetDeviceInfo()`. This takes as input the device type and ID, + and on output returns detailed information about the device in `ma_device_info`. The `onContextGetDeviceInfo()` callback must handle the + case when the device ID is NULL, in which case information about the default device needs to be retrieved. + + Once the context has been created and the device ID retrieved (if using anything other than the default device), the device can be created. + This is a little bit more complicated than initialization of the context due to it's more complicated configuration. When initializing a + device, a duplex device may be requested. This means a separate data format needs to be specified for both playback and capture. 
On input, + the data format is set to what the application wants. On output it's set to the native format which should match as closely as possible to + the requested format. The conversion between the format requested by the application and the device's native format will be handled + internally by miniaudio. + + On input, if the sample format is set to `ma_format_unknown`, the backend is free to use whatever sample format it desires, so long as it's + supported by miniaudio. When the channel count is set to 0, the backend should use the device's native channel count. The same applies for + sample rate. For the channel map, the default should be used when `ma_channel_map_is_blank()` returns true (all channels set to + `MA_CHANNEL_NONE`). On input, the `periodSizeInFrames` or `periodSizeInMilliseconds` option should always be set. The backend should + inspect both of these variables. If `periodSizeInFrames` is set, it should take priority, otherwise it needs to be derived from the period + size in milliseconds (`periodSizeInMilliseconds`) and the sample rate, keeping in mind that the sample rate may be 0, in which case the + sample rate will need to be determined before calculating the period size in frames. On output, all members of the `ma_device_descriptor` + object should be set to a valid value, except for `periodSizeInMilliseconds` which is optional (`periodSizeInFrames` *must* be set). + + Starting and stopping of the device is done with `onDeviceStart()` and `onDeviceStop()` and should be self-explanatory. If the backend uses + asynchronous reading and writing, `onDeviceStart()` and `onDeviceStop()` should always be implemented. + + The handling of data delivery between the application and the device is the most complicated part of the process. To make this a bit + easier, some helper callbacks are available. If the backend uses a blocking read/write style of API, the `onDeviceRead()` and + `onDeviceWrite()` callbacks can optionally be implemented. These are blocking and work just like reading and writing from a file. If the + backend uses a callback for data delivery, that callback must call `ma_device_handle_backend_data_callback()` from within it's callback. + This allows miniaudio to then process any necessary data conversion and then pass it to the miniaudio data callback. + + If the backend requires absolute flexibility with it's data delivery, it can optionally implement the `onDeviceDataLoop()` callback + which will allow it to implement the logic that will run on the audio thread. This is much more advanced and is completely optional. + + The audio thread should run data delivery logic in a loop while `ma_device_get_state() == ma_device_state_started` and no errors have been + encountered. Do not start or stop the device here. That will be handled from outside the `onDeviceDataLoop()` callback. + + The invocation of the `onDeviceDataLoop()` callback will be handled by miniaudio. When you start the device, miniaudio will fire this + callback. When the device is stopped, the `ma_device_get_state() == ma_device_state_started` condition will fail and the loop will be terminated + which will then fall through to the part that stops the device. For an example on how to implement the `onDeviceDataLoop()` callback, + look at `ma_device_audio_thread__default_read_write()`. Implement the `onDeviceDataLoopWakeup()` callback if you need a mechanism to + wake up the audio thread. 
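+
+    To make the shape of that loop concrete, a skeleton might look like the following
+    (the my_* names are hypothetical, and the real work of exchanging frames with the
+    backend and handing them to miniaudio's data delivery machinery is elided):
+
+        static ma_result my_backend_device_data_loop(ma_device* pDevice)
+        {
+            ma_result result = MA_SUCCESS;
+
+            while (ma_device_get_state(pDevice) == ma_device_state_started) {
+                // Read and/or write one period's worth of frames, converting as required.
+                result = my_backend_process_one_period(pDevice);
+                if (result != MA_SUCCESS) {
+                    break;
+                }
+            }
+
+            return result;    // Starting and stopping the device is handled outside this callback.
+        }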
+ + If the backend supports an optimized retrieval of device information from an initialized `ma_device` object, it should implement the + `onDeviceGetInfo()` callback. This is optional, in which case it will fall back to `onContextGetDeviceInfo()` which is less efficient. + */ + struct ma_backend_callbacks + { + ma_result (* onContextInit)(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks); + ma_result (* onContextUninit)(ma_context* pContext); + ma_result (* onContextEnumerateDevices)(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData); + ma_result (* onContextGetDeviceInfo)(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo); + ma_result (* onDeviceInit)(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture); + ma_result (* onDeviceUninit)(ma_device* pDevice); + ma_result (* onDeviceStart)(ma_device* pDevice); + ma_result (* onDeviceStop)(ma_device* pDevice); + ma_result (* onDeviceRead)(ma_device* pDevice, void* pFrames, ma_uint32 frameCount, ma_uint32* pFramesRead); + ma_result (* onDeviceWrite)(ma_device* pDevice, const void* pFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten); + ma_result (* onDeviceDataLoop)(ma_device* pDevice); + ma_result (* onDeviceDataLoopWakeup)(ma_device* pDevice); + ma_result (* onDeviceGetInfo)(ma_device* pDevice, ma_device_type type, ma_device_info* pDeviceInfo); + }; + + struct ma_context_config + { + ma_log* pLog; + ma_thread_priority threadPriority; + size_t threadStackSize; + void* pUserData; + ma_allocation_callbacks allocationCallbacks; + struct + { + ma_bool32 useVerboseDeviceEnumeration; + } alsa; + struct + { + const char* pApplicationName; + const char* pServerName; + ma_bool32 tryAutoSpawn; /* Enables autospawning of the PulseAudio daemon if necessary. */ + } pulse; + struct + { + ma_ios_session_category sessionCategory; + ma_uint32 sessionCategoryOptions; + ma_bool32 noAudioSessionActivate; /* iOS only. When set to true, does not perform an explicit [[AVAudioSession sharedInstace] setActive:true] on initialization. */ + ma_bool32 noAudioSessionDeactivate; /* iOS only. When set to true, does not perform an explicit [[AVAudioSession sharedInstace] setActive:false] on uninitialization. */ + } coreaudio; + struct + { + const char* pClientName; + ma_bool32 tryStartServer; + } jack; + ma_backend_callbacks custom; + }; + + /* WASAPI specific structure for some commands which must run on a common thread due to bugs in WASAPI. */ + typedef struct + { + int code; + ma_event* pEvent; /* This will be signalled when the event is complete. */ + union + { + struct + { + int _unused; + } quit; + struct + { + ma_device_type deviceType; + void* pAudioClient; + void** ppAudioClientService; + ma_result* pResult; /* The result from creating the audio client service. */ + } createAudioClient; + struct + { + ma_device* pDevice; + ma_device_type deviceType; + } releaseAudioClient; + } data; + } ma_context_command__wasapi; + + struct ma_context + { + ma_backend_callbacks callbacks; + ma_backend backend; /* DirectSound, ALSA, etc. */ + ma_log* pLog; + ma_log log; /* Only used if the log is owned by the context. The pLog member will be set to &log in this case. 
*/ + ma_thread_priority threadPriority; + size_t threadStackSize; + void* pUserData; + ma_allocation_callbacks allocationCallbacks; + ma_mutex deviceEnumLock; /* Used to make ma_context_get_devices() thread safe. */ + ma_mutex deviceInfoLock; /* Used to make ma_context_get_device_info() thread safe. */ + ma_uint32 deviceInfoCapacity; /* Total capacity of pDeviceInfos. */ + ma_uint32 playbackDeviceInfoCount; + ma_uint32 captureDeviceInfoCount; + ma_device_info* pDeviceInfos; /* Playback devices first, then capture. */ + + union + { +#ifdef MA_SUPPORT_WASAPI + struct + { + ma_thread commandThread; + ma_mutex commandLock; + ma_semaphore commandSem; + ma_uint32 commandIndex; + ma_uint32 commandCount; + ma_context_command__wasapi commands[4]; + ma_handle hAvrt; + ma_proc AvSetMmThreadCharacteristicsA; + ma_proc AvRevertMmThreadcharacteristics; + ma_handle hMMDevapi; + ma_proc ActivateAudioInterfaceAsync; + } wasapi; +#endif +#ifdef MA_SUPPORT_DSOUND + struct + { + ma_handle hDSoundDLL; + ma_proc DirectSoundCreate; + ma_proc DirectSoundEnumerateA; + ma_proc DirectSoundCaptureCreate; + ma_proc DirectSoundCaptureEnumerateA; + } dsound; +#endif +#ifdef MA_SUPPORT_WINMM + struct + { + ma_handle hWinMM; + ma_proc waveOutGetNumDevs; + ma_proc waveOutGetDevCapsA; + ma_proc waveOutOpen; + ma_proc waveOutClose; + ma_proc waveOutPrepareHeader; + ma_proc waveOutUnprepareHeader; + ma_proc waveOutWrite; + ma_proc waveOutReset; + ma_proc waveInGetNumDevs; + ma_proc waveInGetDevCapsA; + ma_proc waveInOpen; + ma_proc waveInClose; + ma_proc waveInPrepareHeader; + ma_proc waveInUnprepareHeader; + ma_proc waveInAddBuffer; + ma_proc waveInStart; + ma_proc waveInReset; + } winmm; +#endif +#ifdef MA_SUPPORT_ALSA + struct + { + ma_handle asoundSO; + ma_proc snd_pcm_open; + ma_proc snd_pcm_close; + ma_proc snd_pcm_hw_params_sizeof; + ma_proc snd_pcm_hw_params_any; + ma_proc snd_pcm_hw_params_set_format; + ma_proc snd_pcm_hw_params_set_format_first; + ma_proc snd_pcm_hw_params_get_format_mask; + ma_proc snd_pcm_hw_params_set_channels; + ma_proc snd_pcm_hw_params_set_channels_near; + ma_proc snd_pcm_hw_params_set_channels_minmax; + ma_proc snd_pcm_hw_params_set_rate_resample; + ma_proc snd_pcm_hw_params_set_rate; + ma_proc snd_pcm_hw_params_set_rate_near; + ma_proc snd_pcm_hw_params_set_buffer_size_near; + ma_proc snd_pcm_hw_params_set_periods_near; + ma_proc snd_pcm_hw_params_set_access; + ma_proc snd_pcm_hw_params_get_format; + ma_proc snd_pcm_hw_params_get_channels; + ma_proc snd_pcm_hw_params_get_channels_min; + ma_proc snd_pcm_hw_params_get_channels_max; + ma_proc snd_pcm_hw_params_get_rate; + ma_proc snd_pcm_hw_params_get_rate_min; + ma_proc snd_pcm_hw_params_get_rate_max; + ma_proc snd_pcm_hw_params_get_buffer_size; + ma_proc snd_pcm_hw_params_get_periods; + ma_proc snd_pcm_hw_params_get_access; + ma_proc snd_pcm_hw_params_test_format; + ma_proc snd_pcm_hw_params_test_channels; + ma_proc snd_pcm_hw_params_test_rate; + ma_proc snd_pcm_hw_params; + ma_proc snd_pcm_sw_params_sizeof; + ma_proc snd_pcm_sw_params_current; + ma_proc snd_pcm_sw_params_get_boundary; + ma_proc snd_pcm_sw_params_set_avail_min; + ma_proc snd_pcm_sw_params_set_start_threshold; + ma_proc snd_pcm_sw_params_set_stop_threshold; + ma_proc snd_pcm_sw_params; + ma_proc snd_pcm_format_mask_sizeof; + ma_proc snd_pcm_format_mask_test; + ma_proc snd_pcm_get_chmap; + ma_proc snd_pcm_state; + ma_proc snd_pcm_prepare; + ma_proc snd_pcm_start; + ma_proc snd_pcm_drop; + ma_proc snd_pcm_drain; + ma_proc snd_pcm_reset; + ma_proc snd_device_name_hint; + ma_proc 
snd_device_name_get_hint; + ma_proc snd_card_get_index; + ma_proc snd_device_name_free_hint; + ma_proc snd_pcm_mmap_begin; + ma_proc snd_pcm_mmap_commit; + ma_proc snd_pcm_recover; + ma_proc snd_pcm_readi; + ma_proc snd_pcm_writei; + ma_proc snd_pcm_avail; + ma_proc snd_pcm_avail_update; + ma_proc snd_pcm_wait; + ma_proc snd_pcm_nonblock; + ma_proc snd_pcm_info; + ma_proc snd_pcm_info_sizeof; + ma_proc snd_pcm_info_get_name; + ma_proc snd_pcm_poll_descriptors; + ma_proc snd_pcm_poll_descriptors_count; + ma_proc snd_pcm_poll_descriptors_revents; + ma_proc snd_config_update_free_global; + + ma_mutex internalDeviceEnumLock; + ma_bool32 useVerboseDeviceEnumeration; + } alsa; +#endif +#ifdef MA_SUPPORT_PULSEAUDIO + struct + { + ma_handle pulseSO; + ma_proc pa_mainloop_new; + ma_proc pa_mainloop_free; + ma_proc pa_mainloop_quit; + ma_proc pa_mainloop_get_api; + ma_proc pa_mainloop_iterate; + ma_proc pa_mainloop_wakeup; + ma_proc pa_threaded_mainloop_new; + ma_proc pa_threaded_mainloop_free; + ma_proc pa_threaded_mainloop_start; + ma_proc pa_threaded_mainloop_stop; + ma_proc pa_threaded_mainloop_lock; + ma_proc pa_threaded_mainloop_unlock; + ma_proc pa_threaded_mainloop_wait; + ma_proc pa_threaded_mainloop_signal; + ma_proc pa_threaded_mainloop_accept; + ma_proc pa_threaded_mainloop_get_retval; + ma_proc pa_threaded_mainloop_get_api; + ma_proc pa_threaded_mainloop_in_thread; + ma_proc pa_threaded_mainloop_set_name; + ma_proc pa_context_new; + ma_proc pa_context_unref; + ma_proc pa_context_connect; + ma_proc pa_context_disconnect; + ma_proc pa_context_set_state_callback; + ma_proc pa_context_get_state; + ma_proc pa_context_get_sink_info_list; + ma_proc pa_context_get_source_info_list; + ma_proc pa_context_get_sink_info_by_name; + ma_proc pa_context_get_source_info_by_name; + ma_proc pa_operation_unref; + ma_proc pa_operation_get_state; + ma_proc pa_channel_map_init_extend; + ma_proc pa_channel_map_valid; + ma_proc pa_channel_map_compatible; + ma_proc pa_stream_new; + ma_proc pa_stream_unref; + ma_proc pa_stream_connect_playback; + ma_proc pa_stream_connect_record; + ma_proc pa_stream_disconnect; + ma_proc pa_stream_get_state; + ma_proc pa_stream_get_sample_spec; + ma_proc pa_stream_get_channel_map; + ma_proc pa_stream_get_buffer_attr; + ma_proc pa_stream_set_buffer_attr; + ma_proc pa_stream_get_device_name; + ma_proc pa_stream_set_write_callback; + ma_proc pa_stream_set_read_callback; + ma_proc pa_stream_set_suspended_callback; + ma_proc pa_stream_set_moved_callback; + ma_proc pa_stream_is_suspended; + ma_proc pa_stream_flush; + ma_proc pa_stream_drain; + ma_proc pa_stream_is_corked; + ma_proc pa_stream_cork; + ma_proc pa_stream_trigger; + ma_proc pa_stream_begin_write; + ma_proc pa_stream_write; + ma_proc pa_stream_peek; + ma_proc pa_stream_drop; + ma_proc pa_stream_writable_size; + ma_proc pa_stream_readable_size; + + /*pa_mainloop**/ ma_ptr pMainLoop; + /*pa_context**/ ma_ptr pPulseContext; + char* pApplicationName; /* Set when the context is initialized. Used by devices for their local pa_context objects. */ + char* pServerName; /* Set when the context is initialized. Used by devices for their local pa_context objects. 
*/ + } pulse; +#endif +#ifdef MA_SUPPORT_JACK + struct + { + ma_handle jackSO; + ma_proc jack_client_open; + ma_proc jack_client_close; + ma_proc jack_client_name_size; + ma_proc jack_set_process_callback; + ma_proc jack_set_buffer_size_callback; + ma_proc jack_on_shutdown; + ma_proc jack_get_sample_rate; + ma_proc jack_get_buffer_size; + ma_proc jack_get_ports; + ma_proc jack_activate; + ma_proc jack_deactivate; + ma_proc jack_connect; + ma_proc jack_port_register; + ma_proc jack_port_name; + ma_proc jack_port_get_buffer; + ma_proc jack_free; + + char* pClientName; + ma_bool32 tryStartServer; + } jack; +#endif +#ifdef MA_SUPPORT_COREAUDIO + struct + { + ma_handle hCoreFoundation; + ma_proc CFStringGetCString; + ma_proc CFRelease; + + ma_handle hCoreAudio; + ma_proc AudioObjectGetPropertyData; + ma_proc AudioObjectGetPropertyDataSize; + ma_proc AudioObjectSetPropertyData; + ma_proc AudioObjectAddPropertyListener; + ma_proc AudioObjectRemovePropertyListener; + + ma_handle hAudioUnit; /* Could possibly be set to AudioToolbox on later versions of macOS. */ + ma_proc AudioComponentFindNext; + ma_proc AudioComponentInstanceDispose; + ma_proc AudioComponentInstanceNew; + ma_proc AudioOutputUnitStart; + ma_proc AudioOutputUnitStop; + ma_proc AudioUnitAddPropertyListener; + ma_proc AudioUnitGetPropertyInfo; + ma_proc AudioUnitGetProperty; + ma_proc AudioUnitSetProperty; + ma_proc AudioUnitInitialize; + ma_proc AudioUnitRender; + + /*AudioComponent*/ ma_ptr component; + ma_bool32 noAudioSessionDeactivate; /* For tracking whether or not the iOS audio session should be explicitly deactivated. Set from the config in ma_context_init__coreaudio(). */ + } coreaudio; +#endif +#ifdef MA_SUPPORT_SNDIO + struct + { + ma_handle sndioSO; + ma_proc sio_open; + ma_proc sio_close; + ma_proc sio_setpar; + ma_proc sio_getpar; + ma_proc sio_getcap; + ma_proc sio_start; + ma_proc sio_stop; + ma_proc sio_read; + ma_proc sio_write; + ma_proc sio_onmove; + ma_proc sio_nfds; + ma_proc sio_pollfd; + ma_proc sio_revents; + ma_proc sio_eof; + ma_proc sio_setvol; + ma_proc sio_onvol; + ma_proc sio_initpar; + } sndio; +#endif +#ifdef MA_SUPPORT_AUDIO4 + struct + { + int _unused; + } audio4; +#endif +#ifdef MA_SUPPORT_OSS + struct + { + int versionMajor; + int versionMinor; + } oss; +#endif +#ifdef MA_SUPPORT_AAUDIO + struct + { + ma_handle hAAudio; /* libaaudio.so */ + ma_proc AAudio_createStreamBuilder; + ma_proc AAudioStreamBuilder_delete; + ma_proc AAudioStreamBuilder_setDeviceId; + ma_proc AAudioStreamBuilder_setDirection; + ma_proc AAudioStreamBuilder_setSharingMode; + ma_proc AAudioStreamBuilder_setFormat; + ma_proc AAudioStreamBuilder_setChannelCount; + ma_proc AAudioStreamBuilder_setSampleRate; + ma_proc AAudioStreamBuilder_setBufferCapacityInFrames; + ma_proc AAudioStreamBuilder_setFramesPerDataCallback; + ma_proc AAudioStreamBuilder_setDataCallback; + ma_proc AAudioStreamBuilder_setErrorCallback; + ma_proc AAudioStreamBuilder_setPerformanceMode; + ma_proc AAudioStreamBuilder_setUsage; + ma_proc AAudioStreamBuilder_setContentType; + ma_proc AAudioStreamBuilder_setInputPreset; + ma_proc AAudioStreamBuilder_setAllowedCapturePolicy; + ma_proc AAudioStreamBuilder_openStream; + ma_proc AAudioStream_close; + ma_proc AAudioStream_getState; + ma_proc AAudioStream_waitForStateChange; + ma_proc AAudioStream_getFormat; + ma_proc AAudioStream_getChannelCount; + ma_proc AAudioStream_getSampleRate; + ma_proc AAudioStream_getBufferCapacityInFrames; + ma_proc AAudioStream_getFramesPerDataCallback; + ma_proc 
AAudioStream_getFramesPerBurst; + ma_proc AAudioStream_requestStart; + ma_proc AAudioStream_requestStop; + ma_device_job_thread jobThread; /* For processing operations outside of the error callback, specifically device disconnections and rerouting. */ + } aaudio; +#endif +#ifdef MA_SUPPORT_OPENSL + struct + { + ma_handle libOpenSLES; + ma_handle SL_IID_ENGINE; + ma_handle SL_IID_AUDIOIODEVICECAPABILITIES; + ma_handle SL_IID_ANDROIDSIMPLEBUFFERQUEUE; + ma_handle SL_IID_RECORD; + ma_handle SL_IID_PLAY; + ma_handle SL_IID_OUTPUTMIX; + ma_handle SL_IID_ANDROIDCONFIGURATION; + ma_proc slCreateEngine; + } opensl; +#endif +#ifdef MA_SUPPORT_WEBAUDIO + struct + { + int _unused; + } webaudio; +#endif +#ifdef MA_SUPPORT_NULL + struct + { + int _unused; + } null_backend; +#endif + }; + + union + { +#if defined(MA_WIN32) + struct + { + /*HMODULE*/ ma_handle hOle32DLL; + ma_proc CoInitialize; + ma_proc CoInitializeEx; + ma_proc CoUninitialize; + ma_proc CoCreateInstance; + ma_proc CoTaskMemFree; + ma_proc PropVariantClear; + ma_proc StringFromGUID2; + + /*HMODULE*/ ma_handle hUser32DLL; + ma_proc GetForegroundWindow; + ma_proc GetDesktopWindow; + + /*HMODULE*/ ma_handle hAdvapi32DLL; + ma_proc RegOpenKeyExA; + ma_proc RegCloseKey; + ma_proc RegQueryValueExA; + + /*HRESULT*/ long CoInitializeResult; + } win32; +#endif +#ifdef MA_POSIX + struct + { + int _unused; + } posix; +#endif + int _unused; + }; + }; + + struct ma_device + { + ma_context* pContext; + ma_device_type type; + ma_uint32 sampleRate; + ma_atomic_device_state state; /* The state of the device is variable and can change at any time on any thread. Must be used atomically. */ + ma_device_data_proc onData; /* Set once at initialization time and should not be changed after. */ + ma_device_notification_proc onNotification; /* Set once at initialization time and should not be changed after. */ + ma_stop_proc onStop; /* DEPRECATED. Use the notification callback instead. Set once at initialization time and should not be changed after. */ + void* pUserData; /* Application defined data. */ + ma_mutex startStopLock; + ma_event wakeupEvent; + ma_event startEvent; + ma_event stopEvent; + ma_thread thread; + ma_result workResult; /* This is set by the worker thread after it's finished doing a job. */ + ma_bool8 isOwnerOfContext; /* When set to true, uninitializing the device will also uninitialize the context. Set to true when NULL is passed into ma_device_init(). */ + ma_bool8 noPreSilencedOutputBuffer; + ma_bool8 noClip; + ma_bool8 noDisableDenormals; + ma_bool8 noFixedSizedCallback; + ma_atomic_float masterVolumeFactor; /* Linear 0..1. Can be read and written simultaneously by different threads. Must be used atomically. */ + ma_duplex_rb duplexRB; /* Intermediary buffer for duplex device on asynchronous backends. */ + struct + { + ma_resample_algorithm algorithm; + ma_resampling_backend_vtable* pBackendVTable; + void* pBackendUserData; + struct + { + ma_uint32 lpfOrder; + } linear; + } resampling; + struct + { + ma_device_id* pID; /* Set to NULL if using default ID, otherwise set to the address of "id". */ + ma_device_id id; /* If using an explicit device, will be set to a copy of the ID used for initialization. Otherwise cleared to 0. */ + char name[MA_MAX_DEVICE_NAME_LENGTH + 1]; /* Maybe temporary. Likely to be replaced with a query API. */ + ma_share_mode shareMode; /* Set to whatever was passed in when the device was initialized. 
*/ + ma_format format; + ma_uint32 channels; + ma_channel channelMap[MA_MAX_CHANNELS]; + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_channel internalChannelMap[MA_MAX_CHANNELS]; + ma_uint32 internalPeriodSizeInFrames; + ma_uint32 internalPeriods; + ma_channel_mix_mode channelMixMode; + ma_bool32 calculateLFEFromSpatialChannels; + ma_data_converter converter; + void* pIntermediaryBuffer; /* For implementing fixed sized buffer callbacks. Will be null if using variable sized callbacks. */ + ma_uint32 intermediaryBufferCap; + ma_uint32 intermediaryBufferLen; /* How many valid frames are sitting in the intermediary buffer. */ + void* pInputCache; /* In external format. Can be null. */ + ma_uint64 inputCacheCap; + ma_uint64 inputCacheConsumed; + ma_uint64 inputCacheRemaining; + } playback; + struct + { + ma_device_id* pID; /* Set to NULL if using default ID, otherwise set to the address of "id". */ + ma_device_id id; /* If using an explicit device, will be set to a copy of the ID used for initialization. Otherwise cleared to 0. */ + char name[MA_MAX_DEVICE_NAME_LENGTH + 1]; /* Maybe temporary. Likely to be replaced with a query API. */ + ma_share_mode shareMode; /* Set to whatever was passed in when the device was initialized. */ + ma_format format; + ma_uint32 channels; + ma_channel channelMap[MA_MAX_CHANNELS]; + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_channel internalChannelMap[MA_MAX_CHANNELS]; + ma_uint32 internalPeriodSizeInFrames; + ma_uint32 internalPeriods; + ma_channel_mix_mode channelMixMode; + ma_bool32 calculateLFEFromSpatialChannels; + ma_data_converter converter; + void* pIntermediaryBuffer; /* For implementing fixed sized buffer callbacks. Will be null if using variable sized callbacks. */ + ma_uint32 intermediaryBufferCap; + ma_uint32 intermediaryBufferLen; /* How many valid frames are sitting in the intermediary buffer. */ + } capture; + + union + { +#ifdef MA_SUPPORT_WASAPI + struct + { + /*IAudioClient**/ ma_ptr pAudioClientPlayback; + /*IAudioClient**/ ma_ptr pAudioClientCapture; + /*IAudioRenderClient**/ ma_ptr pRenderClient; + /*IAudioCaptureClient**/ ma_ptr pCaptureClient; + /*IMMDeviceEnumerator**/ ma_ptr pDeviceEnumerator; /* Used for IMMNotificationClient notifications. Required for detecting default device changes. */ + ma_IMMNotificationClient notificationClient; + /*HANDLE*/ ma_handle hEventPlayback; /* Auto reset. Initialized to signaled. */ + /*HANDLE*/ ma_handle hEventCapture; /* Auto reset. Initialized to unsignaled. */ + ma_uint32 actualBufferSizeInFramesPlayback; /* Value from GetBufferSize(). internalPeriodSizeInFrames is not set to the _actual_ buffer size when low-latency shared mode is being used due to the way the IAudioClient3 API works. */ + ma_uint32 actualBufferSizeInFramesCapture; + ma_uint32 originalPeriodSizeInFrames; + ma_uint32 originalPeriodSizeInMilliseconds; + ma_uint32 originalPeriods; + ma_performance_profile originalPerformanceProfile; + ma_uint32 periodSizeInFramesPlayback; + ma_uint32 periodSizeInFramesCapture; + void* pMappedBufferCapture; + ma_uint32 mappedBufferCaptureCap; + ma_uint32 mappedBufferCaptureLen; + void* pMappedBufferPlayback; + ma_uint32 mappedBufferPlaybackCap; + ma_uint32 mappedBufferPlaybackLen; + ma_atomic_bool32 isStartedCapture; /* Can be read and written simultaneously across different threads. Must be used atomically, and must be 32-bit. 
*/ + ma_atomic_bool32 isStartedPlayback; /* Can be read and written simultaneously across different threads. Must be used atomically, and must be 32-bit. */ + ma_uint32 loopbackProcessID; + ma_bool8 loopbackProcessExclude; + ma_bool8 noAutoConvertSRC; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM. */ + ma_bool8 noDefaultQualitySRC; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY. */ + ma_bool8 noHardwareOffloading; + ma_bool8 allowCaptureAutoStreamRouting; + ma_bool8 allowPlaybackAutoStreamRouting; + ma_bool8 isDetachedPlayback; + ma_bool8 isDetachedCapture; + ma_wasapi_usage usage; + void* hAvrtHandle; + ma_mutex rerouteLock; + } wasapi; +#endif +#ifdef MA_SUPPORT_DSOUND + struct + { + /*LPDIRECTSOUND*/ ma_ptr pPlayback; + /*LPDIRECTSOUNDBUFFER*/ ma_ptr pPlaybackPrimaryBuffer; + /*LPDIRECTSOUNDBUFFER*/ ma_ptr pPlaybackBuffer; + /*LPDIRECTSOUNDCAPTURE*/ ma_ptr pCapture; + /*LPDIRECTSOUNDCAPTUREBUFFER*/ ma_ptr pCaptureBuffer; + } dsound; +#endif +#ifdef MA_SUPPORT_WINMM + struct + { + /*HWAVEOUT*/ ma_handle hDevicePlayback; + /*HWAVEIN*/ ma_handle hDeviceCapture; + /*HANDLE*/ ma_handle hEventPlayback; + /*HANDLE*/ ma_handle hEventCapture; + ma_uint32 fragmentSizeInFrames; + ma_uint32 iNextHeaderPlayback; /* [0,periods). Used as an index into pWAVEHDRPlayback. */ + ma_uint32 iNextHeaderCapture; /* [0,periods). Used as an index into pWAVEHDRCapture. */ + ma_uint32 headerFramesConsumedPlayback; /* The number of PCM frames consumed in the buffer in pWAVEHEADER[iNextHeader]. */ + ma_uint32 headerFramesConsumedCapture; /* ^^^ */ + /*WAVEHDR**/ ma_uint8* pWAVEHDRPlayback; /* One instantiation for each period. */ + /*WAVEHDR**/ ma_uint8* pWAVEHDRCapture; /* One instantiation for each period. */ + ma_uint8* pIntermediaryBufferPlayback; + ma_uint8* pIntermediaryBufferCapture; + ma_uint8* _pHeapData; /* Used internally and is used for the heap allocated data for the intermediary buffer and the WAVEHDR structures. */ + } winmm; +#endif +#ifdef MA_SUPPORT_ALSA + struct + { + /*snd_pcm_t**/ ma_ptr pPCMPlayback; + /*snd_pcm_t**/ ma_ptr pPCMCapture; + /*struct pollfd**/ void* pPollDescriptorsPlayback; + /*struct pollfd**/ void* pPollDescriptorsCapture; + int pollDescriptorCountPlayback; + int pollDescriptorCountCapture; + int wakeupfdPlayback; /* eventfd for waking up from poll() when the playback device is stopped. */ + int wakeupfdCapture; /* eventfd for waking up from poll() when the capture device is stopped. */ + ma_bool8 isUsingMMapPlayback; + ma_bool8 isUsingMMapCapture; + } alsa; +#endif +#ifdef MA_SUPPORT_PULSEAUDIO + struct + { + /*pa_mainloop**/ ma_ptr pMainLoop; + /*pa_context**/ ma_ptr pPulseContext; + /*pa_stream**/ ma_ptr pStreamPlayback; + /*pa_stream**/ ma_ptr pStreamCapture; + } pulse; +#endif +#ifdef MA_SUPPORT_JACK + struct + { + /*jack_client_t**/ ma_ptr pClient; + /*jack_port_t**/ ma_ptr* ppPortsPlayback; + /*jack_port_t**/ ma_ptr* ppPortsCapture; + float* pIntermediaryBufferPlayback; /* Typed as a float because JACK is always floating point. */ + float* pIntermediaryBufferCapture; + } jack; +#endif +#ifdef MA_SUPPORT_COREAUDIO + struct + { + ma_uint32 deviceObjectIDPlayback; + ma_uint32 deviceObjectIDCapture; + /*AudioUnit*/ ma_ptr audioUnitPlayback; + /*AudioUnit*/ ma_ptr audioUnitCapture; + /*AudioBufferList**/ ma_ptr pAudioBufferList; /* Only used for input devices. */ + ma_uint32 audioBufferCapInFrames; /* Only used for input devices. The capacity in frames of each buffer in pAudioBufferList. 
*/ + ma_event stopEvent; + ma_uint32 originalPeriodSizeInFrames; + ma_uint32 originalPeriodSizeInMilliseconds; + ma_uint32 originalPeriods; + ma_performance_profile originalPerformanceProfile; + ma_bool32 isDefaultPlaybackDevice; + ma_bool32 isDefaultCaptureDevice; + ma_bool32 isSwitchingPlaybackDevice; /* <-- Set to true when the default device has changed and miniaudio is in the process of switching. */ + ma_bool32 isSwitchingCaptureDevice; /* <-- Set to true when the default device has changed and miniaudio is in the process of switching. */ + void* pNotificationHandler; /* Only used on mobile platforms. Obj-C object for handling route changes. */ + } coreaudio; +#endif +#ifdef MA_SUPPORT_SNDIO + struct + { + ma_ptr handlePlayback; + ma_ptr handleCapture; + ma_bool32 isStartedPlayback; + ma_bool32 isStartedCapture; + } sndio; +#endif +#ifdef MA_SUPPORT_AUDIO4 + struct + { + int fdPlayback; + int fdCapture; + } audio4; +#endif +#ifdef MA_SUPPORT_OSS + struct + { + int fdPlayback; + int fdCapture; + } oss; +#endif +#ifdef MA_SUPPORT_AAUDIO + struct + { + /*AAudioStream**/ ma_ptr pStreamPlayback; + /*AAudioStream**/ ma_ptr pStreamCapture; + ma_aaudio_usage usage; + ma_aaudio_content_type contentType; + ma_aaudio_input_preset inputPreset; + ma_aaudio_allowed_capture_policy allowedCapturePolicy; + ma_bool32 noAutoStartAfterReroute; + } aaudio; +#endif +#ifdef MA_SUPPORT_OPENSL + struct + { + /*SLObjectItf*/ ma_ptr pOutputMixObj; + /*SLOutputMixItf*/ ma_ptr pOutputMix; + /*SLObjectItf*/ ma_ptr pAudioPlayerObj; + /*SLPlayItf*/ ma_ptr pAudioPlayer; + /*SLObjectItf*/ ma_ptr pAudioRecorderObj; + /*SLRecordItf*/ ma_ptr pAudioRecorder; + /*SLAndroidSimpleBufferQueueItf*/ ma_ptr pBufferQueuePlayback; + /*SLAndroidSimpleBufferQueueItf*/ ma_ptr pBufferQueueCapture; + ma_bool32 isDrainingCapture; + ma_bool32 isDrainingPlayback; + ma_uint32 currentBufferIndexPlayback; + ma_uint32 currentBufferIndexCapture; + ma_uint8* pBufferPlayback; /* This is malloc()'d and is used for storing audio data. Typed as ma_uint8 for easy offsetting. */ + ma_uint8* pBufferCapture; + } opensl; +#endif +#ifdef MA_SUPPORT_WEBAUDIO + struct + { + /* AudioWorklets path. */ + /* EMSCRIPTEN_WEBAUDIO_T */ int audioContext; + /* EMSCRIPTEN_WEBAUDIO_T */ int audioWorklet; + float* pIntermediaryBuffer; + void* pStackBuffer; + ma_result initResult; /* Set to MA_BUSY while initialization is in progress. */ + int deviceIndex; /* We store the device in a list on the JavaScript side. This is used to map our C object to the JS object. */ + } webaudio; +#endif +#ifdef MA_SUPPORT_NULL + struct + { + ma_thread deviceThread; + ma_event operationEvent; + ma_event operationCompletionEvent; + ma_semaphore operationSemaphore; + ma_uint32 operation; + ma_result operationResult; + ma_timer timer; + double priorRunTime; + ma_uint32 currentPeriodFramesRemainingPlayback; + ma_uint32 currentPeriodFramesRemainingCapture; + ma_uint64 lastProcessedFramePlayback; + ma_uint64 lastProcessedFrameCapture; + ma_atomic_bool32 isStarted; /* Read and written by multiple threads. Must be used atomically, and must be 32-bit for compiler compatibility. */ + } null_device; +#endif + }; + }; +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(pop) +#elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) +#pragma GCC diagnostic pop /* For ISO C99 doesn't support unnamed structs/unions [-Wpedantic] */ +#endif + + /* + Initializes a `ma_context_config` object. 
+ + + Return Value + ------------ + A `ma_context_config` initialized to defaults. + + + Remarks + ------- + You must always use this to initialize the default state of the `ma_context_config` object. Not using this will result in your program breaking when miniaudio + is updated and new members are added to `ma_context_config`. It also sets logical defaults. + + You can override members of the returned object by changing it's members directly. + + + See Also + -------- + ma_context_init() + */ + MA_API ma_context_config ma_context_config_init(void); + + /* + Initializes a context. + + The context is used for selecting and initializing an appropriate backend and to represent the backend at a more global level than that of an individual + device. There is one context to many devices, and a device is created from a context. A context is required to enumerate devices. + + + Parameters + ---------- + backends (in, optional) + A list of backends to try initializing, in priority order. Can be NULL, in which case it uses default priority order. + + backendCount (in, optional) + The number of items in `backend`. Ignored if `backend` is NULL. + + pConfig (in, optional) + The context configuration. + + pContext (in) + A pointer to the context object being initialized. + + + Return Value + ------------ + MA_SUCCESS if successful; any other error code otherwise. + + + Thread Safety + ------------- + Unsafe. Do not call this function across multiple threads as some backends read and write to global state. + + + Remarks + ------- + When `backends` is NULL, the default priority order will be used. Below is a list of backends in priority order: + + |-------------|-----------------------|--------------------------------------------------------| + | Name | Enum Name | Supported Operating Systems | + |-------------|-----------------------|--------------------------------------------------------| + | WASAPI | ma_backend_wasapi | Windows Vista+ | + | DirectSound | ma_backend_dsound | Windows XP+ | + | WinMM | ma_backend_winmm | Windows XP+ (may work on older versions, but untested) | + | Core Audio | ma_backend_coreaudio | macOS, iOS | + | ALSA | ma_backend_alsa | Linux | + | PulseAudio | ma_backend_pulseaudio | Cross Platform (disabled on Windows, BSD and Android) | + | JACK | ma_backend_jack | Cross Platform (disabled on BSD and Android) | + | sndio | ma_backend_sndio | OpenBSD | + | audio(4) | ma_backend_audio4 | NetBSD, OpenBSD | + | OSS | ma_backend_oss | FreeBSD | + | AAudio | ma_backend_aaudio | Android 8+ | + | OpenSL|ES | ma_backend_opensl | Android (API level 16+) | + | Web Audio | ma_backend_webaudio | Web (via Emscripten) | + | Null | ma_backend_null | Cross Platform (not used on Web) | + |-------------|-----------------------|--------------------------------------------------------| + + The context can be configured via the `pConfig` argument. The config object is initialized with `ma_context_config_init()`. Individual configuration settings + can then be set directly on the structure. Below are the members of the `ma_context_config` object. + + pLog + A pointer to the `ma_log` to post log messages to. Can be NULL if the application does not + require logging. See the `ma_log` API for details on how to use the logging system. + + threadPriority + The desired priority to use for the audio thread. 
Allowable values include the following: + + |--------------------------------------| + | Thread Priority | + |--------------------------------------| + | ma_thread_priority_idle | + | ma_thread_priority_lowest | + | ma_thread_priority_low | + | ma_thread_priority_normal | + | ma_thread_priority_high | + | ma_thread_priority_highest (default) | + | ma_thread_priority_realtime | + | ma_thread_priority_default | + |--------------------------------------| + + threadStackSize + The desired size of the stack for the audio thread. Defaults to the operating system's default. + + pUserData + A pointer to application-defined data. This can be accessed from the context object directly such as `context.pUserData`. + + allocationCallbacks + Structure containing custom allocation callbacks. Leaving this at defaults will cause it to use MA_MALLOC, MA_REALLOC and MA_FREE. These allocation + callbacks will be used for anything tied to the context, including devices. + + alsa.useVerboseDeviceEnumeration + ALSA will typically enumerate many different devices which can be intrusive and not user-friendly. To combat this, miniaudio will enumerate only unique + card/device pairs by default. The problem with this is that you lose a bit of flexibility and control. Setting alsa.useVerboseDeviceEnumeration makes + it so the ALSA backend includes all devices. Defaults to false. + + pulse.pApplicationName + PulseAudio only. The application name to use when initializing the PulseAudio context with `pa_context_new()`. + + pulse.pServerName + PulseAudio only. The name of the server to connect to with `pa_context_connect()`. + + pulse.tryAutoSpawn + PulseAudio only. Whether or not to try automatically starting the PulseAudio daemon. Defaults to false. If you set this to true, keep in mind that + miniaudio uses a trial and error method to find the most appropriate backend, and this will result in the PulseAudio daemon starting which may be + intrusive for the end user. + + coreaudio.sessionCategory + iOS only. The session category to use for the shared AudioSession instance. Below is a list of allowable values and their Core Audio equivalents. + + |-----------------------------------------|-------------------------------------| + | miniaudio Token | Core Audio Token | + |-----------------------------------------|-------------------------------------| + | ma_ios_session_category_ambient | AVAudioSessionCategoryAmbient | + | ma_ios_session_category_solo_ambient | AVAudioSessionCategorySoloAmbient | + | ma_ios_session_category_playback | AVAudioSessionCategoryPlayback | + | ma_ios_session_category_record | AVAudioSessionCategoryRecord | + | ma_ios_session_category_play_and_record | AVAudioSessionCategoryPlayAndRecord | + | ma_ios_session_category_multi_route | AVAudioSessionCategoryMultiRoute | + | ma_ios_session_category_none | AVAudioSessionCategoryAmbient | + | ma_ios_session_category_default | AVAudioSessionCategoryAmbient | + |-----------------------------------------|-------------------------------------| + + coreaudio.sessionCategoryOptions + iOS only. Session category options to use with the shared AudioSession instance. Below is a list of allowable values and their Core Audio equivalents. 
+ + |---------------------------------------------------------------------------|------------------------------------------------------------------| + | miniaudio Token | Core Audio Token | + |---------------------------------------------------------------------------|------------------------------------------------------------------| + | ma_ios_session_category_option_mix_with_others | AVAudioSessionCategoryOptionMixWithOthers | + | ma_ios_session_category_option_duck_others | AVAudioSessionCategoryOptionDuckOthers | + | ma_ios_session_category_option_allow_bluetooth | AVAudioSessionCategoryOptionAllowBluetooth | + | ma_ios_session_category_option_default_to_speaker | AVAudioSessionCategoryOptionDefaultToSpeaker | + | ma_ios_session_category_option_interrupt_spoken_audio_and_mix_with_others | AVAudioSessionCategoryOptionInterruptSpokenAudioAndMixWithOthers | + | ma_ios_session_category_option_allow_bluetooth_a2dp | AVAudioSessionCategoryOptionAllowBluetoothA2DP | + | ma_ios_session_category_option_allow_air_play | AVAudioSessionCategoryOptionAllowAirPlay | + |---------------------------------------------------------------------------|------------------------------------------------------------------| + + coreaudio.noAudioSessionActivate + iOS only. When set to true, does not perform an explicit [[AVAudioSession sharedInstace] setActive:true] on initialization. + + coreaudio.noAudioSessionDeactivate + iOS only. When set to true, does not perform an explicit [[AVAudioSession sharedInstace] setActive:false] on uninitialization. + + jack.pClientName + The name of the client to pass to `jack_client_open()`. + + jack.tryStartServer + Whether or not to try auto-starting the JACK server. Defaults to false. + + + It is recommended that only a single context is active at any given time because it's a bulky data structure which performs run-time linking for the + relevant backends every time it's initialized. + + The location of the context cannot change throughout it's lifetime. Consider allocating the `ma_context` object with `malloc()` if this is an issue. The + reason for this is that a pointer to the context is stored in the `ma_device` structure. + + + Example 1 - Default Initialization + ---------------------------------- + The example below shows how to initialize the context using the default configuration. + + ```c + ma_context context; + ma_result result = ma_context_init(NULL, 0, NULL, &context); + if (result != MA_SUCCESS) { + // Error. + } + ``` + + + Example 2 - Custom Configuration + -------------------------------- + The example below shows how to initialize the context using custom backend priorities and a custom configuration. In this hypothetical example, the program + wants to prioritize ALSA over PulseAudio on Linux. They also want to avoid using the WinMM backend on Windows because it's latency is too high. They also + want an error to be returned if no valid backend is available which they achieve by excluding the Null backend. + + For the configuration, the program wants to capture any log messages so they can, for example, route it to a log file and user interface. 
+ + ```c + ma_backend backends[] = { + ma_backend_alsa, + ma_backend_pulseaudio, + ma_backend_wasapi, + ma_backend_dsound + }; + + ma_log log; + ma_log_init(&log); + ma_log_register_callback(&log, ma_log_callback_init(my_log_callback, pMyLogUserData)); + + ma_context_config config = ma_context_config_init(); + config.pLog = &log; // Specify a custom log object in the config so any logs that are posted from ma_context_init() are captured. + + ma_context context; + ma_result result = ma_context_init(backends, sizeof(backends)/sizeof(backends[0]), &config, &context); + if (result != MA_SUCCESS) { + // Error. + if (result == MA_NO_BACKEND) { + // Couldn't find an appropriate backend. + } + } + + // You could also attach a log callback post-initialization: + ma_log_register_callback(ma_context_get_log(&context), ma_log_callback_init(my_log_callback, pMyLogUserData)); + ``` + + + See Also + -------- + ma_context_config_init() + ma_context_uninit() + */ + MA_API ma_result ma_context_init(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pConfig, ma_context* pContext); + + /* + Uninitializes a context. + + + Return Value + ------------ + MA_SUCCESS if successful; any other error code otherwise. + + + Thread Safety + ------------- + Unsafe. Do not call this function across multiple threads as some backends read and write to global state. + + + Remarks + ------- + Results are undefined if you call this while any device created by this context is still active. + + + See Also + -------- + ma_context_init() + */ + MA_API ma_result ma_context_uninit(ma_context* pContext); + + /* + Retrieves the size of the ma_context object. + + This is mainly for the purpose of bindings to know how much memory to allocate. + */ + MA_API size_t ma_context_sizeof(void); + + /* + Retrieves a pointer to the log object associated with this context. + + + Remarks + ------- + Pass the returned pointer to `ma_log_post()`, `ma_log_postv()` or `ma_log_postf()` to post a log + message. + + You can attach your own logging callback to the log with `ma_log_register_callback()`. + + + Return Value + ------------ + A pointer to the `ma_log` object that the context uses to post log messages. If some error occurs, + NULL will be returned. + */ + MA_API ma_log* ma_context_get_log(ma_context* pContext); + + /* + Enumerates over every device (both playback and capture). + + This is a lower-level enumeration function than the easier-to-use `ma_context_get_devices()`. Use `ma_context_enumerate_devices()` if you would rather not incur + an internal heap allocation, or if it simply suits your code better. + + Note that this only retrieves the ID and name/description of the device. The reason for only retrieving basic information is that it would otherwise require + opening the backend device in order to probe it for more detailed information which can be inefficient. Consider using `ma_context_get_device_info()` for this, + but don't call it from within the enumeration callback. + + Returning false from the callback will stop enumeration. Returning true will continue enumeration. + + + Parameters + ---------- + pContext (in) + A pointer to the context performing the enumeration. + + callback (in) + The callback to fire for each enumerated device. + + pUserData (in) + A pointer to application-defined data passed to the callback. + + + Return Value + ------------ + MA_SUCCESS if successful; any other error code otherwise. + + + Thread Safety + ------------- + Safe. This is guarded using a simple mutex lock. 
+ + + Remarks + ------- + Do _not_ assume the first enumerated device of a given type is the default device. + + Some backends and platforms may only support default playback and capture devices. + + In general, you should not do anything complicated from within the callback. In particular, do not try initializing a device from within the callback. Also, + do not try to call `ma_context_get_device_info()` from within the callback. + + Consider using `ma_context_get_devices()` for a simpler and safer API, albeit at the expense of an internal heap allocation. + + + Example 1 - Simple Enumeration + ------------------------------ + ma_bool32 my_device_enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData) + { + printf("Device Name: %s\n", pInfo->name); + return MA_TRUE; + } + + ma_result result = ma_context_enumerate_devices(&context, my_device_enum_callback, pMyUserData); + if (result != MA_SUCCESS) { + // Error. + } + + + See Also + -------- + ma_context_get_devices() + */ + MA_API ma_result ma_context_enumerate_devices(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData); + + /* + Retrieves basic information about every active playback and/or capture device. + + This function will allocate memory internally for the device lists and return a pointer to them through the `ppPlaybackDeviceInfos` and `ppCaptureDeviceInfos` + parameters. If you do not want to incur the overhead of these allocations consider using `ma_context_enumerate_devices()` which will instead use a callback. + + + Parameters + ---------- + pContext (in) + A pointer to the context performing the enumeration. + + ppPlaybackDeviceInfos (out) + A pointer to a pointer that will receive the address of a buffer containing the list of `ma_device_info` structures for playback devices. + + pPlaybackDeviceCount (out) + A pointer to an unsigned integer that will receive the number of playback devices. + + ppCaptureDeviceInfos (out) + A pointer to a pointer that will receive the address of a buffer containing the list of `ma_device_info` structures for capture devices. + + pCaptureDeviceCount (out) + A pointer to an unsigned integer that will receive the number of capture devices. + + + Return Value + ------------ + MA_SUCCESS if successful; any other error code otherwise. + + + Thread Safety + ------------- + Unsafe. Since each call to this function invalidates the pointers from the previous call, you should not be calling this simultaneously across multiple + threads. Instead, you need to make a copy of the returned data with your own higher level synchronization. + + + Remarks + ------- + It is _not_ safe to assume the first device in the list is the default device. + + You can pass in NULL for the playback or capture lists in which case they'll be ignored. + + The returned pointers will become invalid upon the next call to this function, or when the context is uninitialized. Do not free the returned pointers. + + + See Also + -------- + ma_context_enumerate_devices() + */ + MA_API ma_result ma_context_get_devices(ma_context* pContext, ma_device_info** ppPlaybackDeviceInfos, ma_uint32* pPlaybackDeviceCount, ma_device_info** ppCaptureDeviceInfos, ma_uint32* pCaptureDeviceCount); + + /* + Retrieves information about a device of the given type, with the specified ID and share mode. + + + Parameters + ---------- + pContext (in) + A pointer to the context performing the query. + + deviceType (in) + The type of the device being queried. 
Must be either `ma_device_type_playback` or `ma_device_type_capture`. + + pDeviceID (in) + The ID of the device being queried. + + pDeviceInfo (out) + A pointer to the `ma_device_info` structure that will receive the device information. + + + Return Value + ------------ + MA_SUCCESS if successful; any other error code otherwise. + + + Thread Safety + ------------- + Safe. This is guarded using a simple mutex lock. + + + Remarks + ------- + Do _not_ call this from within the `ma_context_enumerate_devices()` callback. + + It's possible for a device to have different information and capabilities depending on whether or not it's opened in shared or exclusive mode. For example, in + shared mode, WASAPI always uses floating point samples for mixing, but in exclusive mode it can be anything. Therefore, this function allows you to specify + which share mode you want information for. Note that not all backends and devices support shared or exclusive mode, in which case this function will fail if + the requested share mode is unsupported. + + This leaves pDeviceInfo unmodified in the result of an error. + */ + MA_API ma_result ma_context_get_device_info(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo); + + /* + Determines if the given context supports loopback mode. + + + Parameters + ---------- + pContext (in) + A pointer to the context getting queried. + + + Return Value + ------------ + MA_TRUE if the context supports loopback mode; MA_FALSE otherwise. + */ + MA_API ma_bool32 ma_context_is_loopback_supported(ma_context* pContext); + + + + /* + Initializes a device config with default settings. + + + Parameters + ---------- + deviceType (in) + The type of the device this config is being initialized for. This must set to one of the following: + + |-------------------------| + | Device Type | + |-------------------------| + | ma_device_type_playback | + | ma_device_type_capture | + | ma_device_type_duplex | + | ma_device_type_loopback | + |-------------------------| + + + Return Value + ------------ + A new device config object with default settings. You will typically want to adjust the config after this function returns. See remarks. + + + Thread Safety + ------------- + Safe. + + + Callback Safety + --------------- + Safe, but don't try initializing a device in a callback. + + + Remarks + ------- + The returned config will be initialized to defaults. You will normally want to customize a few variables before initializing the device. See Example 1 for a + typical configuration which sets the sample format, channel count, sample rate, data callback and user data. These are usually things you will want to change + before initializing the device. + + See `ma_device_init()` for details on specific configuration options. + + + Example 1 - Simple Configuration + -------------------------------- + The example below is what a program will typically want to configure for each device at a minimum. Notice how `ma_device_config_init()` is called first, and + then the returned object is modified directly. This is important because it ensures that your program continues to work as new configuration options are added + to the `ma_device_config` structure. 
+ + ```c + ma_device_config config = ma_device_config_init(ma_device_type_playback); + config.playback.format = ma_format_f32; + config.playback.channels = 2; + config.sampleRate = 48000; + config.dataCallback = ma_data_callback; + config.pUserData = pMyUserData; + ``` + + + See Also + -------- + ma_device_init() + ma_device_init_ex() + */ + MA_API ma_device_config ma_device_config_init(ma_device_type deviceType); + + + /* + Initializes a device. + + A device represents a physical audio device. The idea is you send or receive audio data from the device to either play it back through a speaker, or capture it + from a microphone. Whether or not you should send or receive data from the device (or both) depends on the type of device you are initializing which can be + playback, capture, full-duplex or loopback. (Note that loopback mode is only supported on select backends.) Sending and receiving audio data to and from the + device is done via a callback which is fired by miniaudio at periodic time intervals. + + The frequency at which data is delivered to and from a device depends on the size of it's period. The size of the period can be defined in terms of PCM frames + or milliseconds, whichever is more convenient. Generally speaking, the smaller the period, the lower the latency at the expense of higher CPU usage and + increased risk of glitching due to the more frequent and granular data deliver intervals. The size of a period will depend on your requirements, but + miniaudio's defaults should work fine for most scenarios. If you're building a game you should leave this fairly small, whereas if you're building a simple + media player you can make it larger. Note that the period size you request is actually just a hint - miniaudio will tell the backend what you want, but the + backend is ultimately responsible for what it gives you. You cannot assume you will get exactly what you ask for. + + When delivering data to and from a device you need to make sure it's in the correct format which you can set through the device configuration. You just set the + format that you want to use and miniaudio will perform all of the necessary conversion for you internally. When delivering data to and from the callback you + can assume the format is the same as what you requested when you initialized the device. See Remarks for more details on miniaudio's data conversion pipeline. + + + Parameters + ---------- + pContext (in, optional) + A pointer to the context that owns the device. This can be null, in which case it creates a default context internally. + + pConfig (in) + A pointer to the device configuration. Cannot be null. See remarks for details. + + pDevice (out) + A pointer to the device object being initialized. + + + Return Value + ------------ + MA_SUCCESS if successful; any other error code otherwise. + + + Thread Safety + ------------- + Unsafe. It is not safe to call this function simultaneously for different devices because some backends depend on and mutate global state. The same applies to + calling this at the same time as `ma_device_uninit()`. + + + Callback Safety + --------------- + Unsafe. It is not safe to call this inside any callback. + + + Remarks + ------- + Setting `pContext` to NULL will result in miniaudio creating a default context internally and is equivalent to passing in a context initialized like so: + + ```c + ma_context_init(NULL, 0, NULL, &context); + ``` + + Do not set `pContext` to NULL if you are needing to open multiple devices. 
You can, however, use NULL when initializing the first device, and then use + device.pContext for the initialization of other devices. + + The device can be configured via the `pConfig` argument. The config object is initialized with `ma_device_config_init()`. Individual configuration settings can + then be set directly on the structure. Below are the members of the `ma_device_config` object. + + deviceType + Must be `ma_device_type_playback`, `ma_device_type_capture`, `ma_device_type_duplex` or `ma_device_type_loopback`. + + sampleRate + The sample rate, in hertz. The most common sample rates are 48000 and 44100. Setting this to 0 will use the device's native sample rate. + + periodSizeInFrames + The desired size of a period in PCM frames. If this is 0, `periodSizeInMilliseconds` will be used instead. If both are 0 the default buffer size will + be used depending on the selected performance profile. This value affects latency. See below for details. + + periodSizeInMilliseconds + The desired size of a period in milliseconds. If this is 0, `periodSizeInFrames` will be used instead. If both are 0 the default buffer size will be + used depending on the selected performance profile. The value affects latency. See below for details. + + periods + The number of periods making up the device's entire buffer. The total buffer size is `periodSizeInFrames` or `periodSizeInMilliseconds` multiplied by + this value. This is just a hint as backends will be the ones who ultimately decide how your periods will be configured. + + performanceProfile + A hint to miniaudio as to the performance requirements of your program. Can be either `ma_performance_profile_low_latency` (default) or + `ma_performance_profile_conservative`. This mainly affects the size of default buffers and can usually be left at its default value. + + noPreSilencedOutputBuffer + When set to true, the contents of the output buffer passed into the data callback will be left undefined. When set to false (default), the contents of + the output buffer will be cleared to zero. You can use this to avoid the overhead of zeroing out the buffer if you can guarantee that your data + callback will write to every sample in the output buffer, or if you are doing your own clearing. + + noClip + When set to true, the contents of the output buffer passed into the data callback are left alone after returning and it is left up to the backend + itself to decide whether or not to clip. When set to false (default), the contents of the output buffer will be clipped after returning. This only + applies when the playback sample format is f32. + + noDisableDenormals + By default, miniaudio will disable denormals when the data callback is called. Setting this to true will prevent the disabling of denormals. + + noFixedSizedCallback + Allows miniaudio to fire the data callback with any frame count. When this is set to false (the default), the data callback will be fired with a + consistent frame count as specified by `periodSizeInFrames` or `periodSizeInMilliseconds`. When set to true, miniaudio will fire the callback with + whatever the backend requests, which could be anything. + + dataCallback + The callback to fire whenever data is ready to be delivered to or from the device. + + notificationCallback + The callback to fire when something has changed with the device, such as whether or not it has been started or stopped. + + pUserData + The user data pointer to use with the device. 
You can access this directly from the device object like `device.pUserData`. + + resampling.algorithm + The resampling algorithm to use when miniaudio needs to perform resampling between the rate specified by `sampleRate` and the device's native rate. The + default value is `ma_resample_algorithm_linear`, and the quality can be configured with `resampling.linear.lpfOrder`. + + resampling.pBackendVTable + A pointer to an optional vtable that can be used for plugging in a custom resampler. + + resampling.pBackendUserData + A pointer that will passed to callbacks in pBackendVTable. + + resampling.linear.lpfOrder + The linear resampler applies a low-pass filter as part of it's processing for anti-aliasing. This setting controls the order of the filter. The higher + the value, the better the quality, in general. Setting this to 0 will disable low-pass filtering altogether. The maximum value is + `MA_MAX_FILTER_ORDER`. The default value is `min(4, MA_MAX_FILTER_ORDER)`. + + playback.pDeviceID + A pointer to a `ma_device_id` structure containing the ID of the playback device to initialize. Setting this NULL (default) will use the system's + default playback device. Retrieve the device ID from the `ma_device_info` structure, which can be retrieved using device enumeration. + + playback.format + The sample format to use for playback. When set to `ma_format_unknown` the device's native format will be used. This can be retrieved after + initialization from the device object directly with `device.playback.format`. + + playback.channels + The number of channels to use for playback. When set to 0 the device's native channel count will be used. This can be retrieved after initialization + from the device object directly with `device.playback.channels`. + + playback.pChannelMap + The channel map to use for playback. When left empty, the device's native channel map will be used. This can be retrieved after initialization from the + device object direct with `device.playback.pChannelMap`. When set, the buffer should contain `channels` items. + + playback.shareMode + The preferred share mode to use for playback. Can be either `ma_share_mode_shared` (default) or `ma_share_mode_exclusive`. Note that if you specify + exclusive mode, but it's not supported by the backend, initialization will fail. You can then fall back to shared mode if desired by changing this to + ma_share_mode_shared and reinitializing. + + capture.pDeviceID + A pointer to a `ma_device_id` structure containing the ID of the capture device to initialize. Setting this NULL (default) will use the system's + default capture device. Retrieve the device ID from the `ma_device_info` structure, which can be retrieved using device enumeration. + + capture.format + The sample format to use for capture. When set to `ma_format_unknown` the device's native format will be used. This can be retrieved after + initialization from the device object directly with `device.capture.format`. + + capture.channels + The number of channels to use for capture. When set to 0 the device's native channel count will be used. This can be retrieved after initialization + from the device object directly with `device.capture.channels`. + + capture.pChannelMap + The channel map to use for capture. When left empty, the device's native channel map will be used. This can be retrieved after initialization from the + device object direct with `device.capture.pChannelMap`. When set, the buffer should contain `channels` items. 
+ + capture.shareMode + The preferred share mode to use for capture. Can be either `ma_share_mode_shared` (default) or `ma_share_mode_exclusive`. Note that if you specify + exclusive mode, but it's not supported by the backend, initialization will fail. You can then fall back to shared mode if desired by changing this to + ma_share_mode_shared and reinitializing. + + wasapi.noAutoConvertSRC + WASAPI only. When set to true, disables WASAPI's automatic resampling and forces the use of miniaudio's resampler. Defaults to false. + + wasapi.noDefaultQualitySRC + WASAPI only. Only used when `wasapi.noAutoConvertSRC` is set to false. When set to true, disables the use of `AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY`. + You should usually leave this set to false, which is the default. + + wasapi.noAutoStreamRouting + WASAPI only. When set to true, disables automatic stream routing on the WASAPI backend. Defaults to false. + + wasapi.noHardwareOffloading + WASAPI only. When set to true, disables the use of WASAPI's hardware offloading feature. Defaults to false. + + alsa.noMMap + ALSA only. When set to true, disables MMap mode. Defaults to false. + + alsa.noAutoFormat + ALSA only. When set to true, disables ALSA's automatic format conversion by including the SND_PCM_NO_AUTO_FORMAT flag. Defaults to false. + + alsa.noAutoChannels + ALSA only. When set to true, disables ALSA's automatic channel conversion by including the SND_PCM_NO_AUTO_CHANNELS flag. Defaults to false. + + alsa.noAutoResample + ALSA only. When set to true, disables ALSA's automatic resampling by including the SND_PCM_NO_AUTO_RESAMPLE flag. Defaults to false. + + pulse.pStreamNamePlayback + PulseAudio only. Sets the stream name for playback. + + pulse.pStreamNameCapture + PulseAudio only. Sets the stream name for capture. + + coreaudio.allowNominalSampleRateChange + Core Audio only. Desktop only. When enabled, allows the sample rate of the device to be changed at the operating system level. This + is disabled by default in order to prevent intrusive changes to the user's system. This is useful if you want to use a sample rate + that is known to be natively supported by the hardware thereby avoiding the cost of resampling. When set to true, miniaudio will + find the closest match between the sample rate requested in the device config and the sample rates natively supported by the + hardware. When set to false, the sample rate currently set by the operating system will always be used. + + opensl.streamType + OpenSL only. Explicitly sets the stream type. If left unset (`ma_opensl_stream_type_default`), the + stream type will be left unset. Think of this as the type of audio you're playing. + + opensl.recordingPreset + OpenSL only. Explicitly sets the type of recording your program will be doing. When left + unset, the recording preset will be left unchanged. + + aaudio.usage + AAudio only. Explicitly sets the nature of the audio the program will be consuming. When + left unset, the usage will be left unchanged. + + aaudio.contentType + AAudio only. Sets the content type. When left unset, the content type will be left unchanged. + + aaudio.inputPreset + AAudio only. Explicitly sets the type of recording your program will be doing. When left + unset, the input preset will be left unchanged. + + aaudio.noAutoStartAfterReroute + AAudio only. Controls whether or not the device should be automatically restarted after a + stream reroute. When set to false (default) the device will be restarted automatically; + otherwise the device will be stopped. 
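+
+    As a rough sketch only (the callback name `my_duplex_callback` and its pass-through behaviour are placeholders, not part of miniaudio), a duplex
+    configuration that combines several of the members documented above might look like this:
+
+    ```c
+    // Placeholder callback: echoes captured frames straight back to the playback side.
+    void my_duplex_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
+    {
+        ma_copy_pcm_frames(pOutput, pInput, frameCount, ma_format_f32, 2);  // Format/channels match the config below.
+    }
+
+    ma_device_config config = ma_device_config_init(ma_device_type_duplex);
+    config.sampleRate                 = 48000;
+    config.capture.format             = ma_format_f32;
+    config.capture.channels           = 2;
+    config.capture.shareMode          = ma_share_mode_shared;
+    config.playback.format            = ma_format_f32;
+    config.playback.channels          = 2;
+    config.periodSizeInMilliseconds   = 10;
+    config.resampling.linear.lpfOrder = 4;                   // Only relevant if the backend's native rate differs from sampleRate.
+    config.dataCallback               = my_duplex_callback;
+    config.pUserData                  = NULL;                // Would normally point to your own application state.
+
+    ma_device device;
+    if (ma_device_init(NULL, &config, &device) != MA_SUCCESS) {
+        // Error.
+    }
+    ```
+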
+ + + Once initialized, the device's config is immutable. If you need to change the config you will need to initialize a new device. + + After initializing the device it will be in a stopped state. To start it, use `ma_device_start()`. + + If both `periodSizeInFrames` and `periodSizeInMilliseconds` are set to zero, it will default to `MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY` or + `MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE`, depending on whether `performanceProfile` is set to `ma_performance_profile_low_latency` or + `ma_performance_profile_conservative`. + + If you request exclusive mode and the backend does not support it, an error will be returned. For robustness, you may want to first try initializing the device + in exclusive mode, and then fall back to shared mode if required. Alternatively you can just request shared mode (the default if you leave it unset in the + config) which is the most reliable option. Some backends do not have a practical way of choosing whether or not the device should be exclusive (ALSA, + for example), in which case it just acts as a hint. Unless you have special requirements you should try avoiding exclusive mode as it's intrusive to the user. + Starting with Windows 10, miniaudio will use low-latency shared mode where possible which may make exclusive mode unnecessary. + + When sending or receiving data to/from a device, miniaudio will internally perform a format conversion to convert between the format specified by the config + and the format used internally by the backend. If you pass in 0 for the sample format, channel count, sample rate _and_ channel map, data transmission will run + on an optimized pass-through fast path. You can retrieve the format, channel count and sample rate by inspecting the `playback/capture.format`, + `playback/capture.channels` and `sampleRate` members of the device object. + + When compiling for UWP you must ensure you call this function on the main UI thread because the operating system may need to present the user with a message + asking for permissions. Please refer to the official documentation for ActivateAudioInterfaceAsync() for more information. + + ALSA Specific: When initializing the default device, requesting shared mode will try using the "dmix" device for playback and the "dsnoop" device for capture. + If these fail it will try falling back to the "hw" device. + + + Example 1 - Simple Initialization + --------------------------------- + This example shows how to initialize a simple playback device using a standard configuration. If you just need to do simple playback from the default + playback device this is usually all you need. + + ```c + ma_device_config config = ma_device_config_init(ma_device_type_playback); + config.playback.format = ma_format_f32; + config.playback.channels = 2; + config.sampleRate = 48000; + config.dataCallback = ma_data_callback; + config.pUserData = pMyUserData; + + ma_device device; + ma_result result = ma_device_init(NULL, &config, &device); + if (result != MA_SUCCESS) { + // Error + } + ``` + + + Example 2 - Advanced Initialization + ----------------------------------- + This example shows how you might do some more advanced initialization. In this hypothetical example we want to control the latency by setting the buffer size + and period count. We also want to allow the user to choose which device to output from, which means we need a context so we can perform device + enumeration. 
+ + ```c + ma_context context; + ma_result result = ma_context_init(NULL, 0, NULL, &context); + if (result != MA_SUCCESS) { + // Error + } + + ma_device_info* pPlaybackDeviceInfos; + ma_uint32 playbackDeviceCount; + result = ma_context_get_devices(&context, &pPlaybackDeviceInfos, &playbackDeviceCount, NULL, NULL); + if (result != MA_SUCCESS) { + // Error + } + + // ... choose a device from pPlaybackDeviceInfos ... + + ma_device_config config = ma_device_config_init(ma_device_type_playback); + config.playback.pDeviceID = pMyChosenDeviceID; // <-- Get this from the `id` member of one of the `ma_device_info` objects returned by ma_context_get_devices(). + config.playback.format = ma_format_f32; + config.playback.channels = 2; + config.sampleRate = 48000; + config.dataCallback = ma_data_callback; + config.pUserData = pMyUserData; + config.periodSizeInMilliseconds = 10; + config.periods = 3; + + ma_device device; + result = ma_device_init(&context, &config, &device); + if (result != MA_SUCCESS) { + // Error + } + ``` + + + See Also + -------- + ma_device_config_init() + ma_device_uninit() + ma_device_start() + ma_context_init() + ma_context_get_devices() + ma_context_enumerate_devices() + */ + MA_API ma_result ma_device_init(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice); + + /* + Initializes a device without a context, with extra parameters for controlling the configuration of the internal self-managed context. + + This is the same as `ma_device_init()`, only instead of a context being passed in, the parameters from `ma_context_init()` are passed in instead. This function + allows you to configure the internally created context. + + + Parameters + ---------- + backends (in, optional) + A list of backends to try initializing, in priority order. Can be NULL, in which case it uses default priority order. + + backendCount (in, optional) + The number of items in `backend`. Ignored if `backend` is NULL. + + pContextConfig (in, optional) + The context configuration. + + pConfig (in) + A pointer to the device configuration. Cannot be null. See remarks for details. + + pDevice (out) + A pointer to the device object being initialized. + + + Return Value + ------------ + MA_SUCCESS if successful; any other error code otherwise. + + + Thread Safety + ------------- + Unsafe. It is not safe to call this function simultaneously for different devices because some backends depend on and mutate global state. The same applies to + calling this at the same time as `ma_device_uninit()`. + + + Callback Safety + --------------- + Unsafe. It is not safe to call this inside any callback. + + + Remarks + ------- + You only need to use this function if you want to configure the context differently to it's defaults. You should never use this function if you want to manage + your own context. + + See the documentation for `ma_context_init()` for information on the different context configuration options. + + + See Also + -------- + ma_device_init() + ma_device_uninit() + ma_device_config_init() + ma_context_init() + */ + MA_API ma_result ma_device_init_ex(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pContextConfig, const ma_device_config* pConfig, ma_device* pDevice); + + /* + Uninitializes a device. + + This will explicitly stop the device. You do not need to call `ma_device_stop()` beforehand, but it's harmless if you do. + + + Parameters + ---------- + pDevice (in) + A pointer to the device to stop. 
+ + + Return Value + ------------ + Nothing + + + Thread Safety + ------------- + Unsafe. As soon as this API is called the device should be considered undefined. + + + Callback Safety + --------------- + Unsafe. It is not safe to call this inside any callback. Doing this will result in a deadlock. + + + See Also + -------- + ma_device_init() + ma_device_stop() + */ + MA_API void ma_device_uninit(ma_device* pDevice); + + + /* + Retrieves a pointer to the context that owns the given device. + */ + MA_API ma_context* ma_device_get_context(ma_device* pDevice); + + /* + Helper function for retrieving the log object associated with the context that owns this device. + */ + MA_API ma_log* ma_device_get_log(ma_device* pDevice); + + + /* + Retrieves information about the device. + + + Parameters + ---------- + pDevice (in) + A pointer to the device whose information is being retrieved. + + type (in) + The device type. This parameter is required for duplex devices. When retrieving device + information, you are doing so for an individual playback or capture device. + + pDeviceInfo (out) + A pointer to the `ma_device_info` that will receive the device information. + + + Return Value + ------------ + MA_SUCCESS if successful; any other error code otherwise. + + + Thread Safety + ------------- + Unsafe. This should be considered unsafe because it may be calling into the backend which may or + may not be safe. + + + Callback Safety + --------------- + Unsafe. You should avoid calling this in the data callback because it may call into the backend + which may or may not be safe. + */ + MA_API ma_result ma_device_get_info(ma_device* pDevice, ma_device_type type, ma_device_info* pDeviceInfo); + + + /* + Retrieves the name of the device. + + + Parameters + ---------- + pDevice (in) + A pointer to the device whose information is being retrieved. + + type (in) + The device type. This parameter is required for duplex devices. When retrieving device + information, you are doing so for an individual playback or capture device. + + pName (out) + A pointer to the buffer that will receive the name. + + nameCap (in) + The capacity of the output buffer, including space for the null terminator. + + pLengthNotIncludingNullTerminator (out, optional) + A pointer to the variable that will receive the length of the name, not including the null + terminator. + + + Return Value + ------------ + MA_SUCCESS if successful; any other error code otherwise. + + + Thread Safety + ------------- + Unsafe. This should be considered unsafe because it may be calling into the backend which may or + may not be safe. + + + Callback Safety + --------------- + Unsafe. You should avoid calling this in the data callback because it may call into the backend + which may or may not be safe. + + + Remarks + ------- + If the name does not fully fit into the output buffer, it'll be truncated. You can pass in NULL to + `pName` if you want to first get the length of the name for the purpose of memory allocation of the + output buffer. Allocating a buffer of size `MA_MAX_DEVICE_NAME_LENGTH + 1` should be enough for + most cases and will avoid the need for the inefficiency of calling this function twice. + + This is implemented in terms of `ma_device_get_info()`. + */ + MA_API ma_result ma_device_get_name(ma_device* pDevice, ma_device_type type, char* pName, size_t nameCap, size_t* pLengthNotIncludingNullTerminator); + + + /* + Starts the device. For playback devices this begins playback. For capture devices it begins recording. 
+ + Use `ma_device_stop()` to stop the device. + + + Parameters + ---------- + pDevice (in) + A pointer to the device to start. + + + Return Value + ------------ + MA_SUCCESS if successful; any other error code otherwise. + + + Thread Safety + ------------- + Safe. It's safe to call this from any thread with the exception of the callback thread. + + + Callback Safety + --------------- + Unsafe. It is not safe to call this inside any callback. + + + Remarks + ------- + For a playback device, this will retrieve an initial chunk of audio data from the client before returning. The reason for this is to ensure there is valid + audio data in the buffer, which needs to be done before the device begins playback. + + This API waits until the backend device has been started for real by the worker thread. It also waits on a mutex for thread-safety. + + Do not call this in any callback. + + + See Also + -------- + ma_device_stop() + */ + MA_API ma_result ma_device_start(ma_device* pDevice); + + /* + Stops the device. For playback devices this stops playback. For capture devices it stops recording. + + Use `ma_device_start()` to start the device again. + + + Parameters + ---------- + pDevice (in) + A pointer to the device to stop. + + + Return Value + ------------ + MA_SUCCESS if successful; any other error code otherwise. + + + Thread Safety + ------------- + Safe. It's safe to call this from any thread with the exception of the callback thread. + + + Callback Safety + --------------- + Unsafe. It is not safe to call this inside any callback. Doing this will result in a deadlock. + + + Remarks + ------- + This API needs to wait on the worker thread to stop the backend device properly before returning. It also waits on a mutex for thread-safety. In addition, some + backends need to wait for the device to finish playback/recording of the current fragment which can take some time (usually proportionate to the buffer size + that was specified at initialization time). + + Backends are required to either pause the stream in-place or drain the buffer if pausing is not possible. The reason for this is that stopping the device and + the resuming it with ma_device_start() (which you might do when your program loses focus) may result in a situation where those samples are never output to the + speakers or received from the microphone which can in turn result in de-syncs. + + Do not call this in any callback. + + This will be called implicitly by `ma_device_uninit()`. + + + See Also + -------- + ma_device_start() + */ + MA_API ma_result ma_device_stop(ma_device* pDevice); + + /* + Determines whether or not the device is started. + + + Parameters + ---------- + pDevice (in) + A pointer to the device whose start state is being retrieved. + + + Return Value + ------------ + True if the device is started, false otherwise. + + + Thread Safety + ------------- + Safe. If another thread calls `ma_device_start()` or `ma_device_stop()` at this same time as this function is called, there's a very small chance the return + value will be out of sync. + + + Callback Safety + --------------- + Safe. This is implemented as a simple accessor. + + + See Also + -------- + ma_device_start() + ma_device_stop() + */ + MA_API ma_bool32 ma_device_is_started(const ma_device* pDevice); + + + /* + Retrieves the state of the device. + + + Parameters + ---------- + pDevice (in) + A pointer to the device whose state is being retrieved. + + + Return Value + ------------ + The current state of the device. 
The return value will be one of the following:
+
+    +-------------------------------+------------------------------------------------------------------------------+
+    | ma_device_state_uninitialized | Will only be returned if the device is in the middle of initialization.     |
+    +-------------------------------+------------------------------------------------------------------------------+
+    | ma_device_state_stopped       | The device is stopped. The initial state of the device after initialization. |
+    +-------------------------------+------------------------------------------------------------------------------+
+    | ma_device_state_started       | The device is started and is requesting and/or delivering audio data.        |
+    +-------------------------------+------------------------------------------------------------------------------+
+    | ma_device_state_starting      | The device is in the process of starting.                                    |
+    +-------------------------------+------------------------------------------------------------------------------+
+    | ma_device_state_stopping      | The device is in the process of stopping.                                    |
+    +-------------------------------+------------------------------------------------------------------------------+
+
+
+    Thread Safety
+    -------------
+    Safe. This is implemented as a simple accessor. Note that if the device is started or stopped at the same time as this function is called,
+    there's a possibility the return value could be out of sync. See remarks.
+
+
+    Callback Safety
+    ---------------
+    Safe. This is implemented as a simple accessor.
+
+
+    Remarks
+    -------
+    The general flow of a device's state goes like this:
+
+    ```
+    ma_device_init()  -> ma_device_state_uninitialized -> ma_device_state_stopped
+    ma_device_start() -> ma_device_state_starting      -> ma_device_state_started
+    ma_device_stop()  -> ma_device_state_stopping      -> ma_device_state_stopped
+    ```
+
+    When the state of the device is changed with `ma_device_start()` or `ma_device_stop()` at the same time as this function is called, the
+    value returned by this function could potentially be out of sync. If this is significant to your program you need to implement your own
+    synchronization.
+    */
+    MA_API ma_device_state ma_device_get_state(const ma_device* pDevice);
+
+
+    /*
+    Performs post backend initialization routines for setting up internal data conversion.
+
+    This should be called whenever the backend is initialized. The only time this should be called from
+    outside of miniaudio is if you're implementing a custom backend, and you would only do it if you
+    are reinitializing the backend due to rerouting or reinitializing for some reason.
+
+
+    Parameters
+    ----------
+    pDevice [in]
+        A pointer to the device.
+
+    deviceType [in]
+        The type of the device that was just reinitialized.
+
+    pPlaybackDescriptor [in]
+        The descriptor of the playback device containing the internal data format and buffer sizes.
+
+    pCaptureDescriptor [in]
+        The descriptor of the capture device containing the internal data format and buffer sizes.
+
+
+    Return Value
+    ------------
+    MA_SUCCESS if successful; any other error otherwise.
+
+
+    Thread Safety
+    -------------
+    Unsafe. This will be reinitializing internal data converters which may be in use by another thread.
+
+
+    Callback Safety
+    ---------------
+    Unsafe. This will be reinitializing internal data converters which may be in use by the callback.
+
+
+    Remarks
+    -------
+    For a duplex device, you can call this for only one side of the system.
This is why the deviceType + is specified as a parameter rather than deriving it from the device. + + You do not need to call this manually unless you are doing a custom backend, in which case you need + only do it if you're manually performing rerouting or reinitialization. + */ + MA_API ma_result ma_device_post_init(ma_device* pDevice, ma_device_type deviceType, const ma_device_descriptor* pPlaybackDescriptor, const ma_device_descriptor* pCaptureDescriptor); + + + /* + Sets the master volume factor for the device. + + The volume factor must be between 0 (silence) and 1 (full volume). Use `ma_device_set_master_volume_db()` to use decibel notation, where 0 is full volume and + values less than 0 decreases the volume. + + + Parameters + ---------- + pDevice (in) + A pointer to the device whose volume is being set. + + volume (in) + The new volume factor. Must be >= 0. + + + Return Value + ------------ + MA_SUCCESS if the volume was set successfully. + MA_INVALID_ARGS if pDevice is NULL. + MA_INVALID_ARGS if volume is negative. + + + Thread Safety + ------------- + Safe. This just sets a local member of the device object. + + + Callback Safety + --------------- + Safe. If you set the volume in the data callback, that data written to the output buffer will have the new volume applied. + + + Remarks + ------- + This applies the volume factor across all channels. + + This does not change the operating system's volume. It only affects the volume for the given `ma_device` object's audio stream. + + + See Also + -------- + ma_device_get_master_volume() + ma_device_set_master_volume_db() + ma_device_get_master_volume_db() + */ + MA_API ma_result ma_device_set_master_volume(ma_device* pDevice, float volume); + + /* + Retrieves the master volume factor for the device. + + + Parameters + ---------- + pDevice (in) + A pointer to the device whose volume factor is being retrieved. + + pVolume (in) + A pointer to the variable that will receive the volume factor. The returned value will be in the range of [0, 1]. + + + Return Value + ------------ + MA_SUCCESS if successful. + MA_INVALID_ARGS if pDevice is NULL. + MA_INVALID_ARGS if pVolume is NULL. + + + Thread Safety + ------------- + Safe. This just a simple member retrieval. + + + Callback Safety + --------------- + Safe. + + + Remarks + ------- + If an error occurs, `*pVolume` will be set to 0. + + + See Also + -------- + ma_device_set_master_volume() + ma_device_set_master_volume_gain_db() + ma_device_get_master_volume_gain_db() + */ + MA_API ma_result ma_device_get_master_volume(ma_device* pDevice, float* pVolume); + + /* + Sets the master volume for the device as gain in decibels. + + A gain of 0 is full volume, whereas a gain of < 0 will decrease the volume. + + + Parameters + ---------- + pDevice (in) + A pointer to the device whose gain is being set. + + gainDB (in) + The new volume as gain in decibels. Must be less than or equal to 0, where 0 is full volume and anything less than 0 decreases the volume. + + + Return Value + ------------ + MA_SUCCESS if the volume was set successfully. + MA_INVALID_ARGS if pDevice is NULL. + MA_INVALID_ARGS if the gain is > 0. + + + Thread Safety + ------------- + Safe. This just sets a local member of the device object. + + + Callback Safety + --------------- + Safe. If you set the volume in the data callback, that data written to the output buffer will have the new volume applied. + + + Remarks + ------- + This applies the gain across all channels. + + This does not change the operating system's volume. 
It only affects the volume for the given `ma_device` object's audio stream.
+
+
+    See Also
+    --------
+    ma_device_get_master_volume_db()
+    ma_device_set_master_volume()
+    ma_device_get_master_volume()
+    */
+    MA_API ma_result ma_device_set_master_volume_db(ma_device* pDevice, float gainDB);
+
+    /*
+    Retrieves the master gain in decibels.
+
+
+    Parameters
+    ----------
+    pDevice (in)
+        A pointer to the device whose gain is being retrieved.
+
+    pGainDB (in)
+        A pointer to the variable that will receive the gain in decibels. The returned value will be <= 0.
+
+
+    Return Value
+    ------------
+    MA_SUCCESS if successful.
+    MA_INVALID_ARGS if pDevice is NULL.
+    MA_INVALID_ARGS if pGainDB is NULL.
+
+
+    Thread Safety
+    -------------
+    Safe. This is just a simple member retrieval.
+
+
+    Callback Safety
+    ---------------
+    Safe.
+
+
+    Remarks
+    -------
+    If an error occurs, `*pGainDB` will be set to 0.
+
+
+    See Also
+    --------
+    ma_device_set_master_volume_db()
+    ma_device_set_master_volume()
+    ma_device_get_master_volume()
+    */
+    MA_API ma_result ma_device_get_master_volume_db(ma_device* pDevice, float* pGainDB);
+
+
+    /*
+    Called from the data callback of asynchronous backends to allow miniaudio to process the data and fire the miniaudio data callback.
+
+
+    Parameters
+    ----------
+    pDevice (in)
+        A pointer to the device that is processing the data callback.
+
+    pOutput (out)
+        A pointer to the buffer that will receive the output PCM frame data. On a playback device this must not be NULL. On a duplex device
+        this can be NULL, in which case pInput must not be NULL.
+
+    pInput (in)
+        A pointer to the buffer containing input PCM frame data. On a capture device this must not be NULL. On a duplex device this can be
+        NULL, in which case `pOutput` must not be NULL.
+
+    frameCount (in)
+        The number of frames being processed.
+
+
+    Return Value
+    ------------
+    MA_SUCCESS if successful; any other result code otherwise.
+
+
+    Thread Safety
+    -------------
+    This function should only ever be called from the internal data callback of the backend. It is safe to call this simultaneously between a
+    playback and capture device in duplex setups.
+
+
+    Callback Safety
+    ---------------
+    Do not call this from the miniaudio data callback. It should only ever be called from the internal data callback of the backend.
+
+
+    Remarks
+    -------
+    If both `pOutput` and `pInput` are NULL, an error will be returned. In duplex scenarios, both `pOutput` and `pInput` can be non-NULL, in
+    which case `pInput` will be processed first, followed by `pOutput`.
+
+    If you are implementing a custom backend, and that backend uses a callback for data delivery, you'll need to call this from inside that
+    callback.
+    */
+    MA_API ma_result ma_device_handle_backend_data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount);
+
+
+    /*
+    Calculates an appropriate buffer size from a descriptor, native sample rate and performance profile.
+
+    This function is used by backends for helping determine an appropriately sized buffer to use with
+    the device depending on the values of `periodSizeInFrames` and `periodSizeInMilliseconds` in the
+    `pDescriptor` object. Since buffer size calculations based on time depend on the sample rate, a
+    best guess at the device's native sample rate is also required, which is where `nativeSampleRate`
+    comes in. In addition, the performance profile is needed for cases where both the period size
+    in frames and the period size in milliseconds are zero.
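+
+    As a rough sketch (illustrative only, not the exact implementation), the selection order described
+    above amounts to something like the following, with the time based path going through
+    `ma_calculate_buffer_size_in_frames_from_milliseconds()`:
+
+    ```
+    ma_uint32 sizeInFrames;
+    ma_uint32 sampleRate = (nativeSampleRate != 0) ? nativeSampleRate : pDescriptor->sampleRate;  // See remarks below for the final MA_DEFAULT_SAMPLE_RATE fallback.
+
+    if (pDescriptor->periodSizeInFrames > 0) {
+        sizeInFrames = pDescriptor->periodSizeInFrames;     // An explicit size in frames always wins.
+    } else if (pDescriptor->periodSizeInMilliseconds > 0) {
+        sizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pDescriptor->periodSizeInMilliseconds, sampleRate);
+    } else {
+        // Both are zero - fall back to a size derived from the performance profile. The millisecond
+        // values below are assumptions for illustration only.
+        ma_uint32 fallbackMilliseconds = (performanceProfile == ma_performance_profile_low_latency) ? 10 : 100;
+        sizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(fallbackMilliseconds, sampleRate);
+    }
+    ```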
+
+
+    Parameters
+    ----------
+    pDescriptor (in)
+        A pointer to the device descriptor whose `periodSizeInFrames` and `periodSizeInMilliseconds` members
+        will be used for the calculation of the buffer size.
+
+    nativeSampleRate (in)
+        The device's native sample rate. This is only ever used when the `periodSizeInFrames` member of
+        `pDescriptor` is zero. In this case, `periodSizeInMilliseconds` will be used instead, in which
+        case a sample rate is required to convert to a size in frames.
+
+    performanceProfile (in)
+        When both the `periodSizeInFrames` and `periodSizeInMilliseconds` members of `pDescriptor` are
+        zero, miniaudio will fall back to a buffer size based on the performance profile. The profile
+        to use for this calculation is determined by this parameter.
+
+
+    Return Value
+    ------------
+    The calculated buffer size in frames.
+
+
+    Thread Safety
+    -------------
+    This is safe so long as nothing modifies `pDescriptor` at the same time. However, this function
+    should only ever be called from within the backend's device initialization routine and therefore
+    shouldn't have any multithreading concerns.
+
+
+    Callback Safety
+    ---------------
+    This is safe to call within the data callback, but there is no reason to ever do this.
+
+
+    Remarks
+    -------
+    If `nativeSampleRate` is zero, this function will fall back to `pDescriptor->sampleRate`. If that
+    is also zero, `MA_DEFAULT_SAMPLE_RATE` will be used instead.
+    */
+    MA_API ma_uint32 ma_calculate_buffer_size_in_frames_from_descriptor(const ma_device_descriptor* pDescriptor, ma_uint32 nativeSampleRate, ma_performance_profile performanceProfile);
+
+
+
+    /*
+    Retrieves a friendly name for a backend.
+    */
+    MA_API const char* ma_get_backend_name(ma_backend backend);
+
+    /*
+    Retrieves the backend enum from the given name.
+    */
+    MA_API ma_result ma_get_backend_from_name(const char* pBackendName, ma_backend* pBackend);
+
+    /*
+    Determines whether or not the given backend is available in the compilation environment.
+    */
+    MA_API ma_bool32 ma_is_backend_enabled(ma_backend backend);
+
+    /*
+    Retrieves compile-time enabled backends.
+
+
+    Parameters
+    ----------
+    pBackends (out, optional)
+        A pointer to the buffer that will receive the enabled backends. Set to NULL to retrieve the backend count. Setting
+        the capacity of the buffer to `MA_BACKEND_COUNT` will guarantee it's large enough for all backends.
+
+    backendCap (in)
+        The capacity of the `pBackends` buffer.
+
+    pBackendCount (out)
+        A pointer to the variable that will receive the enabled backend count.
+
+
+    Return Value
+    ------------
+    MA_SUCCESS if successful.
+    MA_INVALID_ARGS if `pBackendCount` is NULL.
+    MA_NO_SPACE if the capacity of `pBackends` is not large enough.
+
+    If `MA_NO_SPACE` is returned, the `pBackends` buffer will be filled with `*pBackendCount` values.
+
+
+    Thread Safety
+    -------------
+    Safe.
+
+
+    Callback Safety
+    ---------------
+    Safe.
+
+
+    Remarks
+    -------
+    If you want to retrieve the number of backends so you can determine the capacity of the `pBackends` buffer, you can call
+    this function with `pBackends` set to NULL.
+
+    This will also enumerate the null backend. If you don't want to include this you need to check for `ma_backend_null`
+    when you enumerate over the returned backends and handle it appropriately. Alternatively, you can disable it at
+    compile time with `MA_NO_NULL`.
+
+    The returned backends are determined based on compile time settings, not the platform it's currently running on.
For
+    example, PulseAudio will be returned if it was enabled at compile time, even when the user doesn't actually have
+    PulseAudio installed.
+
+
+    Example 1
+    ---------
+    The example below retrieves the enabled backend count using a fixed sized buffer allocated on the stack. The buffer is
+    given a capacity of `MA_BACKEND_COUNT` which will guarantee it'll be large enough to store all available backends.
+    Since `MA_BACKEND_COUNT` is always a relatively small value, this should be suitable for most scenarios.
+
+    ```
+    ma_backend enabledBackends[MA_BACKEND_COUNT];
+    size_t enabledBackendCount;
+    ma_result result;
+
+    result = ma_get_enabled_backends(enabledBackends, MA_BACKEND_COUNT, &enabledBackendCount);
+    if (result != MA_SUCCESS) {
+        // Failed to retrieve enabled backends. Should never happen in this example since all inputs are valid.
+    }
+    ```
+
+
+    See Also
+    --------
+    ma_is_backend_enabled()
+    */
+    MA_API ma_result ma_get_enabled_backends(ma_backend* pBackends, size_t backendCap, size_t* pBackendCount);
+
+    /*
+    Determines whether or not loopback mode is supported by a backend.
+    */
+    MA_API ma_bool32 ma_is_loopback_supported(ma_backend backend);
+
+#endif /* MA_NO_DEVICE_IO */
+
+
+
+    /************************************************************************************************************************************************************
+
+    Utilities
+
+    ************************************************************************************************************************************************************/
+
+    /*
+    Calculates a buffer size in milliseconds from the specified number of frames and sample rate.
+    */
+    MA_API ma_uint32 ma_calculate_buffer_size_in_milliseconds_from_frames(ma_uint32 bufferSizeInFrames, ma_uint32 sampleRate);
+
+    /*
+    Calculates a buffer size in frames from the specified number of milliseconds and sample rate.
+    */
+    MA_API ma_uint32 ma_calculate_buffer_size_in_frames_from_milliseconds(ma_uint32 bufferSizeInMilliseconds, ma_uint32 sampleRate);
+
+    /*
+    Copies PCM frames from one buffer to another.
+    */
+    MA_API void ma_copy_pcm_frames(void* dst, const void* src, ma_uint64 frameCount, ma_format format, ma_uint32 channels);
+
+    /*
+    Copies silent frames into the given buffer.
+
+    Remarks
+    -------
+    For all formats except `ma_format_u8`, the output buffer will be filled with 0. For `ma_format_u8` it will be filled with 128. The reason for this is that it
+    makes more sense for the purpose of mixing to initialize it to the center point.
+    */
+    MA_API void ma_silence_pcm_frames(void* p, ma_uint64 frameCount, ma_format format, ma_uint32 channels);
+
+
+    /*
+    Offsets a pointer by the specified number of PCM frames.
+    */
+    MA_API void* ma_offset_pcm_frames_ptr(void* p, ma_uint64 offsetInFrames, ma_format format, ma_uint32 channels);
+    MA_API const void* ma_offset_pcm_frames_const_ptr(const void* p, ma_uint64 offsetInFrames, ma_format format, ma_uint32 channels);
+    static MA_INLINE float* ma_offset_pcm_frames_ptr_f32(float* p, ma_uint64 offsetInFrames, ma_uint32 channels) { return (float*)ma_offset_pcm_frames_ptr((void*)p, offsetInFrames, ma_format_f32, channels); }
+    static MA_INLINE const float* ma_offset_pcm_frames_const_ptr_f32(const float* p, ma_uint64 offsetInFrames, ma_uint32 channels) { return (const float*)ma_offset_pcm_frames_const_ptr((const void*)p, offsetInFrames, ma_format_f32, channels); }
+
+
+    /*
+    Clips samples.
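+
+    These helpers clamp samples that may have gone out of range (for example after mixing) back into the
+    representable range of the destination format. As a hedged sketch, clipping a mixed f32 buffer before
+    handing it to a playback device might look like this (buffer names and sizes are illustrative only):
+
+    ```
+    float mixed[1024 * 2];   // 1024 stereo frames accumulated by mixing; values may exceed [-1, 1].
+    float clipped[1024 * 2]; // Destination for the clipped frames.
+
+    ma_clip_pcm_frames(clipped, mixed, 1024, ma_format_f32, 2);
+    ```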
+ */ + MA_API void ma_clip_samples_u8(ma_uint8* pDst, const ma_int16* pSrc, ma_uint64 count); + MA_API void ma_clip_samples_s16(ma_int16* pDst, const ma_int32* pSrc, ma_uint64 count); + MA_API void ma_clip_samples_s24(ma_uint8* pDst, const ma_int64* pSrc, ma_uint64 count); + MA_API void ma_clip_samples_s32(ma_int32* pDst, const ma_int64* pSrc, ma_uint64 count); + MA_API void ma_clip_samples_f32(float* pDst, const float* pSrc, ma_uint64 count); + MA_API void ma_clip_pcm_frames(void* pDst, const void* pSrc, ma_uint64 frameCount, ma_format format, ma_uint32 channels); + + /* + Helper for applying a volume factor to samples. + + Note that the source and destination buffers can be the same, in which case it'll perform the operation in-place. + */ + MA_API void ma_copy_and_apply_volume_factor_u8(ma_uint8* pSamplesOut, const ma_uint8* pSamplesIn, ma_uint64 sampleCount, float factor); + MA_API void ma_copy_and_apply_volume_factor_s16(ma_int16* pSamplesOut, const ma_int16* pSamplesIn, ma_uint64 sampleCount, float factor); + MA_API void ma_copy_and_apply_volume_factor_s24(void* pSamplesOut, const void* pSamplesIn, ma_uint64 sampleCount, float factor); + MA_API void ma_copy_and_apply_volume_factor_s32(ma_int32* pSamplesOut, const ma_int32* pSamplesIn, ma_uint64 sampleCount, float factor); + MA_API void ma_copy_and_apply_volume_factor_f32(float* pSamplesOut, const float* pSamplesIn, ma_uint64 sampleCount, float factor); + + MA_API void ma_apply_volume_factor_u8(ma_uint8* pSamples, ma_uint64 sampleCount, float factor); + MA_API void ma_apply_volume_factor_s16(ma_int16* pSamples, ma_uint64 sampleCount, float factor); + MA_API void ma_apply_volume_factor_s24(void* pSamples, ma_uint64 sampleCount, float factor); + MA_API void ma_apply_volume_factor_s32(ma_int32* pSamples, ma_uint64 sampleCount, float factor); + MA_API void ma_apply_volume_factor_f32(float* pSamples, ma_uint64 sampleCount, float factor); + + MA_API void ma_copy_and_apply_volume_factor_pcm_frames_u8(ma_uint8* pFramesOut, const ma_uint8* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor); + MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s16(ma_int16* pFramesOut, const ma_int16* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor); + MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s24(void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor); + MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s32(ma_int32* pFramesOut, const ma_int32* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor); + MA_API void ma_copy_and_apply_volume_factor_pcm_frames_f32(float* pFramesOut, const float* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor); + MA_API void ma_copy_and_apply_volume_factor_pcm_frames(void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount, ma_format format, ma_uint32 channels, float factor); + + MA_API void ma_apply_volume_factor_pcm_frames_u8(ma_uint8* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor); + MA_API void ma_apply_volume_factor_pcm_frames_s16(ma_int16* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor); + MA_API void ma_apply_volume_factor_pcm_frames_s24(void* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor); + MA_API void ma_apply_volume_factor_pcm_frames_s32(ma_int32* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor); + MA_API void ma_apply_volume_factor_pcm_frames_f32(float* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor); + 
MA_API void ma_apply_volume_factor_pcm_frames(void* pFrames, ma_uint64 frameCount, ma_format format, ma_uint32 channels, float factor); + + MA_API void ma_copy_and_apply_volume_factor_per_channel_f32(float* pFramesOut, const float* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float* pChannelGains); + + + MA_API void ma_copy_and_apply_volume_and_clip_samples_u8(ma_uint8* pDst, const ma_int16* pSrc, ma_uint64 count, float volume); + MA_API void ma_copy_and_apply_volume_and_clip_samples_s16(ma_int16* pDst, const ma_int32* pSrc, ma_uint64 count, float volume); + MA_API void ma_copy_and_apply_volume_and_clip_samples_s24(ma_uint8* pDst, const ma_int64* pSrc, ma_uint64 count, float volume); + MA_API void ma_copy_and_apply_volume_and_clip_samples_s32(ma_int32* pDst, const ma_int64* pSrc, ma_uint64 count, float volume); + MA_API void ma_copy_and_apply_volume_and_clip_samples_f32(float* pDst, const float* pSrc, ma_uint64 count, float volume); + MA_API void ma_copy_and_apply_volume_and_clip_pcm_frames(void* pDst, const void* pSrc, ma_uint64 frameCount, ma_format format, ma_uint32 channels, float volume); + + + /* + Helper for converting a linear factor to gain in decibels. + */ + MA_API float ma_volume_linear_to_db(float factor); + + /* + Helper for converting gain in decibels to a linear factor. + */ + MA_API float ma_volume_db_to_linear(float gain); + + + /* + Mixes the specified number of frames in floating point format with a volume factor. + + This will run on an optimized path when the volume is equal to 1. + */ + MA_API ma_result ma_mix_pcm_frames_f32(float* pDst, const float* pSrc, ma_uint64 frameCount, ma_uint32 channels, float volume); + + + + + /************************************************************************************************************************************************************ + + VFS + === + + The VFS object (virtual file system) is what's used to customize file access. This is useful in cases where stdio FILE* based APIs may not be entirely + appropriate for a given situation. + + ************************************************************************************************************************************************************/ + typedef void ma_vfs; + typedef ma_handle ma_vfs_file; + + typedef enum + { + MA_OPEN_MODE_READ = 0x00000001, + MA_OPEN_MODE_WRITE = 0x00000002 + } ma_open_mode_flags; + + typedef enum + { + ma_seek_origin_start, + ma_seek_origin_current, + ma_seek_origin_end /* Not used by decoders. 
*/ + } ma_seek_origin; + + typedef struct + { + ma_uint64 sizeInBytes; + } ma_file_info; + + typedef struct + { + ma_result (* onOpen) (ma_vfs* pVFS, const char* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile); + ma_result (* onOpenW)(ma_vfs* pVFS, const wchar_t* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile); + ma_result (* onClose)(ma_vfs* pVFS, ma_vfs_file file); + ma_result (* onRead) (ma_vfs* pVFS, ma_vfs_file file, void* pDst, size_t sizeInBytes, size_t* pBytesRead); + ma_result (* onWrite)(ma_vfs* pVFS, ma_vfs_file file, const void* pSrc, size_t sizeInBytes, size_t* pBytesWritten); + ma_result (* onSeek) (ma_vfs* pVFS, ma_vfs_file file, ma_int64 offset, ma_seek_origin origin); + ma_result (* onTell) (ma_vfs* pVFS, ma_vfs_file file, ma_int64* pCursor); + ma_result (* onInfo) (ma_vfs* pVFS, ma_vfs_file file, ma_file_info* pInfo); + } ma_vfs_callbacks; + + MA_API ma_result ma_vfs_open(ma_vfs* pVFS, const char* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile); + MA_API ma_result ma_vfs_open_w(ma_vfs* pVFS, const wchar_t* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile); + MA_API ma_result ma_vfs_close(ma_vfs* pVFS, ma_vfs_file file); + MA_API ma_result ma_vfs_read(ma_vfs* pVFS, ma_vfs_file file, void* pDst, size_t sizeInBytes, size_t* pBytesRead); + MA_API ma_result ma_vfs_write(ma_vfs* pVFS, ma_vfs_file file, const void* pSrc, size_t sizeInBytes, size_t* pBytesWritten); + MA_API ma_result ma_vfs_seek(ma_vfs* pVFS, ma_vfs_file file, ma_int64 offset, ma_seek_origin origin); + MA_API ma_result ma_vfs_tell(ma_vfs* pVFS, ma_vfs_file file, ma_int64* pCursor); + MA_API ma_result ma_vfs_info(ma_vfs* pVFS, ma_vfs_file file, ma_file_info* pInfo); + MA_API ma_result ma_vfs_open_and_read_file(ma_vfs* pVFS, const char* pFilePath, void** ppData, size_t* pSize, const ma_allocation_callbacks* pAllocationCallbacks); + + typedef struct + { + ma_vfs_callbacks cb; + ma_allocation_callbacks allocationCallbacks; /* Only used for the wchar_t version of open() on non-Windows platforms. */ + } ma_default_vfs; + + MA_API ma_result ma_default_vfs_init(ma_default_vfs* pVFS, const ma_allocation_callbacks* pAllocationCallbacks); + + + + typedef ma_result (* ma_read_proc)(void* pUserData, void* pBufferOut, size_t bytesToRead, size_t* pBytesRead); + typedef ma_result (* ma_seek_proc)(void* pUserData, ma_int64 offset, ma_seek_origin origin); + typedef ma_result (* ma_tell_proc)(void* pUserData, ma_int64* pCursor); + + + +#if !defined(MA_NO_DECODING) || !defined(MA_NO_ENCODING) + typedef enum + { + ma_encoding_format_unknown = 0, + ma_encoding_format_wav, + ma_encoding_format_flac, + ma_encoding_format_mp3, + ma_encoding_format_vorbis + } ma_encoding_format; +#endif + + /************************************************************************************************************************************************************ + + Decoding + ======== + + Decoders are independent of the main device API. Decoding APIs can be called freely inside the device's data callback, but they are not thread safe unless + you do your own synchronization. + + ************************************************************************************************************************************************************/ +#ifndef MA_NO_DECODING + typedef struct ma_decoder ma_decoder; + + + typedef struct + { + ma_format preferredFormat; + ma_uint32 seekPointCount; /* Set to > 0 to generate a seektable if the decoding backend supports it. 
*/ + } ma_decoding_backend_config; + + MA_API ma_decoding_backend_config ma_decoding_backend_config_init(ma_format preferredFormat, ma_uint32 seekPointCount); + + + typedef struct + { + ma_result (* onInit )(void* pUserData, ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend); + ma_result (* onInitFile )(void* pUserData, const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend); /* Optional. */ + ma_result (* onInitFileW )(void* pUserData, const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend); /* Optional. */ + ma_result (* onInitMemory)(void* pUserData, const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend); /* Optional. */ + void (* onUninit )(void* pUserData, ma_data_source* pBackend, const ma_allocation_callbacks* pAllocationCallbacks); + } ma_decoding_backend_vtable; + + + typedef ma_result (* ma_decoder_read_proc)(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead, size_t* pBytesRead); /* Returns the number of bytes read. */ + typedef ma_result (* ma_decoder_seek_proc)(ma_decoder* pDecoder, ma_int64 byteOffset, ma_seek_origin origin); + typedef ma_result (* ma_decoder_tell_proc)(ma_decoder* pDecoder, ma_int64* pCursor); + + typedef struct + { + ma_format format; /* Set to 0 or ma_format_unknown to use the stream's internal format. */ + ma_uint32 channels; /* Set to 0 to use the stream's internal channels. */ + ma_uint32 sampleRate; /* Set to 0 to use the stream's internal sample rate. */ + ma_channel* pChannelMap; + ma_channel_mix_mode channelMixMode; + ma_dither_mode ditherMode; + ma_resampler_config resampling; + ma_allocation_callbacks allocationCallbacks; + ma_encoding_format encodingFormat; + ma_uint32 seekPointCount; /* When set to > 0, specifies the number of seek points to use for the generation of a seek table. Not all decoding backends support this. */ + ma_decoding_backend_vtable** ppCustomBackendVTables; + ma_uint32 customBackendCount; + void* pCustomBackendUserData; + } ma_decoder_config; + + struct ma_decoder + { + ma_data_source_base ds; + ma_data_source* pBackend; /* The decoding backend we'll be pulling data from. */ + const ma_decoding_backend_vtable* pBackendVTable; /* The vtable for the decoding backend. This needs to be stored so we can access the onUninit() callback. */ + void* pBackendUserData; + ma_decoder_read_proc onRead; + ma_decoder_seek_proc onSeek; + ma_decoder_tell_proc onTell; + void* pUserData; + ma_uint64 readPointerInPCMFrames; /* In output sample rate. Used for keeping track of how many frames are available for decoding. */ + ma_format outputFormat; + ma_uint32 outputChannels; + ma_uint32 outputSampleRate; + ma_data_converter converter; /* Data conversion is achieved by running frames through this. */ + void* pInputCache; /* In input format. Can be null if it's not needed. */ + ma_uint64 inputCacheCap; /* The capacity of the input cache. */ + ma_uint64 inputCacheConsumed; /* The number of frames that have been consumed in the cache. Used for determining the next valid frame. */ + ma_uint64 inputCacheRemaining; /* The number of valid frames remaining in the cahce. 
*/ + ma_allocation_callbacks allocationCallbacks; + union + { + struct + { + ma_vfs* pVFS; + ma_vfs_file file; + } vfs; + struct + { + const ma_uint8* pData; + size_t dataSize; + size_t currentReadPos; + } memory; /* Only used for decoders that were opened against a block of memory. */ + } data; + }; + + MA_API ma_decoder_config ma_decoder_config_init(ma_format outputFormat, ma_uint32 outputChannels, ma_uint32 outputSampleRate); + MA_API ma_decoder_config ma_decoder_config_init_default(void); + + MA_API ma_result ma_decoder_init(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder); + MA_API ma_result ma_decoder_init_memory(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder); + MA_API ma_result ma_decoder_init_vfs(ma_vfs* pVFS, const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); + MA_API ma_result ma_decoder_init_vfs_w(ma_vfs* pVFS, const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); + MA_API ma_result ma_decoder_init_file(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); + MA_API ma_result ma_decoder_init_file_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); + + /* + Uninitializes a decoder. + */ + MA_API ma_result ma_decoder_uninit(ma_decoder* pDecoder); + + /* + Reads PCM frames from the given decoder. + + This is not thread safe without your own synchronization. + */ + MA_API ma_result ma_decoder_read_pcm_frames(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); + + /* + Seeks to a PCM frame based on it's absolute index. + + This is not thread safe without your own synchronization. + */ + MA_API ma_result ma_decoder_seek_to_pcm_frame(ma_decoder* pDecoder, ma_uint64 frameIndex); + + /* + Retrieves the decoder's output data format. + */ + MA_API ma_result ma_decoder_get_data_format(ma_decoder* pDecoder, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); + + /* + Retrieves the current position of the read cursor in PCM frames. + */ + MA_API ma_result ma_decoder_get_cursor_in_pcm_frames(ma_decoder* pDecoder, ma_uint64* pCursor); + + /* + Retrieves the length of the decoder in PCM frames. + + Do not call this on streams of an undefined length, such as internet radio. + + If the length is unknown or an error occurs, 0 will be returned. + + This will always return 0 for Vorbis decoders. This is due to a limitation with stb_vorbis in push mode which is what miniaudio + uses internally. + + For MP3's, this will decode the entire file. Do not call this in time critical scenarios. + + This function is not thread safe without your own synchronization. + */ + MA_API ma_result ma_decoder_get_length_in_pcm_frames(ma_decoder* pDecoder, ma_uint64* pLength); + + /* + Retrieves the number of frames that can be read before reaching the end. + + This calls `ma_decoder_get_length_in_pcm_frames()` so you need to be aware of the rules for that function, in + particular ensuring you do not call it on streams of an undefined length, such as internet radio. + + If the total length of the decoder cannot be retrieved, such as with Vorbis decoders, `MA_NOT_IMPLEMENTED` will be + returned. + */ + MA_API ma_result ma_decoder_get_available_frames(ma_decoder* pDecoder, ma_uint64* pAvailableFrames); + + /* + Helper for opening and decoding a file into a heap allocated block of memory. 
Free the returned pointer with ma_free(). On input, + pConfig should be set to what you want. On output it will be set to what you got. + */ + MA_API ma_result ma_decode_from_vfs(ma_vfs* pVFS, const char* pFilePath, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppPCMFramesOut); + MA_API ma_result ma_decode_file(const char* pFilePath, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppPCMFramesOut); + MA_API ma_result ma_decode_memory(const void* pData, size_t dataSize, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppPCMFramesOut); + +#endif /* MA_NO_DECODING */ + + + /************************************************************************************************************************************************************ + + Encoding + ======== + + Encoders do not perform any format conversion for you. If your target format does not support the format, and error will be returned. + + ************************************************************************************************************************************************************/ +#ifndef MA_NO_ENCODING + typedef struct ma_encoder ma_encoder; + + typedef ma_result (* ma_encoder_write_proc) (ma_encoder* pEncoder, const void* pBufferIn, size_t bytesToWrite, size_t* pBytesWritten); + typedef ma_result (* ma_encoder_seek_proc) (ma_encoder* pEncoder, ma_int64 offset, ma_seek_origin origin); + typedef ma_result (* ma_encoder_init_proc) (ma_encoder* pEncoder); + typedef void (* ma_encoder_uninit_proc) (ma_encoder* pEncoder); + typedef ma_result (* ma_encoder_write_pcm_frames_proc)(ma_encoder* pEncoder, const void* pFramesIn, ma_uint64 frameCount, ma_uint64* pFramesWritten); + + typedef struct + { + ma_encoding_format encodingFormat; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_allocation_callbacks allocationCallbacks; + } ma_encoder_config; + + MA_API ma_encoder_config ma_encoder_config_init(ma_encoding_format encodingFormat, ma_format format, ma_uint32 channels, ma_uint32 sampleRate); + + struct ma_encoder + { + ma_encoder_config config; + ma_encoder_write_proc onWrite; + ma_encoder_seek_proc onSeek; + ma_encoder_init_proc onInit; + ma_encoder_uninit_proc onUninit; + ma_encoder_write_pcm_frames_proc onWritePCMFrames; + void* pUserData; + void* pInternalEncoder; + union + { + struct + { + ma_vfs* pVFS; + ma_vfs_file file; + } vfs; + } data; + }; + + MA_API ma_result ma_encoder_init(ma_encoder_write_proc onWrite, ma_encoder_seek_proc onSeek, void* pUserData, const ma_encoder_config* pConfig, ma_encoder* pEncoder); + MA_API ma_result ma_encoder_init_vfs(ma_vfs* pVFS, const char* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder); + MA_API ma_result ma_encoder_init_vfs_w(ma_vfs* pVFS, const wchar_t* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder); + MA_API ma_result ma_encoder_init_file(const char* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder); + MA_API ma_result ma_encoder_init_file_w(const wchar_t* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder); + MA_API void ma_encoder_uninit(ma_encoder* pEncoder); + MA_API ma_result ma_encoder_write_pcm_frames(ma_encoder* pEncoder, const void* pFramesIn, ma_uint64 frameCount, ma_uint64* pFramesWritten); + +#endif /* MA_NO_ENCODING */ + + + /************************************************************************************************************************************************************ + + Generation + + 
************************************************************************************************************************************************************/ +#ifndef MA_NO_GENERATION + typedef enum + { + ma_waveform_type_sine, + ma_waveform_type_square, + ma_waveform_type_triangle, + ma_waveform_type_sawtooth + } ma_waveform_type; + + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_waveform_type type; + double amplitude; + double frequency; + } ma_waveform_config; + + MA_API ma_waveform_config ma_waveform_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_waveform_type type, double amplitude, double frequency); + + typedef struct + { + ma_data_source_base ds; + ma_waveform_config config; + double advance; + double time; + } ma_waveform; + + MA_API ma_result ma_waveform_init(const ma_waveform_config* pConfig, ma_waveform* pWaveform); + MA_API void ma_waveform_uninit(ma_waveform* pWaveform); + MA_API ma_result ma_waveform_read_pcm_frames(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); + MA_API ma_result ma_waveform_seek_to_pcm_frame(ma_waveform* pWaveform, ma_uint64 frameIndex); + MA_API ma_result ma_waveform_set_amplitude(ma_waveform* pWaveform, double amplitude); + MA_API ma_result ma_waveform_set_frequency(ma_waveform* pWaveform, double frequency); + MA_API ma_result ma_waveform_set_type(ma_waveform* pWaveform, ma_waveform_type type); + MA_API ma_result ma_waveform_set_sample_rate(ma_waveform* pWaveform, ma_uint32 sampleRate); + + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double dutyCycle; + double amplitude; + double frequency; + } ma_pulsewave_config; + + MA_API ma_pulsewave_config ma_pulsewave_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double dutyCycle, double amplitude, double frequency); + + typedef struct + { + ma_waveform waveform; + ma_pulsewave_config config; + } ma_pulsewave; + + MA_API ma_result ma_pulsewave_init(const ma_pulsewave_config* pConfig, ma_pulsewave* pWaveform); + MA_API void ma_pulsewave_uninit(ma_pulsewave* pWaveform); + MA_API ma_result ma_pulsewave_read_pcm_frames(ma_pulsewave* pWaveform, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); + MA_API ma_result ma_pulsewave_seek_to_pcm_frame(ma_pulsewave* pWaveform, ma_uint64 frameIndex); + MA_API ma_result ma_pulsewave_set_amplitude(ma_pulsewave* pWaveform, double amplitude); + MA_API ma_result ma_pulsewave_set_frequency(ma_pulsewave* pWaveform, double frequency); + MA_API ma_result ma_pulsewave_set_sample_rate(ma_pulsewave* pWaveform, ma_uint32 sampleRate); + MA_API ma_result ma_pulsewave_set_duty_cycle(ma_pulsewave* pWaveform, double dutyCycle); + + typedef enum + { + ma_noise_type_white, + ma_noise_type_pink, + ma_noise_type_brownian + } ma_noise_type; + + + typedef struct + { + ma_format format; + ma_uint32 channels; + ma_noise_type type; + ma_int32 seed; + double amplitude; + ma_bool32 duplicateChannels; + } ma_noise_config; + + MA_API ma_noise_config ma_noise_config_init(ma_format format, ma_uint32 channels, ma_noise_type type, ma_int32 seed, double amplitude); + + typedef struct + { + ma_data_source_vtable ds; + ma_noise_config config; + ma_lcg lcg; + union + { + struct + { + double** bin; + double* accumulation; + ma_uint32* counter; + } pink; + struct + { + double* accumulation; + } brownian; + } state; + + /* Memory management. 
*/ + void* _pHeap; + ma_bool32 _ownsHeap; + } ma_noise; + + MA_API ma_result ma_noise_get_heap_size(const ma_noise_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_noise_init_preallocated(const ma_noise_config* pConfig, void* pHeap, ma_noise* pNoise); + MA_API ma_result ma_noise_init(const ma_noise_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_noise* pNoise); + MA_API void ma_noise_uninit(ma_noise* pNoise, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_result ma_noise_read_pcm_frames(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); + MA_API ma_result ma_noise_set_amplitude(ma_noise* pNoise, double amplitude); + MA_API ma_result ma_noise_set_seed(ma_noise* pNoise, ma_int32 seed); + MA_API ma_result ma_noise_set_type(ma_noise* pNoise, ma_noise_type type); + +#endif /* MA_NO_GENERATION */ + + + + /************************************************************************************************************************************************************ + + Resource Manager + + ************************************************************************************************************************************************************/ + /* The resource manager cannot be enabled if there is no decoder. */ +#if !defined(MA_NO_RESOURCE_MANAGER) && defined(MA_NO_DECODING) +#define MA_NO_RESOURCE_MANAGER +#endif + +#ifndef MA_NO_RESOURCE_MANAGER + typedef struct ma_resource_manager ma_resource_manager; + typedef struct ma_resource_manager_data_buffer_node ma_resource_manager_data_buffer_node; + typedef struct ma_resource_manager_data_buffer ma_resource_manager_data_buffer; + typedef struct ma_resource_manager_data_stream ma_resource_manager_data_stream; + typedef struct ma_resource_manager_data_source ma_resource_manager_data_source; + + typedef enum + { + MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM = 0x00000001, /* When set, does not load the entire data source in memory. Disk I/O will happen on job threads. */ + MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE = 0x00000002, /* Decode data before storing in memory. When set, decoding is done at the resource manager level rather than the mixing thread. Results in faster mixing, but higher memory usage. */ + MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC = 0x00000004, /* When set, the resource manager will load the data source asynchronously. */ + MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT = 0x00000008, /* When set, waits for initialization of the underlying data source before returning from ma_resource_manager_data_source_init(). */ + MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_UNKNOWN_LENGTH = 0x00000010 /* Gives the resource manager a hint that the length of the data source is unknown and calling `ma_data_source_get_length_in_pcm_frames()` should be avoided. */ + } ma_resource_manager_data_source_flags; + + + /* + Pipeline notifications used by the resource manager. Made up of both an async notification and a fence, both of which are optional. + */ + typedef struct + { + ma_async_notification* pNotification; + ma_fence* pFence; + } ma_resource_manager_pipeline_stage_notification; + + typedef struct + { + ma_resource_manager_pipeline_stage_notification init; /* Initialization of the decoder. */ + ma_resource_manager_pipeline_stage_notification done; /* Decoding fully completed. 
*/ + } ma_resource_manager_pipeline_notifications; + + MA_API ma_resource_manager_pipeline_notifications ma_resource_manager_pipeline_notifications_init(void); + + + + /* BEGIN BACKWARDS COMPATIBILITY */ + /* TODO: Remove this block in version 0.12. */ +#if 1 +#define ma_resource_manager_job ma_job +#define ma_resource_manager_job_init ma_job_init +#define MA_JOB_TYPE_RESOURCE_MANAGER_QUEUE_FLAG_NON_BLOCKING MA_JOB_QUEUE_FLAG_NON_BLOCKING +#define ma_resource_manager_job_queue_config ma_job_queue_config +#define ma_resource_manager_job_queue_config_init ma_job_queue_config_init +#define ma_resource_manager_job_queue ma_job_queue +#define ma_resource_manager_job_queue_get_heap_size ma_job_queue_get_heap_size +#define ma_resource_manager_job_queue_init_preallocated ma_job_queue_init_preallocated +#define ma_resource_manager_job_queue_init ma_job_queue_init +#define ma_resource_manager_job_queue_uninit ma_job_queue_uninit +#define ma_resource_manager_job_queue_post ma_job_queue_post +#define ma_resource_manager_job_queue_next ma_job_queue_next +#endif + /* END BACKWARDS COMPATIBILITY */ + + + + + /* Maximum job thread count will be restricted to this, but this may be removed later and replaced with a heap allocation thereby removing any limitation. */ +#ifndef MA_RESOURCE_MANAGER_MAX_JOB_THREAD_COUNT +#define MA_RESOURCE_MANAGER_MAX_JOB_THREAD_COUNT 64 +#endif + + typedef enum + { + /* Indicates ma_resource_manager_next_job() should not block. Only valid when the job thread count is 0. */ + MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING = 0x00000001, + + /* Disables any kind of multithreading. Implicitly enables MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING. */ + MA_RESOURCE_MANAGER_FLAG_NO_THREADING = 0x00000002 + } ma_resource_manager_flags; + + typedef struct + { + const char* pFilePath; + const wchar_t* pFilePathW; + const ma_resource_manager_pipeline_notifications* pNotifications; + ma_uint64 initialSeekPointInPCMFrames; + ma_uint64 rangeBegInPCMFrames; + ma_uint64 rangeEndInPCMFrames; + ma_uint64 loopPointBegInPCMFrames; + ma_uint64 loopPointEndInPCMFrames; + ma_bool32 isLooping; + ma_uint32 flags; + } ma_resource_manager_data_source_config; + + MA_API ma_resource_manager_data_source_config ma_resource_manager_data_source_config_init(void); + + + typedef enum + { + ma_resource_manager_data_supply_type_unknown = 0, /* Used for determining whether or the data supply has been initialized. */ + ma_resource_manager_data_supply_type_encoded, /* Data supply is an encoded buffer. Connector is ma_decoder. */ + ma_resource_manager_data_supply_type_decoded, /* Data supply is a decoded buffer. Connector is ma_audio_buffer. */ + ma_resource_manager_data_supply_type_decoded_paged /* Data supply is a linked list of decoded buffers. Connector is ma_paged_audio_buffer. */ + } ma_resource_manager_data_supply_type; + + typedef struct + { + MA_ATOMIC(4, ma_resource_manager_data_supply_type) type; /* Read and written from different threads so needs to be accessed atomically. */ + union + { + struct + { + const void* pData; + size_t sizeInBytes; + } encoded; + struct + { + const void* pData; + ma_uint64 totalFrameCount; + ma_uint64 decodedFrameCount; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + } decoded; + struct + { + ma_paged_audio_buffer_data data; + ma_uint64 decodedFrameCount; + ma_uint32 sampleRate; + } decodedPaged; + } backend; + } ma_resource_manager_data_supply; + + struct ma_resource_manager_data_buffer_node + { + ma_uint32 hashedName32; /* The hashed name. This is the key. 
ma_uint32 refCount;
+        MA_ATOMIC(4, ma_result) result; /* Result from asynchronous loading. When loading set to MA_BUSY. When fully loaded set to MA_SUCCESS. When deleting set to MA_UNAVAILABLE. */
+        MA_ATOMIC(4, ma_uint32) executionCounter; /* For allocating execution orders for jobs. */
+        MA_ATOMIC(4, ma_uint32) executionPointer; /* For managing the order of execution for asynchronous jobs relating to this object. Incremented as jobs complete processing. */
+        ma_bool32 isDataOwnedByResourceManager; /* Set to true when the underlying data buffer was allocated by the resource manager. Set to false if it is owned by the application (via ma_resource_manager_register_*()). */
+        ma_resource_manager_data_supply data;
+        ma_resource_manager_data_buffer_node* pParent;
+        ma_resource_manager_data_buffer_node* pChildLo;
+        ma_resource_manager_data_buffer_node* pChildHi;
+    };
+
+    struct ma_resource_manager_data_buffer
+    {
+        ma_data_source_base ds; /* Base data source. A data buffer is a data source. */
+        ma_resource_manager* pResourceManager; /* A pointer to the resource manager that owns this buffer. */
+        ma_resource_manager_data_buffer_node* pNode; /* The data node. This is reference counted and is what supplies the data. */
+        ma_uint32 flags; /* The flags that were passed in to initialize the buffer. */
+        MA_ATOMIC(4, ma_uint32) executionCounter; /* For allocating execution orders for jobs. */
+        MA_ATOMIC(4, ma_uint32) executionPointer; /* For managing the order of execution for asynchronous jobs relating to this object. Incremented as jobs complete processing. */
+        ma_uint64 seekTargetInPCMFrames; /* Only updated by the public API. Never written nor read from the job thread. */
+        ma_bool32 seekToCursorOnNextRead; /* On the next read we need to seek to the frame cursor. */
+        MA_ATOMIC(4, ma_result) result; /* Keeps track of a result of decoding. Set to MA_BUSY while the buffer is still loading. Set to MA_SUCCESS when loading is finished successfully. Otherwise set to some other code. */
+        MA_ATOMIC(4, ma_bool32) isLooping; /* Can be read and written by different threads at the same time. Must be used atomically. */
+        ma_atomic_bool32 isConnectorInitialized; /* Used for asynchronous loading to ensure we don't try to initialize the connector multiple times while waiting for the node to fully load. */
+        union
+        {
+            ma_decoder decoder; /* Supply type is ma_resource_manager_data_supply_type_encoded */
+            ma_audio_buffer buffer; /* Supply type is ma_resource_manager_data_supply_type_decoded */
+            ma_paged_audio_buffer pagedBuffer; /* Supply type is ma_resource_manager_data_supply_type_decoded_paged */
+        } connector; /* Connects this object to the node's data supply. */
+    };
+
+    struct ma_resource_manager_data_stream
+    {
+        ma_data_source_base ds; /* Base data source. A data stream is a data source. */
+        ma_resource_manager* pResourceManager; /* A pointer to the resource manager that owns this data stream. */
+        ma_uint32 flags; /* The flags that were passed in to initialize the stream. */
+        ma_decoder decoder; /* Used for filling pages with data. This is only ever accessed by the job thread. The public API should never touch this. */
+        ma_bool32 isDecoderInitialized; /* Required for determining whether or not the decoder should be uninitialized in MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_STREAM. */
+        ma_uint64 totalLengthInPCMFrames; /* This is calculated when first loaded by the MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_STREAM.
*/ + ma_uint32 relativeCursor; /* The playback cursor, relative to the current page. Only ever accessed by the public API. Never accessed by the job thread. */ + MA_ATOMIC(8, ma_uint64) absoluteCursor; /* The playback cursor, in absolute position starting from the start of the file. */ + ma_uint32 currentPageIndex; /* Toggles between 0 and 1. Index 0 is the first half of pPageData. Index 1 is the second half. Only ever accessed by the public API. Never accessed by the job thread. */ + MA_ATOMIC(4, ma_uint32) executionCounter; /* For allocating execution orders for jobs. */ + MA_ATOMIC(4, ma_uint32) executionPointer; /* For managing the order of execution for asynchronous jobs relating to this object. Incremented as jobs complete processing. */ + + /* Written by the public API, read by the job thread. */ + MA_ATOMIC(4, ma_bool32) isLooping; /* Whether or not the stream is looping. It's important to set the looping flag at the data stream level for smooth loop transitions. */ + + /* Written by the job thread, read by the public API. */ + void* pPageData; /* Buffer containing the decoded data of each page. Allocated once at initialization time. */ + MA_ATOMIC(4, ma_uint32) pageFrameCount[2]; /* The number of valid PCM frames in each page. Used to determine the last valid frame. */ + + /* Written and read by both the public API and the job thread. These must be atomic. */ + MA_ATOMIC(4, ma_result) result; /* Result from asynchronous loading. When loading set to MA_BUSY. When initialized set to MA_SUCCESS. When deleting set to MA_UNAVAILABLE. If an error occurs when loading, set to an error code. */ + MA_ATOMIC(4, ma_bool32) isDecoderAtEnd; /* Whether or not the decoder has reached the end. */ + MA_ATOMIC(4, ma_bool32) isPageValid[2]; /* Booleans to indicate whether or not a page is valid. Set to false by the public API, set to true by the job thread. Set to false as the pages are consumed, true when they are filled. */ + MA_ATOMIC(4, ma_bool32) seekCounter; /* When 0, no seeking is being performed. When > 0, a seek is being performed and reading should be delayed with MA_BUSY. */ + }; + + struct ma_resource_manager_data_source + { + union + { + ma_resource_manager_data_buffer buffer; + ma_resource_manager_data_stream stream; + } backend; /* Must be the first item because we need the first item to be the data source callbacks for the buffer or stream. */ + + ma_uint32 flags; /* The flags that were passed in to ma_resource_manager_data_source_init(). */ + MA_ATOMIC(4, ma_uint32) executionCounter; /* For allocating execution orders for jobs. */ + MA_ATOMIC(4, ma_uint32) executionPointer; /* For managing the order of execution for asynchronous jobs relating to this object. Incremented as jobs complete processing. */ + }; + + typedef struct + { + ma_allocation_callbacks allocationCallbacks; + ma_log* pLog; + ma_format decodedFormat; /* The decoded format to use. Set to ma_format_unknown (default) to use the file's native format. */ + ma_uint32 decodedChannels; /* The decoded channel count to use. Set to 0 (default) to use the file's native channel count. */ + ma_uint32 decodedSampleRate; /* the decoded sample rate to use. Set to 0 (default) to use the file's native sample rate. */ + ma_uint32 jobThreadCount; /* Set to 0 if you want to self-manage your job threads. Defaults to 1. */ + size_t jobThreadStackSize; + ma_uint32 jobQueueCapacity; /* The maximum number of jobs that can fit in the queue at a time. Defaults to MA_JOB_TYPE_RESOURCE_MANAGER_QUEUE_CAPACITY. Cannot be zero. 
*/ + ma_uint32 flags; + ma_vfs* pVFS; /* Can be NULL in which case defaults will be used. */ + ma_decoding_backend_vtable** ppCustomDecodingBackendVTables; + ma_uint32 customDecodingBackendCount; + void* pCustomDecodingBackendUserData; + } ma_resource_manager_config; + + MA_API ma_resource_manager_config ma_resource_manager_config_init(void); + + struct ma_resource_manager + { + ma_resource_manager_config config; + ma_resource_manager_data_buffer_node* pRootDataBufferNode; /* The root buffer in the binary tree. */ +#ifndef MA_NO_THREADING + ma_mutex dataBufferBSTLock; /* For synchronizing access to the data buffer binary tree. */ + ma_thread jobThreads[MA_RESOURCE_MANAGER_MAX_JOB_THREAD_COUNT]; /* The threads for executing jobs. */ +#endif + ma_job_queue jobQueue; /* Multi-consumer, multi-producer job queue for managing jobs for asynchronous decoding and streaming. */ + ma_default_vfs defaultVFS; /* Only used if a custom VFS is not specified. */ + ma_log log; /* Only used if no log was specified in the config. */ + }; + + /* Init. */ + MA_API ma_result ma_resource_manager_init(const ma_resource_manager_config* pConfig, ma_resource_manager* pResourceManager); + MA_API void ma_resource_manager_uninit(ma_resource_manager* pResourceManager); + MA_API ma_log* ma_resource_manager_get_log(ma_resource_manager* pResourceManager); + + /* Registration. */ + MA_API ma_result ma_resource_manager_register_file(ma_resource_manager* pResourceManager, const char* pFilePath, ma_uint32 flags); + MA_API ma_result ma_resource_manager_register_file_w(ma_resource_manager* pResourceManager, const wchar_t* pFilePath, ma_uint32 flags); + MA_API ma_result ma_resource_manager_register_decoded_data(ma_resource_manager* pResourceManager, const char* pName, const void* pData, ma_uint64 frameCount, ma_format format, ma_uint32 channels, ma_uint32 sampleRate); /* Does not copy. Increments the reference count if already exists and returns MA_SUCCESS. */ + MA_API ma_result ma_resource_manager_register_decoded_data_w(ma_resource_manager* pResourceManager, const wchar_t* pName, const void* pData, ma_uint64 frameCount, ma_format format, ma_uint32 channels, ma_uint32 sampleRate); + MA_API ma_result ma_resource_manager_register_encoded_data(ma_resource_manager* pResourceManager, const char* pName, const void* pData, size_t sizeInBytes); /* Does not copy. Increments the reference count if already exists and returns MA_SUCCESS. */ + MA_API ma_result ma_resource_manager_register_encoded_data_w(ma_resource_manager* pResourceManager, const wchar_t* pName, const void* pData, size_t sizeInBytes); + MA_API ma_result ma_resource_manager_unregister_file(ma_resource_manager* pResourceManager, const char* pFilePath); + MA_API ma_result ma_resource_manager_unregister_file_w(ma_resource_manager* pResourceManager, const wchar_t* pFilePath); + MA_API ma_result ma_resource_manager_unregister_data(ma_resource_manager* pResourceManager, const char* pName); + MA_API ma_result ma_resource_manager_unregister_data_w(ma_resource_manager* pResourceManager, const wchar_t* pName); + + /* Data Buffers. 
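+
+    A minimal usage sketch (illustrative only; `resourceManager` is assumed to be an initialized
+    `ma_resource_manager` and the file path is hypothetical):
+
+    ```
+    ma_resource_manager_data_buffer dataBuffer;
+    ma_result result;
+
+    result = ma_resource_manager_data_buffer_init(&resourceManager, "my_sound.wav", MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE, NULL, &dataBuffer);
+    if (result != MA_SUCCESS) {
+        // Failed to load the sound.
+    }
+
+    // Read like any other data source. The output format can be queried with
+    // ma_resource_manager_data_buffer_get_data_format(). Stereo f32 output is assumed here for illustration.
+    float frames[1024 * 2];
+    ma_uint64 framesRead;
+    ma_resource_manager_data_buffer_read_pcm_frames(&dataBuffer, frames, 1024, &framesRead);
+
+    ma_resource_manager_data_buffer_uninit(&dataBuffer);
+    ```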
*/ + MA_API ma_result ma_resource_manager_data_buffer_init_ex(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source_config* pConfig, ma_resource_manager_data_buffer* pDataBuffer); + MA_API ma_result ma_resource_manager_data_buffer_init(ma_resource_manager* pResourceManager, const char* pFilePath, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_buffer* pDataBuffer); + MA_API ma_result ma_resource_manager_data_buffer_init_w(ma_resource_manager* pResourceManager, const wchar_t* pFilePath, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_buffer* pDataBuffer); + MA_API ma_result ma_resource_manager_data_buffer_init_copy(ma_resource_manager* pResourceManager, const ma_resource_manager_data_buffer* pExistingDataBuffer, ma_resource_manager_data_buffer* pDataBuffer); + MA_API ma_result ma_resource_manager_data_buffer_uninit(ma_resource_manager_data_buffer* pDataBuffer); + MA_API ma_result ma_resource_manager_data_buffer_read_pcm_frames(ma_resource_manager_data_buffer* pDataBuffer, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); + MA_API ma_result ma_resource_manager_data_buffer_seek_to_pcm_frame(ma_resource_manager_data_buffer* pDataBuffer, ma_uint64 frameIndex); + MA_API ma_result ma_resource_manager_data_buffer_get_data_format(ma_resource_manager_data_buffer* pDataBuffer, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); + MA_API ma_result ma_resource_manager_data_buffer_get_cursor_in_pcm_frames(ma_resource_manager_data_buffer* pDataBuffer, ma_uint64* pCursor); + MA_API ma_result ma_resource_manager_data_buffer_get_length_in_pcm_frames(ma_resource_manager_data_buffer* pDataBuffer, ma_uint64* pLength); + MA_API ma_result ma_resource_manager_data_buffer_result(const ma_resource_manager_data_buffer* pDataBuffer); + MA_API ma_result ma_resource_manager_data_buffer_set_looping(ma_resource_manager_data_buffer* pDataBuffer, ma_bool32 isLooping); + MA_API ma_bool32 ma_resource_manager_data_buffer_is_looping(const ma_resource_manager_data_buffer* pDataBuffer); + MA_API ma_result ma_resource_manager_data_buffer_get_available_frames(ma_resource_manager_data_buffer* pDataBuffer, ma_uint64* pAvailableFrames); + + /* Data Streams. 
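+
+    Streams decode a page at a time on the job thread rather than up front, which makes them suited
+    to long music tracks. Minimal sketch (the `rm` variable, the file name and the output buffer are
+    placeholders):
+
+        ma_resource_manager_data_stream dataStream;
+        if (ma_resource_manager_data_stream_init(&rm, "music.ogg", 0, NULL, &dataStream) == MA_SUCCESS) {
+            ma_uint64 framesRead;
+            ma_resource_manager_data_stream_read_pcm_frames(&dataStream, pOutput, frameCount, &framesRead);
+            ma_resource_manager_data_stream_uninit(&dataStream);
+        }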
*/ + MA_API ma_result ma_resource_manager_data_stream_init_ex(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source_config* pConfig, ma_resource_manager_data_stream* pDataStream); + MA_API ma_result ma_resource_manager_data_stream_init(ma_resource_manager* pResourceManager, const char* pFilePath, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_stream* pDataStream); + MA_API ma_result ma_resource_manager_data_stream_init_w(ma_resource_manager* pResourceManager, const wchar_t* pFilePath, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_stream* pDataStream); + MA_API ma_result ma_resource_manager_data_stream_uninit(ma_resource_manager_data_stream* pDataStream); + MA_API ma_result ma_resource_manager_data_stream_read_pcm_frames(ma_resource_manager_data_stream* pDataStream, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); + MA_API ma_result ma_resource_manager_data_stream_seek_to_pcm_frame(ma_resource_manager_data_stream* pDataStream, ma_uint64 frameIndex); + MA_API ma_result ma_resource_manager_data_stream_get_data_format(ma_resource_manager_data_stream* pDataStream, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); + MA_API ma_result ma_resource_manager_data_stream_get_cursor_in_pcm_frames(ma_resource_manager_data_stream* pDataStream, ma_uint64* pCursor); + MA_API ma_result ma_resource_manager_data_stream_get_length_in_pcm_frames(ma_resource_manager_data_stream* pDataStream, ma_uint64* pLength); + MA_API ma_result ma_resource_manager_data_stream_result(const ma_resource_manager_data_stream* pDataStream); + MA_API ma_result ma_resource_manager_data_stream_set_looping(ma_resource_manager_data_stream* pDataStream, ma_bool32 isLooping); + MA_API ma_bool32 ma_resource_manager_data_stream_is_looping(const ma_resource_manager_data_stream* pDataStream); + MA_API ma_result ma_resource_manager_data_stream_get_available_frames(ma_resource_manager_data_stream* pDataStream, ma_uint64* pAvailableFrames); + + /* Data Sources. 
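+
+    A data source is a thin wrapper which behaves as either a data buffer or a data stream depending
+    on whether MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM is included in the flags (see the `backend`
+    union in ma_resource_manager_data_source above). Sketch, with `rm` and the file name assumed:
+
+        ma_resource_manager_data_source dataSource;
+        ma_resource_manager_data_source_init(&rm, "ambience.wav", MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM, NULL, &dataSource);
+
+    Reading and seeking then go through ma_resource_manager_data_source_read_pcm_frames() and
+    ma_resource_manager_data_source_seek_to_pcm_frame(), with ma_resource_manager_data_source_uninit()
+    to clean up.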
*/ + MA_API ma_result ma_resource_manager_data_source_init_ex(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source_config* pConfig, ma_resource_manager_data_source* pDataSource); + MA_API ma_result ma_resource_manager_data_source_init(ma_resource_manager* pResourceManager, const char* pName, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_source* pDataSource); + MA_API ma_result ma_resource_manager_data_source_init_w(ma_resource_manager* pResourceManager, const wchar_t* pName, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_source* pDataSource); + MA_API ma_result ma_resource_manager_data_source_init_copy(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source* pExistingDataSource, ma_resource_manager_data_source* pDataSource); + MA_API ma_result ma_resource_manager_data_source_uninit(ma_resource_manager_data_source* pDataSource); + MA_API ma_result ma_resource_manager_data_source_read_pcm_frames(ma_resource_manager_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); + MA_API ma_result ma_resource_manager_data_source_seek_to_pcm_frame(ma_resource_manager_data_source* pDataSource, ma_uint64 frameIndex); + MA_API ma_result ma_resource_manager_data_source_get_data_format(ma_resource_manager_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); + MA_API ma_result ma_resource_manager_data_source_get_cursor_in_pcm_frames(ma_resource_manager_data_source* pDataSource, ma_uint64* pCursor); + MA_API ma_result ma_resource_manager_data_source_get_length_in_pcm_frames(ma_resource_manager_data_source* pDataSource, ma_uint64* pLength); + MA_API ma_result ma_resource_manager_data_source_result(const ma_resource_manager_data_source* pDataSource); + MA_API ma_result ma_resource_manager_data_source_set_looping(ma_resource_manager_data_source* pDataSource, ma_bool32 isLooping); + MA_API ma_bool32 ma_resource_manager_data_source_is_looping(const ma_resource_manager_data_source* pDataSource); + MA_API ma_result ma_resource_manager_data_source_get_available_frames(ma_resource_manager_data_source* pDataSource, ma_uint64* pAvailableFrames); + + /* Job management. */ + MA_API ma_result ma_resource_manager_post_job(ma_resource_manager* pResourceManager, const ma_job* pJob); + MA_API ma_result ma_resource_manager_post_job_quit(ma_resource_manager* pResourceManager); /* Helper for posting a quit job. */ + MA_API ma_result ma_resource_manager_next_job(ma_resource_manager* pResourceManager, ma_job* pJob); + MA_API ma_result ma_resource_manager_process_job(ma_resource_manager* pResourceManager, ma_job* pJob); /* DEPRECATED. Use ma_job_process(). Will be removed in version 0.12. */ + MA_API ma_result ma_resource_manager_process_next_job(ma_resource_manager* pResourceManager); /* Returns MA_CANCELLED if a MA_JOB_TYPE_QUIT job is found. In non-blocking mode, returns MA_NO_DATA_AVAILABLE if no jobs are available. 
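+
+    For example, when `jobThreadCount` in the config is set to 0 (self-managed job threads), a
+    minimal job loop can be sketched as follows, with `rm` being the initialized resource manager:
+
+        while (ma_resource_manager_process_next_job(&rm) != MA_CANCELLED) {
+            continue;
+        }
+
+    Another thread can break the loop by posting a quit job with ma_resource_manager_post_job_quit().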
*/ +#endif /* MA_NO_RESOURCE_MANAGER */ + + + + /************************************************************************************************************************************************************ + + Node Graph + + ************************************************************************************************************************************************************/ +#ifndef MA_NO_NODE_GRAPH + /* Must never exceed 254. */ +#ifndef MA_MAX_NODE_BUS_COUNT +#define MA_MAX_NODE_BUS_COUNT 254 +#endif + + /* Used internally by miniaudio for memory management. Must never exceed MA_MAX_NODE_BUS_COUNT. */ +#ifndef MA_MAX_NODE_LOCAL_BUS_COUNT +#define MA_MAX_NODE_LOCAL_BUS_COUNT 2 +#endif + + /* Use this when the bus count is determined by the node instance rather than the vtable. */ +#define MA_NODE_BUS_COUNT_UNKNOWN 255 + + typedef struct ma_node_graph ma_node_graph; + typedef void ma_node; + + + /* Node flags. */ + typedef enum + { + MA_NODE_FLAG_PASSTHROUGH = 0x00000001, + MA_NODE_FLAG_CONTINUOUS_PROCESSING = 0x00000002, + MA_NODE_FLAG_ALLOW_NULL_INPUT = 0x00000004, + MA_NODE_FLAG_DIFFERENT_PROCESSING_RATES = 0x00000008, + MA_NODE_FLAG_SILENT_OUTPUT = 0x00000010 + } ma_node_flags; + + + /* The playback state of a node. Either started or stopped. */ + typedef enum + { + ma_node_state_started = 0, + ma_node_state_stopped = 1 + } ma_node_state; + + + typedef struct + { + /* + Extended processing callback. This callback is used for effects that process input and output + at different rates (i.e. they perform resampling). This is similar to the simple version, only + they take two seperate frame counts: one for input, and one for output. + + On input, `pFrameCountOut` is equal to the capacity of the output buffer for each bus, whereas + `pFrameCountIn` will be equal to the number of PCM frames in each of the buffers in `ppFramesIn`. + + On output, set `pFrameCountOut` to the number of PCM frames that were actually output and set + `pFrameCountIn` to the number of input frames that were consumed. + */ + void (* onProcess)(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut); + + /* + A callback for retrieving the number of a input frames that are required to output the + specified number of output frames. You would only want to implement this when the node performs + resampling. This is optional, even for nodes that perform resampling, but it does offer a + small reduction in latency as it allows miniaudio to calculate the exact number of input frames + to read at a time instead of having to estimate. + */ + ma_result (* onGetRequiredInputFrameCount)(ma_node* pNode, ma_uint32 outputFrameCount, ma_uint32* pInputFrameCount); + + /* + The number of input buses. This is how many sub-buffers will be contained in the `ppFramesIn` + parameters of the callbacks above. + */ + ma_uint8 inputBusCount; + + /* + The number of output buses. This is how many sub-buffers will be contained in the `ppFramesOut` + parameters of the callbacks above. + */ + ma_uint8 outputBusCount; + + /* + Flags describing characteristics of the node. This is currently just a placeholder for some + ideas for later on. + */ + ma_uint32 flags; + } ma_node_vtable; + + typedef struct + { + const ma_node_vtable* vtable; /* Should never be null. Initialization of the node will fail if so. */ + ma_node_state initialState; /* Defaults to ma_node_state_started. 
*/
+ ma_uint32 inputBusCount; /* Only used if the vtable specifies an input bus count of `MA_NODE_BUS_COUNT_UNKNOWN`, otherwise must be set to `MA_NODE_BUS_COUNT_UNKNOWN` (default). */
+ ma_uint32 outputBusCount; /* Only used if the vtable specifies an output bus count of `MA_NODE_BUS_COUNT_UNKNOWN`, otherwise must be set to `MA_NODE_BUS_COUNT_UNKNOWN` (default). */
+ const ma_uint32* pInputChannels; /* The number of elements is determined by the input bus count as determined by the vtable, or `inputBusCount` if the vtable specifies `MA_NODE_BUS_COUNT_UNKNOWN`. */
+ const ma_uint32* pOutputChannels; /* The number of elements is determined by the output bus count as determined by the vtable, or `outputBusCount` if the vtable specifies `MA_NODE_BUS_COUNT_UNKNOWN`. */
+ } ma_node_config;
+
+ MA_API ma_node_config ma_node_config_init(void);
+
+
+ /*
+ A node has multiple output buses. An output bus is attached to an input bus as an item in a linked
+ list. Think of the input bus as a linked list, with the output bus being an item in that list.
+ */
+ typedef struct ma_node_output_bus ma_node_output_bus;
+ struct ma_node_output_bus
+ {
+ /* Immutable. */
+ ma_node* pNode; /* The node that owns this output bus. The input node. Will be null for dummy head and tail nodes. */
+ ma_uint8 outputBusIndex; /* The index of the output bus on pNode that this output bus represents. */
+ ma_uint8 channels; /* The number of channels in the audio stream for this bus. */
+
+ /* Mutable via multiple threads. Must be used atomically. The weird ordering here is for packing reasons. */
+ ma_uint8 inputNodeInputBusIndex; /* The index of the input bus on the input. Required for detaching. Will only be used within the spinlock so does not need to be atomic. */
+ MA_ATOMIC(4, ma_uint32) flags; /* Some state flags for tracking the read state of the output buffer. A combination of MA_NODE_OUTPUT_BUS_FLAG_*. */
+ MA_ATOMIC(4, ma_uint32) refCount; /* Reference count for some thread-safety when detaching. */
+ MA_ATOMIC(4, ma_bool32) isAttached; /* This is used to prevent iteration of nodes that are in the middle of being detached. Used for thread safety. */
+ MA_ATOMIC(4, ma_spinlock) lock; /* Unfortunate lock, but significantly simplifies the implementation. Required for thread-safe attaching and detaching. */
+ MA_ATOMIC(4, float) volume; /* Linear. */
+ MA_ATOMIC(MA_SIZEOF_PTR, ma_node_output_bus*) pNext; /* If null, it's the tail node or detached. */
+ MA_ATOMIC(MA_SIZEOF_PTR, ma_node_output_bus*) pPrev; /* If null, it's the head node or detached. */
+ MA_ATOMIC(MA_SIZEOF_PTR, ma_node*) pInputNode; /* The node that this output bus is attached to. Required for detaching. */
+ };
+
+ /*
+ A node has multiple input buses. The output buses of a node are connected to the input buses of
+ another. An input bus is essentially just a linked list of output buses.
+ */
+ typedef struct ma_node_input_bus ma_node_input_bus;
+ struct ma_node_input_bus
+ {
+ /* Mutable via multiple threads. */
+ ma_node_output_bus head; /* Dummy head node for simplifying some lock-free thread-safety stuff. */
+ MA_ATOMIC(4, ma_uint32) nextCounter; /* This is used to determine whether or not the input bus is finding the next node in the list. Used for thread safety when detaching output buses. */
+ MA_ATOMIC(4, ma_spinlock) lock; /* Unfortunate lock, but significantly simplifies the implementation. Required for thread-safe attaching and detaching. */
+
+ /* Set once at startup. 
*/ + ma_uint8 channels; /* The number of channels in the audio stream for this bus. */ + }; + + + typedef struct ma_node_base ma_node_base; + struct ma_node_base + { + /* These variables are set once at startup. */ + ma_node_graph* pNodeGraph; /* The graph this node belongs to. */ + const ma_node_vtable* vtable; + float* pCachedData; /* Allocated on the heap. Fixed size. Needs to be stored on the heap because reading from output buses is done in separate function calls. */ + ma_uint16 cachedDataCapInFramesPerBus; /* The capacity of the input data cache in frames, per bus. */ + + /* These variables are read and written only from the audio thread. */ + ma_uint16 cachedFrameCountOut; + ma_uint16 cachedFrameCountIn; + ma_uint16 consumedFrameCountIn; + + /* These variables are read and written between different threads. */ + MA_ATOMIC(4, ma_node_state) state; /* When set to stopped, nothing will be read, regardless of the times in stateTimes. */ + MA_ATOMIC(8, ma_uint64) stateTimes[2]; /* Indexed by ma_node_state. Specifies the time based on the global clock that a node should be considered to be in the relevant state. */ + MA_ATOMIC(8, ma_uint64) localTime; /* The node's local clock. This is just a running sum of the number of output frames that have been processed. Can be modified by any thread with `ma_node_set_time()`. */ + ma_uint32 inputBusCount; + ma_uint32 outputBusCount; + ma_node_input_bus* pInputBuses; + ma_node_output_bus* pOutputBuses; + + /* Memory management. */ + ma_node_input_bus _inputBuses[MA_MAX_NODE_LOCAL_BUS_COUNT]; + ma_node_output_bus _outputBuses[MA_MAX_NODE_LOCAL_BUS_COUNT]; + void* _pHeap; /* A heap allocation for internal use only. pInputBuses and/or pOutputBuses will point to this if the bus count exceeds MA_MAX_NODE_LOCAL_BUS_COUNT. */ + ma_bool32 _ownsHeap; /* If set to true, the node owns the heap allocation and _pHeap will be freed in ma_node_uninit(). 
*/
+ };
+
+ MA_API ma_result ma_node_get_heap_size(ma_node_graph* pNodeGraph, const ma_node_config* pConfig, size_t* pHeapSizeInBytes);
+ MA_API ma_result ma_node_init_preallocated(ma_node_graph* pNodeGraph, const ma_node_config* pConfig, void* pHeap, ma_node* pNode);
+ MA_API ma_result ma_node_init(ma_node_graph* pNodeGraph, const ma_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_node* pNode);
+ MA_API void ma_node_uninit(ma_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks);
+ MA_API ma_node_graph* ma_node_get_node_graph(const ma_node* pNode);
+ MA_API ma_uint32 ma_node_get_input_bus_count(const ma_node* pNode);
+ MA_API ma_uint32 ma_node_get_output_bus_count(const ma_node* pNode);
+ MA_API ma_uint32 ma_node_get_input_channels(const ma_node* pNode, ma_uint32 inputBusIndex);
+ MA_API ma_uint32 ma_node_get_output_channels(const ma_node* pNode, ma_uint32 outputBusIndex);
+ MA_API ma_result ma_node_attach_output_bus(ma_node* pNode, ma_uint32 outputBusIndex, ma_node* pOtherNode, ma_uint32 otherNodeInputBusIndex);
+ MA_API ma_result ma_node_detach_output_bus(ma_node* pNode, ma_uint32 outputBusIndex);
+ MA_API ma_result ma_node_detach_all_output_buses(ma_node* pNode);
+ MA_API ma_result ma_node_set_output_bus_volume(ma_node* pNode, ma_uint32 outputBusIndex, float volume);
+ MA_API float ma_node_get_output_bus_volume(const ma_node* pNode, ma_uint32 outputBusIndex);
+ MA_API ma_result ma_node_set_state(ma_node* pNode, ma_node_state state);
+ MA_API ma_node_state ma_node_get_state(const ma_node* pNode);
+ MA_API ma_result ma_node_set_state_time(ma_node* pNode, ma_node_state state, ma_uint64 globalTime);
+ MA_API ma_uint64 ma_node_get_state_time(const ma_node* pNode, ma_node_state state);
+ MA_API ma_node_state ma_node_get_state_by_time(const ma_node* pNode, ma_uint64 globalTime);
+ MA_API ma_node_state ma_node_get_state_by_time_range(const ma_node* pNode, ma_uint64 globalTimeBeg, ma_uint64 globalTimeEnd);
+ MA_API ma_uint64 ma_node_get_time(const ma_node* pNode);
+ MA_API ma_result ma_node_set_time(ma_node* pNode, ma_uint64 localTime);
+
+
+ typedef struct
+ {
+ ma_uint32 channels;
+ ma_uint16 nodeCacheCapInFrames;
+ } ma_node_graph_config;
+
+ MA_API ma_node_graph_config ma_node_graph_config_init(ma_uint32 channels);
+
+
+ struct ma_node_graph
+ {
+ /* Immutable. */
+ ma_node_base base; /* The node graph itself is a node so it can be connected as an input to a different node graph. This has zero inputs and calls ma_node_graph_read_pcm_frames() to generate its output. */
+ ma_node_base endpoint; /* Special node that all nodes eventually connect to. Data is read from this node in ma_node_graph_read_pcm_frames(). */
+ ma_uint16 nodeCacheCapInFrames;
+
+ /* Read and written by multiple threads. 
*/
+ MA_ATOMIC(4, ma_bool32) isReading;
+ };
+
+ MA_API ma_result ma_node_graph_init(const ma_node_graph_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_node_graph* pNodeGraph);
+ MA_API void ma_node_graph_uninit(ma_node_graph* pNodeGraph, const ma_allocation_callbacks* pAllocationCallbacks);
+ MA_API ma_node* ma_node_graph_get_endpoint(ma_node_graph* pNodeGraph);
+ MA_API ma_result ma_node_graph_read_pcm_frames(ma_node_graph* pNodeGraph, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead);
+ MA_API ma_uint32 ma_node_graph_get_channels(const ma_node_graph* pNodeGraph);
+ MA_API ma_uint64 ma_node_graph_get_time(const ma_node_graph* pNodeGraph);
+ MA_API ma_result ma_node_graph_set_time(ma_node_graph* pNodeGraph, ma_uint64 globalTime);
+
+
+
+ /* Data source node. 0 input buses, 1 output bus. Used for reading from a data source. */
+ typedef struct
+ {
+ ma_node_config nodeConfig;
+ ma_data_source* pDataSource;
+ } ma_data_source_node_config;
+
+ MA_API ma_data_source_node_config ma_data_source_node_config_init(ma_data_source* pDataSource);
+
+
+ typedef struct
+ {
+ ma_node_base base;
+ ma_data_source* pDataSource;
+ } ma_data_source_node;
+
+ MA_API ma_result ma_data_source_node_init(ma_node_graph* pNodeGraph, const ma_data_source_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source_node* pDataSourceNode);
+ MA_API void ma_data_source_node_uninit(ma_data_source_node* pDataSourceNode, const ma_allocation_callbacks* pAllocationCallbacks);
+ MA_API ma_result ma_data_source_node_set_looping(ma_data_source_node* pDataSourceNode, ma_bool32 isLooping);
+ MA_API ma_bool32 ma_data_source_node_is_looping(ma_data_source_node* pDataSourceNode);
+
+
+ /* Splitter Node. 1 input, many outputs. Used for splitting/copying a stream so it can be used as input to two separate output nodes. 
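+
+    Rough usage sketch (the node graph, source node and effect node are assumed to exist already):
+
+        ma_splitter_node_config splitterConfig = ma_splitter_node_config_init(channels);
+        ma_splitter_node splitter;
+        ma_splitter_node_init(&nodeGraph, &splitterConfig, NULL, &splitter);
+
+        ma_node_attach_output_bus(&sourceNode, 0, &splitter, 0);                               <- source into the splitter
+        ma_node_attach_output_bus(&splitter, 0, ma_node_graph_get_endpoint(&nodeGraph), 0);    <- dry path
+        ma_node_attach_output_bus(&splitter, 1, &effectNode, 0);                               <- wet path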
*/ + typedef struct + { + ma_node_config nodeConfig; + ma_uint32 channels; + ma_uint32 outputBusCount; + } ma_splitter_node_config; + + MA_API ma_splitter_node_config ma_splitter_node_config_init(ma_uint32 channels); + + + typedef struct + { + ma_node_base base; + } ma_splitter_node; + + MA_API ma_result ma_splitter_node_init(ma_node_graph* pNodeGraph, const ma_splitter_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_splitter_node* pSplitterNode); + MA_API void ma_splitter_node_uninit(ma_splitter_node* pSplitterNode, const ma_allocation_callbacks* pAllocationCallbacks); + + + /* + Biquad Node + */ + typedef struct + { + ma_node_config nodeConfig; + ma_biquad_config biquad; + } ma_biquad_node_config; + + MA_API ma_biquad_node_config ma_biquad_node_config_init(ma_uint32 channels, float b0, float b1, float b2, float a0, float a1, float a2); + + + typedef struct + { + ma_node_base baseNode; + ma_biquad biquad; + } ma_biquad_node; + + MA_API ma_result ma_biquad_node_init(ma_node_graph* pNodeGraph, const ma_biquad_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_biquad_node* pNode); + MA_API ma_result ma_biquad_node_reinit(const ma_biquad_config* pConfig, ma_biquad_node* pNode); + MA_API void ma_biquad_node_uninit(ma_biquad_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks); + + + /* + Low Pass Filter Node + */ + typedef struct + { + ma_node_config nodeConfig; + ma_lpf_config lpf; + } ma_lpf_node_config; + + MA_API ma_lpf_node_config ma_lpf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order); + + + typedef struct + { + ma_node_base baseNode; + ma_lpf lpf; + } ma_lpf_node; + + MA_API ma_result ma_lpf_node_init(ma_node_graph* pNodeGraph, const ma_lpf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_lpf_node* pNode); + MA_API ma_result ma_lpf_node_reinit(const ma_lpf_config* pConfig, ma_lpf_node* pNode); + MA_API void ma_lpf_node_uninit(ma_lpf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks); + + + /* + High Pass Filter Node + */ + typedef struct + { + ma_node_config nodeConfig; + ma_hpf_config hpf; + } ma_hpf_node_config; + + MA_API ma_hpf_node_config ma_hpf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order); + + + typedef struct + { + ma_node_base baseNode; + ma_hpf hpf; + } ma_hpf_node; + + MA_API ma_result ma_hpf_node_init(ma_node_graph* pNodeGraph, const ma_hpf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hpf_node* pNode); + MA_API ma_result ma_hpf_node_reinit(const ma_hpf_config* pConfig, ma_hpf_node* pNode); + MA_API void ma_hpf_node_uninit(ma_hpf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks); + + + /* + Band Pass Filter Node + */ + typedef struct + { + ma_node_config nodeConfig; + ma_bpf_config bpf; + } ma_bpf_node_config; + + MA_API ma_bpf_node_config ma_bpf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order); + + + typedef struct + { + ma_node_base baseNode; + ma_bpf bpf; + } ma_bpf_node; + + MA_API ma_result ma_bpf_node_init(ma_node_graph* pNodeGraph, const ma_bpf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_bpf_node* pNode); + MA_API ma_result ma_bpf_node_reinit(const ma_bpf_config* pConfig, ma_bpf_node* pNode); + MA_API void ma_bpf_node_uninit(ma_bpf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks); + + + /* + Notching 
Filter Node + */ + typedef struct + { + ma_node_config nodeConfig; + ma_notch_config notch; + } ma_notch_node_config; + + MA_API ma_notch_node_config ma_notch_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double q, double frequency); + + + typedef struct + { + ma_node_base baseNode; + ma_notch2 notch; + } ma_notch_node; + + MA_API ma_result ma_notch_node_init(ma_node_graph* pNodeGraph, const ma_notch_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_notch_node* pNode); + MA_API ma_result ma_notch_node_reinit(const ma_notch_config* pConfig, ma_notch_node* pNode); + MA_API void ma_notch_node_uninit(ma_notch_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks); + + + /* + Peaking Filter Node + */ + typedef struct + { + ma_node_config nodeConfig; + ma_peak_config peak; + } ma_peak_node_config; + + MA_API ma_peak_node_config ma_peak_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency); + + + typedef struct + { + ma_node_base baseNode; + ma_peak2 peak; + } ma_peak_node; + + MA_API ma_result ma_peak_node_init(ma_node_graph* pNodeGraph, const ma_peak_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_peak_node* pNode); + MA_API ma_result ma_peak_node_reinit(const ma_peak_config* pConfig, ma_peak_node* pNode); + MA_API void ma_peak_node_uninit(ma_peak_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks); + + + /* + Low Shelf Filter Node + */ + typedef struct + { + ma_node_config nodeConfig; + ma_loshelf_config loshelf; + } ma_loshelf_node_config; + + MA_API ma_loshelf_node_config ma_loshelf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency); + + + typedef struct + { + ma_node_base baseNode; + ma_loshelf2 loshelf; + } ma_loshelf_node; + + MA_API ma_result ma_loshelf_node_init(ma_node_graph* pNodeGraph, const ma_loshelf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_loshelf_node* pNode); + MA_API ma_result ma_loshelf_node_reinit(const ma_loshelf_config* pConfig, ma_loshelf_node* pNode); + MA_API void ma_loshelf_node_uninit(ma_loshelf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks); + + + /* + High Shelf Filter Node + */ + typedef struct + { + ma_node_config nodeConfig; + ma_hishelf_config hishelf; + } ma_hishelf_node_config; + + MA_API ma_hishelf_node_config ma_hishelf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency); + + + typedef struct + { + ma_node_base baseNode; + ma_hishelf2 hishelf; + } ma_hishelf_node; + + MA_API ma_result ma_hishelf_node_init(ma_node_graph* pNodeGraph, const ma_hishelf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hishelf_node* pNode); + MA_API ma_result ma_hishelf_node_reinit(const ma_hishelf_config* pConfig, ma_hishelf_node* pNode); + MA_API void ma_hishelf_node_uninit(ma_hishelf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks); + + + typedef struct + { + ma_node_config nodeConfig; + ma_delay_config delay; + } ma_delay_node_config; + + MA_API ma_delay_node_config ma_delay_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 delayInFrames, float decay); + + + typedef struct + { + ma_node_base baseNode; + ma_delay delay; + } ma_delay_node; + + MA_API ma_result ma_delay_node_init(ma_node_graph* pNodeGraph, const ma_delay_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_delay_node* pDelayNode); 
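+
+ /*
+ Illustrative sketch of wiring up an echo/delay effect with the node above (the node graph, source
+ node, channel count and parameter values are placeholders):
+
+     ma_delay_node_config delayConfig = ma_delay_node_config_init(channels, sampleRate, (sampleRate / 4), 0.25f);   <- quarter-second delay, 0.25 decay
+     ma_delay_node delayNode;
+     ma_delay_node_init(&nodeGraph, &delayConfig, NULL, &delayNode);
+     ma_node_attach_output_bus(&delayNode, 0, ma_node_graph_get_endpoint(&nodeGraph), 0);
+     ma_node_attach_output_bus(&sourceNode, 0, &delayNode, 0);
+
+ Wet, dry and decay can then be adjusted at runtime with the setters declared below.
+ */
+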
+ MA_API void ma_delay_node_uninit(ma_delay_node* pDelayNode, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API void ma_delay_node_set_wet(ma_delay_node* pDelayNode, float value); + MA_API float ma_delay_node_get_wet(const ma_delay_node* pDelayNode); + MA_API void ma_delay_node_set_dry(ma_delay_node* pDelayNode, float value); + MA_API float ma_delay_node_get_dry(const ma_delay_node* pDelayNode); + MA_API void ma_delay_node_set_decay(ma_delay_node* pDelayNode, float value); + MA_API float ma_delay_node_get_decay(const ma_delay_node* pDelayNode); +#endif /* MA_NO_NODE_GRAPH */ + + + /* SECTION: miniaudio_engine.h */ + /************************************************************************************************************************************************************ + + Engine + + ************************************************************************************************************************************************************/ +#if !defined(MA_NO_ENGINE) && !defined(MA_NO_NODE_GRAPH) + typedef struct ma_engine ma_engine; + typedef struct ma_sound ma_sound; + + + /* Sound flags. */ + typedef enum + { + /* Resource manager flags. */ + MA_SOUND_FLAG_STREAM = 0x00000001, /* MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM */ + MA_SOUND_FLAG_DECODE = 0x00000002, /* MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE */ + MA_SOUND_FLAG_ASYNC = 0x00000004, /* MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC */ + MA_SOUND_FLAG_WAIT_INIT = 0x00000008, /* MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT */ + MA_SOUND_FLAG_UNKNOWN_LENGTH = 0x00000010, /* MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_UNKNOWN_LENGTH */ + + /* ma_sound specific flags. */ + MA_SOUND_FLAG_NO_DEFAULT_ATTACHMENT = 0x00001000, /* Do not attach to the endpoint by default. Useful for when setting up nodes in a complex graph system. */ + MA_SOUND_FLAG_NO_PITCH = 0x00002000, /* Disable pitch shifting with ma_sound_set_pitch() and ma_sound_group_set_pitch(). This is an optimization. */ + MA_SOUND_FLAG_NO_SPATIALIZATION = 0x00004000 /* Disable spatialization. */ + } ma_sound_flags; + +#ifndef MA_ENGINE_MAX_LISTENERS +#define MA_ENGINE_MAX_LISTENERS 4 +#endif + +#define MA_LISTENER_INDEX_CLOSEST ((ma_uint8)-1) + + typedef enum + { + ma_engine_node_type_sound, + ma_engine_node_type_group + } ma_engine_node_type; + + typedef struct + { + ma_engine* pEngine; + ma_engine_node_type type; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_uint32 sampleRate; /* Only used when the type is set to ma_engine_node_type_sound. */ + ma_uint32 volumeSmoothTimeInPCMFrames; /* The number of frames to smooth over volume changes. Defaults to 0 in which case no smoothing is used. */ + ma_mono_expansion_mode monoExpansionMode; + ma_bool8 isPitchDisabled; /* Pitching can be explicitly disabled with MA_SOUND_FLAG_NO_PITCH to optimize processing. */ + ma_bool8 isSpatializationDisabled; /* Spatialization can be explicitly disabled with MA_SOUND_FLAG_NO_SPATIALIZATION. */ + ma_uint8 pinnedListenerIndex; /* The index of the listener this node should always use for spatialization. If set to MA_LISTENER_INDEX_CLOSEST the engine will use the closest listener. */ + } ma_engine_node_config; + + MA_API ma_engine_node_config ma_engine_node_config_init(ma_engine* pEngine, ma_engine_node_type type, ma_uint32 flags); + + + /* Base node object for both ma_sound and ma_sound_group. */ + typedef struct + { + ma_node_base baseNode; /* Must be the first member for compatiblity with the ma_node API. */ + ma_engine* pEngine; /* A pointer to the engine. 
Set based on the value from the config. */ + ma_uint32 sampleRate; /* The sample rate of the input data. For sounds backed by a data source, this will be the data source's sample rate. Otherwise it'll be the engine's sample rate. */ + ma_uint32 volumeSmoothTimeInPCMFrames; + ma_mono_expansion_mode monoExpansionMode; + ma_fader fader; + ma_linear_resampler resampler; /* For pitch shift. */ + ma_spatializer spatializer; + ma_panner panner; + ma_gainer volumeGainer; /* This will only be used if volumeSmoothTimeInPCMFrames is > 0. */ + ma_atomic_float volume; /* Defaults to 1. */ + MA_ATOMIC(4, float) pitch; + float oldPitch; /* For determining whether or not the resampler needs to be updated to reflect the new pitch. The resampler will be updated on the mixing thread. */ + float oldDopplerPitch; /* For determining whether or not the resampler needs to be updated to take a new doppler pitch into account. */ + MA_ATOMIC(4, ma_bool32) isPitchDisabled; /* When set to true, pitching will be disabled which will allow the resampler to be bypassed to save some computation. */ + MA_ATOMIC(4, ma_bool32) isSpatializationDisabled; /* Set to false by default. When set to false, will not have spatialisation applied. */ + MA_ATOMIC(4, ma_uint32) pinnedListenerIndex; /* The index of the listener this node should always use for spatialization. If set to MA_LISTENER_INDEX_CLOSEST the engine will use the closest listener. */ + + /* When setting a fade, it's not done immediately in ma_sound_set_fade(). It's deferred to the audio thread which means we need to store the settings here. */ + struct + { + ma_atomic_float volumeBeg; + ma_atomic_float volumeEnd; + ma_atomic_uint64 fadeLengthInFrames; /* <-- Defaults to (~(ma_uint64)0) which is used to indicate that no fade should be applied. */ + ma_atomic_uint64 absoluteGlobalTimeInFrames; /* <-- The time to start the fade. */ + } fadeSettings; + + /* Memory management. */ + ma_bool8 _ownsHeap; + void* _pHeap; + } ma_engine_node; + + MA_API ma_result ma_engine_node_get_heap_size(const ma_engine_node_config* pConfig, size_t* pHeapSizeInBytes); + MA_API ma_result ma_engine_node_init_preallocated(const ma_engine_node_config* pConfig, void* pHeap, ma_engine_node* pEngineNode); + MA_API ma_result ma_engine_node_init(const ma_engine_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_engine_node* pEngineNode); + MA_API void ma_engine_node_uninit(ma_engine_node* pEngineNode, const ma_allocation_callbacks* pAllocationCallbacks); + + +#define MA_SOUND_SOURCE_CHANNEL_COUNT 0xFFFFFFFF + + /* Callback for when a sound reaches the end. */ + typedef void (* ma_sound_end_proc)(void* pUserData, ma_sound* pSound); + + typedef struct + { + const char* pFilePath; /* Set this to load from the resource manager. */ + const wchar_t* pFilePathW; /* Set this to load from the resource manager. */ + ma_data_source* pDataSource; /* Set this to load from an existing data source. */ + ma_node* pInitialAttachment; /* If set, the sound will be attached to an input of this node. This can be set to a ma_sound. If set to NULL, the sound will be attached directly to the endpoint unless MA_SOUND_FLAG_NO_DEFAULT_ATTACHMENT is set in `flags`. */ + ma_uint32 initialAttachmentInputBusIndex; /* The index of the input bus of pInitialAttachment to attach the sound to. */ + ma_uint32 channelsIn; /* Ignored if using a data source as input (the data source's channel count will be used always). Otherwise, setting to 0 will cause the engine's channel count to be used. 
*/ + ma_uint32 channelsOut; /* Set this to 0 (default) to use the engine's channel count. Set to MA_SOUND_SOURCE_CHANNEL_COUNT to use the data source's channel count (only used if using a data source as input). */ + ma_mono_expansion_mode monoExpansionMode; /* Controls how the mono channel should be expanded to other channels when spatialization is disabled on a sound. */ + ma_uint32 flags; /* A combination of MA_SOUND_FLAG_* flags. */ + ma_uint32 volumeSmoothTimeInPCMFrames; /* The number of frames to smooth over volume changes. Defaults to 0 in which case no smoothing is used. */ + ma_uint64 initialSeekPointInPCMFrames; /* Initializes the sound such that it's seeked to this location by default. */ + ma_uint64 rangeBegInPCMFrames; + ma_uint64 rangeEndInPCMFrames; + ma_uint64 loopPointBegInPCMFrames; + ma_uint64 loopPointEndInPCMFrames; + ma_bool32 isLooping; + ma_sound_end_proc endCallback; /* Fired when the sound reaches the end. Will be fired from the audio thread. Do not restart, uninitialize or otherwise change the state of the sound from here. Instead fire an event or set a variable to indicate to a different thread to change the start of the sound. Will not be fired in response to a scheduled stop with ma_sound_set_stop_time_*(). */ + void* pEndCallbackUserData; +#ifndef MA_NO_RESOURCE_MANAGER + ma_resource_manager_pipeline_notifications initNotifications; +#endif + ma_fence* pDoneFence; /* Deprecated. Use initNotifications instead. Released when the resource manager has finished decoding the entire sound. Not used with streams. */ + } ma_sound_config; + + MA_API ma_sound_config ma_sound_config_init(void); /* Deprecated. Will be removed in version 0.12. Use ma_sound_config_2() instead. */ + MA_API ma_sound_config ma_sound_config_init_2(ma_engine* pEngine); /* Will be renamed to ma_sound_config_init() in version 0.12. */ + + struct ma_sound + { + ma_engine_node engineNode; /* Must be the first member for compatibility with the ma_node API. */ + ma_data_source* pDataSource; + MA_ATOMIC(8, ma_uint64) seekTarget; /* The PCM frame index to seek to in the mixing thread. Set to (~(ma_uint64)0) to not perform any seeking. */ + MA_ATOMIC(4, ma_bool32) atEnd; + ma_sound_end_proc endCallback; + void* pEndCallbackUserData; + ma_bool8 ownsDataSource; + + /* + We're declaring a resource manager data source object here to save us a malloc when loading a + sound via the resource manager, which I *think* will be the most common scenario. + */ +#ifndef MA_NO_RESOURCE_MANAGER + ma_resource_manager_data_source* pResourceManagerDataSource; +#endif + }; + + /* Structure specifically for sounds played with ma_engine_play_sound(). Making this a separate structure to reduce overhead. */ + typedef struct ma_sound_inlined ma_sound_inlined; + struct ma_sound_inlined + { + ma_sound sound; + ma_sound_inlined* pNext; + ma_sound_inlined* pPrev; + }; + + /* A sound group is just a sound. */ + typedef ma_sound_config ma_sound_group_config; + typedef ma_sound ma_sound_group; + + MA_API ma_sound_group_config ma_sound_group_config_init(void); /* Deprecated. Will be removed in version 0.12. Use ma_sound_config_2() instead. */ + MA_API ma_sound_group_config ma_sound_group_config_init_2(ma_engine* pEngine); /* Will be renamed to ma_sound_config_init() in version 0.12. 
*/ + + typedef void (* ma_engine_process_proc)(void* pUserData, float* pFramesOut, ma_uint64 frameCount); + + typedef struct + { +#if !defined(MA_NO_RESOURCE_MANAGER) + ma_resource_manager* pResourceManager; /* Can be null in which case a resource manager will be created for you. */ +#endif +#if !defined(MA_NO_DEVICE_IO) + ma_context* pContext; + ma_device* pDevice; /* If set, the caller is responsible for calling ma_engine_data_callback() in the device's data callback. */ + ma_device_id* pPlaybackDeviceID; /* The ID of the playback device to use with the default listener. */ + ma_device_data_proc dataCallback; /* Can be null. Can be used to provide a custom device data callback. */ + ma_device_notification_proc notificationCallback; +#endif + ma_log* pLog; /* When set to NULL, will use the context's log. */ + ma_uint32 listenerCount; /* Must be between 1 and MA_ENGINE_MAX_LISTENERS. */ + ma_uint32 channels; /* The number of channels to use when mixing and spatializing. When set to 0, will use the native channel count of the device. */ + ma_uint32 sampleRate; /* The sample rate. When set to 0 will use the native channel count of the device. */ + ma_uint32 periodSizeInFrames; /* If set to something other than 0, updates will always be exactly this size. The underlying device may be a different size, but from the perspective of the mixer that won't matter.*/ + ma_uint32 periodSizeInMilliseconds; /* Used if periodSizeInFrames is unset. */ + ma_uint32 gainSmoothTimeInFrames; /* The number of frames to interpolate the gain of spatialized sounds across. If set to 0, will use gainSmoothTimeInMilliseconds. */ + ma_uint32 gainSmoothTimeInMilliseconds; /* When set to 0, gainSmoothTimeInFrames will be used. If both are set to 0, a default value will be used. */ + ma_uint32 defaultVolumeSmoothTimeInPCMFrames; /* Defaults to 0. Controls the default amount of smoothing to apply to volume changes to sounds. High values means more smoothing at the expense of high latency (will take longer to reach the new volume). */ + ma_allocation_callbacks allocationCallbacks; + ma_bool32 noAutoStart; /* When set to true, requires an explicit call to ma_engine_start(). This is false by default, meaning the engine will be started automatically in ma_engine_init(). */ + ma_bool32 noDevice; /* When set to true, don't create a default device. ma_engine_read_pcm_frames() can be called manually to read data. */ + ma_mono_expansion_mode monoExpansionMode; /* Controls how the mono channel should be expanded to other channels when spatialization is disabled on a sound. */ + ma_vfs* pResourceManagerVFS; /* A pointer to a pre-allocated VFS object to use with the resource manager. This is ignored if pResourceManager is not NULL. */ + ma_engine_process_proc onProcess; /* Fired at the end of each call to ma_engine_read_pcm_frames(). For engine's that manage their own internal device (the default configuration), this will be fired from the audio thread, and you do not need to call ma_engine_read_pcm_frames() manually in order to trigger this. */ + void* pProcessUserData; /* User data that's passed into onProcess. */ + } ma_engine_config; + + MA_API ma_engine_config ma_engine_config_init(void); + + + struct ma_engine + { + ma_node_graph nodeGraph; /* An engine is a node graph. It should be able to be plugged into any ma_node_graph API (with a cast) which means this must be the first member of this struct. 
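+
+    In practice this means the node graph API can be used directly on an engine. For example (sketch
+    only, with `myNode` being any custom node initialized against the engine's node graph):
+
+        ma_node_attach_output_bus(&myNode, 0, ma_engine_get_endpoint(&engine), 0);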
*/ +#if !defined(MA_NO_RESOURCE_MANAGER) + ma_resource_manager* pResourceManager; +#endif +#if !defined(MA_NO_DEVICE_IO) + ma_device* pDevice; /* Optionally set via the config, otherwise allocated by the engine in ma_engine_init(). */ +#endif + ma_log* pLog; + ma_uint32 sampleRate; + ma_uint32 listenerCount; + ma_spatializer_listener listeners[MA_ENGINE_MAX_LISTENERS]; + ma_allocation_callbacks allocationCallbacks; + ma_bool8 ownsResourceManager; + ma_bool8 ownsDevice; + ma_spinlock inlinedSoundLock; /* For synchronizing access so the inlined sound list. */ + ma_sound_inlined* pInlinedSoundHead; /* The first inlined sound. Inlined sounds are tracked in a linked list. */ + MA_ATOMIC(4, ma_uint32) inlinedSoundCount; /* The total number of allocated inlined sound objects. Used for debugging. */ + ma_uint32 gainSmoothTimeInFrames; /* The number of frames to interpolate the gain of spatialized sounds across. */ + ma_uint32 defaultVolumeSmoothTimeInPCMFrames; + ma_mono_expansion_mode monoExpansionMode; + ma_engine_process_proc onProcess; + void* pProcessUserData; + }; + + MA_API ma_result ma_engine_init(const ma_engine_config* pConfig, ma_engine* pEngine); + MA_API void ma_engine_uninit(ma_engine* pEngine); + MA_API ma_result ma_engine_read_pcm_frames(ma_engine* pEngine, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); + MA_API ma_node_graph* ma_engine_get_node_graph(ma_engine* pEngine); +#if !defined(MA_NO_RESOURCE_MANAGER) + MA_API ma_resource_manager* ma_engine_get_resource_manager(ma_engine* pEngine); +#endif + MA_API ma_device* ma_engine_get_device(ma_engine* pEngine); + MA_API ma_log* ma_engine_get_log(ma_engine* pEngine); + MA_API ma_node* ma_engine_get_endpoint(ma_engine* pEngine); + MA_API ma_uint64 ma_engine_get_time_in_pcm_frames(const ma_engine* pEngine); + MA_API ma_uint64 ma_engine_get_time_in_milliseconds(const ma_engine* pEngine); + MA_API ma_result ma_engine_set_time_in_pcm_frames(ma_engine* pEngine, ma_uint64 globalTime); + MA_API ma_result ma_engine_set_time_in_milliseconds(ma_engine* pEngine, ma_uint64 globalTime); + MA_API ma_uint64 ma_engine_get_time(const ma_engine* pEngine); /* Deprecated. Use ma_engine_get_time_in_pcm_frames(). Will be removed in version 0.12. */ + MA_API ma_result ma_engine_set_time(ma_engine* pEngine, ma_uint64 globalTime); /* Deprecated. Use ma_engine_set_time_in_pcm_frames(). Will be removed in version 0.12. 
*/ + MA_API ma_uint32 ma_engine_get_channels(const ma_engine* pEngine); + MA_API ma_uint32 ma_engine_get_sample_rate(const ma_engine* pEngine); + + MA_API ma_result ma_engine_start(ma_engine* pEngine); + MA_API ma_result ma_engine_stop(ma_engine* pEngine); + MA_API ma_result ma_engine_set_volume(ma_engine* pEngine, float volume); + MA_API float ma_engine_get_volume(ma_engine* pEngine); + MA_API ma_result ma_engine_set_gain_db(ma_engine* pEngine, float gainDB); + MA_API float ma_engine_get_gain_db(ma_engine* pEngine); + + MA_API ma_uint32 ma_engine_get_listener_count(const ma_engine* pEngine); + MA_API ma_uint32 ma_engine_find_closest_listener(const ma_engine* pEngine, float absolutePosX, float absolutePosY, float absolutePosZ); + MA_API void ma_engine_listener_set_position(ma_engine* pEngine, ma_uint32 listenerIndex, float x, float y, float z); + MA_API ma_vec3f ma_engine_listener_get_position(const ma_engine* pEngine, ma_uint32 listenerIndex); + MA_API void ma_engine_listener_set_direction(ma_engine* pEngine, ma_uint32 listenerIndex, float x, float y, float z); + MA_API ma_vec3f ma_engine_listener_get_direction(const ma_engine* pEngine, ma_uint32 listenerIndex); + MA_API void ma_engine_listener_set_velocity(ma_engine* pEngine, ma_uint32 listenerIndex, float x, float y, float z); + MA_API ma_vec3f ma_engine_listener_get_velocity(const ma_engine* pEngine, ma_uint32 listenerIndex); + MA_API void ma_engine_listener_set_cone(ma_engine* pEngine, ma_uint32 listenerIndex, float innerAngleInRadians, float outerAngleInRadians, float outerGain); + MA_API void ma_engine_listener_get_cone(const ma_engine* pEngine, ma_uint32 listenerIndex, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain); + MA_API void ma_engine_listener_set_world_up(ma_engine* pEngine, ma_uint32 listenerIndex, float x, float y, float z); + MA_API ma_vec3f ma_engine_listener_get_world_up(const ma_engine* pEngine, ma_uint32 listenerIndex); + MA_API void ma_engine_listener_set_enabled(ma_engine* pEngine, ma_uint32 listenerIndex, ma_bool32 isEnabled); + MA_API ma_bool32 ma_engine_listener_is_enabled(const ma_engine* pEngine, ma_uint32 listenerIndex); + +#ifndef MA_NO_RESOURCE_MANAGER + MA_API ma_result ma_engine_play_sound_ex(ma_engine* pEngine, const char* pFilePath, ma_node* pNode, ma_uint32 nodeInputBusIndex); + MA_API ma_result ma_engine_play_sound(ma_engine* pEngine, const char* pFilePath, ma_sound_group* pGroup); /* Fire and forget. 
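+
+    Typical fire-and-forget usage looks something like the following sketch (the file name is a
+    placeholder and error handling is omitted):
+
+        ma_engine engine;
+        if (ma_engine_init(NULL, &engine) == MA_SUCCESS) {
+            ma_engine_play_sound(&engine, "clip.wav", NULL);   <- NULL group plays through the engine's endpoint
+        }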
*/ +#endif + +#ifndef MA_NO_RESOURCE_MANAGER + MA_API ma_result ma_sound_init_from_file(ma_engine* pEngine, const char* pFilePath, ma_uint32 flags, ma_sound_group* pGroup, ma_fence* pDoneFence, ma_sound* pSound); + MA_API ma_result ma_sound_init_from_file_w(ma_engine* pEngine, const wchar_t* pFilePath, ma_uint32 flags, ma_sound_group* pGroup, ma_fence* pDoneFence, ma_sound* pSound); + MA_API ma_result ma_sound_init_copy(ma_engine* pEngine, const ma_sound* pExistingSound, ma_uint32 flags, ma_sound_group* pGroup, ma_sound* pSound); +#endif + MA_API ma_result ma_sound_init_from_data_source(ma_engine* pEngine, ma_data_source* pDataSource, ma_uint32 flags, ma_sound_group* pGroup, ma_sound* pSound); + MA_API ma_result ma_sound_init_ex(ma_engine* pEngine, const ma_sound_config* pConfig, ma_sound* pSound); + MA_API void ma_sound_uninit(ma_sound* pSound); + MA_API ma_engine* ma_sound_get_engine(const ma_sound* pSound); + MA_API ma_data_source* ma_sound_get_data_source(const ma_sound* pSound); + MA_API ma_result ma_sound_start(ma_sound* pSound); + MA_API ma_result ma_sound_stop(ma_sound* pSound); + MA_API ma_result ma_sound_stop_with_fade_in_pcm_frames(ma_sound* pSound, ma_uint64 fadeLengthInFrames); /* Will overwrite any scheduled stop and fade. */ + MA_API ma_result ma_sound_stop_with_fade_in_milliseconds(ma_sound* pSound, ma_uint64 fadeLengthInFrames); /* Will overwrite any scheduled stop and fade. */ + MA_API void ma_sound_set_volume(ma_sound* pSound, float volume); + MA_API float ma_sound_get_volume(const ma_sound* pSound); + MA_API void ma_sound_set_pan(ma_sound* pSound, float pan); + MA_API float ma_sound_get_pan(const ma_sound* pSound); + MA_API void ma_sound_set_pan_mode(ma_sound* pSound, ma_pan_mode panMode); + MA_API ma_pan_mode ma_sound_get_pan_mode(const ma_sound* pSound); + MA_API void ma_sound_set_pitch(ma_sound* pSound, float pitch); + MA_API float ma_sound_get_pitch(const ma_sound* pSound); + MA_API void ma_sound_set_spatialization_enabled(ma_sound* pSound, ma_bool32 enabled); + MA_API ma_bool32 ma_sound_is_spatialization_enabled(const ma_sound* pSound); + MA_API void ma_sound_set_pinned_listener_index(ma_sound* pSound, ma_uint32 listenerIndex); + MA_API ma_uint32 ma_sound_get_pinned_listener_index(const ma_sound* pSound); + MA_API ma_uint32 ma_sound_get_listener_index(const ma_sound* pSound); + MA_API ma_vec3f ma_sound_get_direction_to_listener(const ma_sound* pSound); + MA_API void ma_sound_set_position(ma_sound* pSound, float x, float y, float z); + MA_API ma_vec3f ma_sound_get_position(const ma_sound* pSound); + MA_API void ma_sound_set_direction(ma_sound* pSound, float x, float y, float z); + MA_API ma_vec3f ma_sound_get_direction(const ma_sound* pSound); + MA_API void ma_sound_set_velocity(ma_sound* pSound, float x, float y, float z); + MA_API ma_vec3f ma_sound_get_velocity(const ma_sound* pSound); + MA_API void ma_sound_set_attenuation_model(ma_sound* pSound, ma_attenuation_model attenuationModel); + MA_API ma_attenuation_model ma_sound_get_attenuation_model(const ma_sound* pSound); + MA_API void ma_sound_set_positioning(ma_sound* pSound, ma_positioning positioning); + MA_API ma_positioning ma_sound_get_positioning(const ma_sound* pSound); + MA_API void ma_sound_set_rolloff(ma_sound* pSound, float rolloff); + MA_API float ma_sound_get_rolloff(const ma_sound* pSound); + MA_API void ma_sound_set_min_gain(ma_sound* pSound, float minGain); + MA_API float ma_sound_get_min_gain(const ma_sound* pSound); + MA_API void ma_sound_set_max_gain(ma_sound* pSound, float maxGain); + MA_API 
float ma_sound_get_max_gain(const ma_sound* pSound); + MA_API void ma_sound_set_min_distance(ma_sound* pSound, float minDistance); + MA_API float ma_sound_get_min_distance(const ma_sound* pSound); + MA_API void ma_sound_set_max_distance(ma_sound* pSound, float maxDistance); + MA_API float ma_sound_get_max_distance(const ma_sound* pSound); + MA_API void ma_sound_set_cone(ma_sound* pSound, float innerAngleInRadians, float outerAngleInRadians, float outerGain); + MA_API void ma_sound_get_cone(const ma_sound* pSound, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain); + MA_API void ma_sound_set_doppler_factor(ma_sound* pSound, float dopplerFactor); + MA_API float ma_sound_get_doppler_factor(const ma_sound* pSound); + MA_API void ma_sound_set_directional_attenuation_factor(ma_sound* pSound, float directionalAttenuationFactor); + MA_API float ma_sound_get_directional_attenuation_factor(const ma_sound* pSound); + MA_API void ma_sound_set_fade_in_pcm_frames(ma_sound* pSound, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInFrames); + MA_API void ma_sound_set_fade_in_milliseconds(ma_sound* pSound, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInMilliseconds); + MA_API void ma_sound_set_fade_start_in_pcm_frames(ma_sound* pSound, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInFrames, ma_uint64 absoluteGlobalTimeInFrames); + MA_API void ma_sound_set_fade_start_in_milliseconds(ma_sound* pSound, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInMilliseconds, ma_uint64 absoluteGlobalTimeInMilliseconds); + MA_API float ma_sound_get_current_fade_volume(const ma_sound* pSound); + MA_API void ma_sound_set_start_time_in_pcm_frames(ma_sound* pSound, ma_uint64 absoluteGlobalTimeInFrames); + MA_API void ma_sound_set_start_time_in_milliseconds(ma_sound* pSound, ma_uint64 absoluteGlobalTimeInMilliseconds); + MA_API void ma_sound_set_stop_time_in_pcm_frames(ma_sound* pSound, ma_uint64 absoluteGlobalTimeInFrames); + MA_API void ma_sound_set_stop_time_in_milliseconds(ma_sound* pSound, ma_uint64 absoluteGlobalTimeInMilliseconds); + MA_API void ma_sound_set_stop_time_with_fade_in_pcm_frames(ma_sound* pSound, ma_uint64 stopAbsoluteGlobalTimeInFrames, ma_uint64 fadeLengthInFrames); + MA_API void ma_sound_set_stop_time_with_fade_in_milliseconds(ma_sound* pSound, ma_uint64 stopAbsoluteGlobalTimeInMilliseconds, ma_uint64 fadeLengthInMilliseconds); + MA_API ma_bool32 ma_sound_is_playing(const ma_sound* pSound); + MA_API ma_uint64 ma_sound_get_time_in_pcm_frames(const ma_sound* pSound); + MA_API ma_uint64 ma_sound_get_time_in_milliseconds(const ma_sound* pSound); + MA_API void ma_sound_set_looping(ma_sound* pSound, ma_bool32 isLooping); + MA_API ma_bool32 ma_sound_is_looping(const ma_sound* pSound); + MA_API ma_bool32 ma_sound_at_end(const ma_sound* pSound); + MA_API ma_result ma_sound_seek_to_pcm_frame(ma_sound* pSound, ma_uint64 frameIndex); /* Just a wrapper around ma_data_source_seek_to_pcm_frame(). 
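+
+    Note that the target is expressed in PCM frames rather than seconds. To seek to a time in
+    seconds, convert using the sound's sample rate (sketch):
+
+        ma_uint32 sampleRate;
+        ma_sound_get_data_format(&sound, NULL, NULL, &sampleRate, NULL, 0);
+        ma_sound_seek_to_pcm_frame(&sound, (ma_uint64)sampleRate * 10);   <- jump to the 10 second mark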
*/ + MA_API ma_result ma_sound_get_data_format(ma_sound* pSound, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); + MA_API ma_result ma_sound_get_cursor_in_pcm_frames(ma_sound* pSound, ma_uint64* pCursor); + MA_API ma_result ma_sound_get_length_in_pcm_frames(ma_sound* pSound, ma_uint64* pLength); + MA_API ma_result ma_sound_get_cursor_in_seconds(ma_sound* pSound, float* pCursor); + MA_API ma_result ma_sound_get_length_in_seconds(ma_sound* pSound, float* pLength); + MA_API ma_result ma_sound_set_end_callback(ma_sound* pSound, ma_sound_end_proc callback, void* pUserData); + + MA_API ma_result ma_sound_group_init(ma_engine* pEngine, ma_uint32 flags, ma_sound_group* pParentGroup, ma_sound_group* pGroup); + MA_API ma_result ma_sound_group_init_ex(ma_engine* pEngine, const ma_sound_group_config* pConfig, ma_sound_group* pGroup); + MA_API void ma_sound_group_uninit(ma_sound_group* pGroup); + MA_API ma_engine* ma_sound_group_get_engine(const ma_sound_group* pGroup); + MA_API ma_result ma_sound_group_start(ma_sound_group* pGroup); + MA_API ma_result ma_sound_group_stop(ma_sound_group* pGroup); + MA_API void ma_sound_group_set_volume(ma_sound_group* pGroup, float volume); + MA_API float ma_sound_group_get_volume(const ma_sound_group* pGroup); + MA_API void ma_sound_group_set_pan(ma_sound_group* pGroup, float pan); + MA_API float ma_sound_group_get_pan(const ma_sound_group* pGroup); + MA_API void ma_sound_group_set_pan_mode(ma_sound_group* pGroup, ma_pan_mode panMode); + MA_API ma_pan_mode ma_sound_group_get_pan_mode(const ma_sound_group* pGroup); + MA_API void ma_sound_group_set_pitch(ma_sound_group* pGroup, float pitch); + MA_API float ma_sound_group_get_pitch(const ma_sound_group* pGroup); + MA_API void ma_sound_group_set_spatialization_enabled(ma_sound_group* pGroup, ma_bool32 enabled); + MA_API ma_bool32 ma_sound_group_is_spatialization_enabled(const ma_sound_group* pGroup); + MA_API void ma_sound_group_set_pinned_listener_index(ma_sound_group* pGroup, ma_uint32 listenerIndex); + MA_API ma_uint32 ma_sound_group_get_pinned_listener_index(const ma_sound_group* pGroup); + MA_API ma_uint32 ma_sound_group_get_listener_index(const ma_sound_group* pGroup); + MA_API ma_vec3f ma_sound_group_get_direction_to_listener(const ma_sound_group* pGroup); + MA_API void ma_sound_group_set_position(ma_sound_group* pGroup, float x, float y, float z); + MA_API ma_vec3f ma_sound_group_get_position(const ma_sound_group* pGroup); + MA_API void ma_sound_group_set_direction(ma_sound_group* pGroup, float x, float y, float z); + MA_API ma_vec3f ma_sound_group_get_direction(const ma_sound_group* pGroup); + MA_API void ma_sound_group_set_velocity(ma_sound_group* pGroup, float x, float y, float z); + MA_API ma_vec3f ma_sound_group_get_velocity(const ma_sound_group* pGroup); + MA_API void ma_sound_group_set_attenuation_model(ma_sound_group* pGroup, ma_attenuation_model attenuationModel); + MA_API ma_attenuation_model ma_sound_group_get_attenuation_model(const ma_sound_group* pGroup); + MA_API void ma_sound_group_set_positioning(ma_sound_group* pGroup, ma_positioning positioning); + MA_API ma_positioning ma_sound_group_get_positioning(const ma_sound_group* pGroup); + MA_API void ma_sound_group_set_rolloff(ma_sound_group* pGroup, float rolloff); + MA_API float ma_sound_group_get_rolloff(const ma_sound_group* pGroup); + MA_API void ma_sound_group_set_min_gain(ma_sound_group* pGroup, float minGain); + MA_API float ma_sound_group_get_min_gain(const ma_sound_group* 
pGroup); + MA_API void ma_sound_group_set_max_gain(ma_sound_group* pGroup, float maxGain); + MA_API float ma_sound_group_get_max_gain(const ma_sound_group* pGroup); + MA_API void ma_sound_group_set_min_distance(ma_sound_group* pGroup, float minDistance); + MA_API float ma_sound_group_get_min_distance(const ma_sound_group* pGroup); + MA_API void ma_sound_group_set_max_distance(ma_sound_group* pGroup, float maxDistance); + MA_API float ma_sound_group_get_max_distance(const ma_sound_group* pGroup); + MA_API void ma_sound_group_set_cone(ma_sound_group* pGroup, float innerAngleInRadians, float outerAngleInRadians, float outerGain); + MA_API void ma_sound_group_get_cone(const ma_sound_group* pGroup, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain); + MA_API void ma_sound_group_set_doppler_factor(ma_sound_group* pGroup, float dopplerFactor); + MA_API float ma_sound_group_get_doppler_factor(const ma_sound_group* pGroup); + MA_API void ma_sound_group_set_directional_attenuation_factor(ma_sound_group* pGroup, float directionalAttenuationFactor); + MA_API float ma_sound_group_get_directional_attenuation_factor(const ma_sound_group* pGroup); + MA_API void ma_sound_group_set_fade_in_pcm_frames(ma_sound_group* pGroup, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInFrames); + MA_API void ma_sound_group_set_fade_in_milliseconds(ma_sound_group* pGroup, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInMilliseconds); + MA_API float ma_sound_group_get_current_fade_volume(ma_sound_group* pGroup); + MA_API void ma_sound_group_set_start_time_in_pcm_frames(ma_sound_group* pGroup, ma_uint64 absoluteGlobalTimeInFrames); + MA_API void ma_sound_group_set_start_time_in_milliseconds(ma_sound_group* pGroup, ma_uint64 absoluteGlobalTimeInMilliseconds); + MA_API void ma_sound_group_set_stop_time_in_pcm_frames(ma_sound_group* pGroup, ma_uint64 absoluteGlobalTimeInFrames); + MA_API void ma_sound_group_set_stop_time_in_milliseconds(ma_sound_group* pGroup, ma_uint64 absoluteGlobalTimeInMilliseconds); + MA_API ma_bool32 ma_sound_group_is_playing(const ma_sound_group* pGroup); + MA_API ma_uint64 ma_sound_group_get_time_in_pcm_frames(const ma_sound_group* pGroup); +#endif /* MA_NO_ENGINE */ + /* END SECTION: miniaudio_engine.h */ + +#ifdef __cplusplus +} +#endif +#endif /* miniaudio_h */ + + +/* +This is for preventing greying out of the implementation section. +*/ +#if defined(Q_CREATOR_RUN) || defined(__INTELLISENSE__) || defined(__CDT_PARSER__) +#define MINIAUDIO_IMPLEMENTATION +#endif + +/************************************************************************************************************************************************************ +************************************************************************************************************************************************************* + +IMPLEMENTATION + +************************************************************************************************************************************************************* +************************************************************************************************************************************************************/ +#if defined(MINIAUDIO_IMPLEMENTATION) || defined(MA_IMPLEMENTATION) +#ifndef miniaudio_c +#define miniaudio_c + +#include +#include /* For INT_MAX */ +#include /* sin(), etc. */ +#include /* For malloc(), free(), wcstombs(). */ +#include /* For memset() */ + +#include +#include +#if !defined(_MSC_VER) && !defined(__DMC__) +#include /* For strcasecmp(). 
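A minimal usage sketch of the sound-group and fade API declared above. This assumes the engine and sound initialisation functions declared earlier in this header (ma_engine_init(), ma_sound_init_from_file(), ma_sound_start()) and a hypothetical "clip.wav" on disk; it is illustrative only, not part of the patch.

/* Sketch: group a sound under a group, halve the group volume, fade the sound in over one second. */
#define MINIAUDIO_IMPLEMENTATION
#include "miniaudio.h"

int main(void)
{
    ma_engine engine;
    ma_sound_group sfxGroup;
    ma_sound sound;

    if (ma_engine_init(NULL, &engine) != MA_SUCCESS) {
        return -1;
    }

    /* Group all sound effects so their volume can be controlled together. */
    ma_sound_group_init(&engine, 0, NULL, &sfxGroup);
    ma_sound_group_set_volume(&sfxGroup, 0.5f);

    /* Load a sound into the group and fade it in from silence over 1000 ms. */
    if (ma_sound_init_from_file(&engine, "clip.wav", 0, &sfxGroup, NULL, &sound) == MA_SUCCESS) {
        ma_sound_set_fade_in_milliseconds(&sound, 0.0f, 1.0f, 1000);
        ma_sound_start(&sound);
    }

    /* ... run the application ... */

    ma_sound_uninit(&sound);
    ma_sound_group_uninit(&sfxGroup);
    ma_engine_uninit(&engine);
    return 0;
}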
*/ +#include /* For wcslen(), wcsrtombs() */ +#endif +#ifdef _MSC_VER +#include /* For _controlfp_s constants */ +#endif + +#if defined(MA_WIN32) +#include + +/* +There's a possibility that WIN32_LEAN_AND_MEAN has been defined which will exclude some symbols +such as STGM_READ and CLSCTL_ALL. We need to check these and define them ourselves if they're +unavailable. +*/ +#ifndef STGM_READ +#define STGM_READ 0x00000000L +#endif +#ifndef CLSCTX_ALL +#define CLSCTX_ALL 23 +#endif + +/* IUnknown is used by both the WASAPI and DirectSound backends. It easier to just declare our version here. */ +typedef struct ma_IUnknown ma_IUnknown; +#endif + +#if !defined(MA_WIN32) +#include +#include /* select() (used for ma_sleep()). */ +#include +#endif + +#ifdef MA_NX +#include /* For nanosleep() */ +#endif + +#include /* For fstat(), etc. */ + +#ifdef MA_EMSCRIPTEN +#include +#endif + + +/* Architecture Detection */ +#if !defined(MA_64BIT) && !defined(MA_32BIT) +#ifdef _WIN32 +#ifdef _WIN64 +#define MA_64BIT +#else +#define MA_32BIT +#endif +#endif +#endif + +#if !defined(MA_64BIT) && !defined(MA_32BIT) +#ifdef __GNUC__ +#ifdef __LP64__ +#define MA_64BIT +#else +#define MA_32BIT +#endif +#endif +#endif + +#if !defined(MA_64BIT) && !defined(MA_32BIT) +#include +#if INTPTR_MAX == INT64_MAX +#define MA_64BIT +#else +#define MA_32BIT +#endif +#endif + +#if defined(__arm__) || defined(_M_ARM) +#define MA_ARM32 +#endif +#if defined(__arm64) || defined(__arm64__) || defined(__aarch64__) || defined(_M_ARM64) +#define MA_ARM64 +#endif + +#if defined(__x86_64__) || defined(_M_X64) +#define MA_X64 +#elif defined(__i386) || defined(_M_IX86) +#define MA_X86 +#elif defined(MA_ARM32) || defined(MA_ARM64) +#define MA_ARM +#endif + +/* Intrinsics Support */ +#if (defined(MA_X64) || defined(MA_X86)) && !defined(__COSMOPOLITAN__) +#if defined(_MSC_VER) && !defined(__clang__) +/* MSVC. */ +#if _MSC_VER >= 1400 && !defined(MA_NO_SSE2) /* 2005 */ +#define MA_SUPPORT_SSE2 +#endif +/*#if _MSC_VER >= 1600 && !defined(MA_NO_AVX)*/ /* 2010 */ + /* #define MA_SUPPORT_AVX*/ + /*#endif*/ +#if _MSC_VER >= 1700 && !defined(MA_NO_AVX2) /* 2012 */ +#define MA_SUPPORT_AVX2 +#endif +#else +/* Assume GNUC-style. */ +#if defined(__SSE2__) && !defined(MA_NO_SSE2) +#define MA_SUPPORT_SSE2 +#endif +/*#if defined(__AVX__) && !defined(MA_NO_AVX)*/ +/* #define MA_SUPPORT_AVX*/ +/*#endif*/ +#if defined(__AVX2__) && !defined(MA_NO_AVX2) +#define MA_SUPPORT_AVX2 +#endif +#endif + +/* If at this point we still haven't determined compiler support for the intrinsics just fall back to __has_include. */ +#if !defined(__GNUC__) && !defined(__clang__) && defined(__has_include) +#if !defined(MA_SUPPORT_SSE2) && !defined(MA_NO_SSE2) && __has_include() +#define MA_SUPPORT_SSE2 +#endif +/*#if !defined(MA_SUPPORT_AVX) && !defined(MA_NO_AVX) && __has_include()*/ +/* #define MA_SUPPORT_AVX*/ +/*#endif*/ +#if !defined(MA_SUPPORT_AVX2) && !defined(MA_NO_AVX2) && __has_include() +#define MA_SUPPORT_AVX2 +#endif +#endif + +#if defined(MA_SUPPORT_AVX2) || defined(MA_SUPPORT_AVX) +#include +#elif defined(MA_SUPPORT_SSE2) +#include +#endif +#endif + +#if defined(MA_ARM) +#if !defined(MA_NO_NEON) && (defined(__ARM_NEON) || defined(__aarch64__) || defined(_M_ARM64)) +#define MA_SUPPORT_NEON +#include +#endif +#endif + +/* Begin globally disabled warnings. 
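The architecture-detection block above falls back to pointer width when no compiler-specific macro is available. The standalone sketch below shows that same INTPTR_MAX fallback in isolation (the EXAMPLE_* names are hypothetical and only used here for illustration).

/* Illustrative only: the pointer-width fallback used above, as a standalone program. */
#include <stdint.h>
#include <stdio.h>

#if INTPTR_MAX == INT64_MAX
    #define EXAMPLE_64BIT
#elif INTPTR_MAX == INT32_MAX
    #define EXAMPLE_32BIT
#endif

int main(void)
{
#if defined(EXAMPLE_64BIT)
    printf("64-bit target (INTPTR_MAX == INT64_MAX)\n");
#elif defined(EXAMPLE_32BIT)
    printf("32-bit target (INTPTR_MAX == INT32_MAX)\n");
#else
    printf("unusual pointer width\n");
#endif
    return 0;
}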
*/ +#if defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable:4752) /* found Intel(R) Advanced Vector Extensions; consider using /arch:AVX */ +#pragma warning(disable:4049) /* compiler limit : terminating line number emission */ +#endif + +#if defined(MA_X64) || defined(MA_X86) +#if defined(_MSC_VER) && !defined(__clang__) +#if _MSC_VER >= 1400 +#include +static MA_INLINE void ma_cpuid(int info[4], int fid) +{ + __cpuid(info, fid); +} +#else +#define MA_NO_CPUID +#endif + +#if _MSC_VER >= 1600 && (defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 160040219) +static MA_INLINE unsigned __int64 ma_xgetbv(int reg) +{ + return _xgetbv(reg); +} +#else +#define MA_NO_XGETBV +#endif +#elif (defined(__GNUC__) || defined(__clang__)) && !defined(MA_ANDROID) +static MA_INLINE void ma_cpuid(int info[4], int fid) +{ + /* + It looks like the -fPIC option uses the ebx register which GCC complains about. We can work around this by just using a different register, the + specific register of which I'm letting the compiler decide on. The "k" prefix is used to specify a 32-bit register. The {...} syntax is for + supporting different assembly dialects. + + What's basically happening is that we're saving and restoring the ebx register manually. + */ +#if defined(MA_X86) && defined(__PIC__) + __asm__ __volatile__ ( + "xchg{l} {%%}ebx, %k1;" + "cpuid;" + "xchg{l} {%%}ebx, %k1;" + : "=a"(info[0]), "=&r"(info[1]), "=c"(info[2]), "=d"(info[3]) : "a"(fid), "c"(0) + ); +#else + __asm__ __volatile__ ( + "cpuid" : "=a"(info[0]), "=b"(info[1]), "=c"(info[2]), "=d"(info[3]) : "a"(fid), "c"(0) + ); +#endif +} + +static MA_INLINE ma_uint64 ma_xgetbv(int reg) +{ + unsigned int hi; + unsigned int lo; + + __asm__ __volatile__ ( + "xgetbv" : "=a"(lo), "=d"(hi) : "c"(reg) + ); + + return ((ma_uint64)hi << 32) | (ma_uint64)lo; +} +#else +#define MA_NO_CPUID +#define MA_NO_XGETBV +#endif +#else +#define MA_NO_CPUID +#define MA_NO_XGETBV +#endif + +static MA_INLINE ma_bool32 ma_has_sse2(void) +{ +#if defined(MA_SUPPORT_SSE2) +#if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_SSE2) +#if defined(MA_X64) + return MA_TRUE; /* 64-bit targets always support SSE2. */ +#elif (defined(_M_IX86_FP) && _M_IX86_FP == 2) || defined(__SSE2__) + return MA_TRUE; /* If the compiler is allowed to freely generate SSE2 code we can assume support. */ +#else +#if defined(MA_NO_CPUID) + return MA_FALSE; +#else + int info[4]; + ma_cpuid(info, 1); + return (info[3] & (1 << 26)) != 0; +#endif +#endif +#else + return MA_FALSE; /* SSE2 is only supported on x86 and x64 architectures. */ +#endif +#else + return MA_FALSE; /* No compiler support. */ +#endif +} + +#if 0 +static MA_INLINE ma_bool32 ma_has_avx() +{ +#if defined(MA_SUPPORT_AVX) +#if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_AVX) +#if defined(_AVX_) || defined(__AVX__) + return MA_TRUE; /* If the compiler is allowed to freely generate AVX code we can assume support. */ +#else + /* AVX requires both CPU and OS support. */ +#if defined(MA_NO_CPUID) || defined(MA_NO_XGETBV) + return MA_FALSE; +#else + int info[4]; + ma_cpuid(info, 1); + if (((info[2] & (1 << 27)) != 0) && ((info[2] & (1 << 28)) != 0)) { + ma_uint64 xrc = ma_xgetbv(0); + if ((xrc & 0x06) == 0x06) { + return MA_TRUE; + } else { + return MA_FALSE; + } + } else { + return MA_FALSE; + } +#endif +#endif +#else + return MA_FALSE; /* AVX is only supported on x86 and x64 architectures. */ +#endif +#else + return MA_FALSE; /* No compiler support. 
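ma_has_sse2() above falls back to CPUID leaf 1, EDX bit 26 when support cannot be assumed at compile time. The sketch below performs the same check through GCC/Clang's <cpuid.h> helper rather than raw inline assembly; it assumes an x86/x64 target and a GCC-compatible compiler and is purely illustrative.

/* Sketch: the same CPUID check ma_has_sse2() performs, via __get_cpuid(). */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

    if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
        /* Leaf 1, EDX bit 26 reports SSE2 support. */
        printf("SSE2: %s\n", (edx & (1u << 26)) ? "yes" : "no");
    } else {
        printf("CPUID leaf 1 not available\n");
    }

    return 0;
}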
*/ +#endif +} +#endif + +static MA_INLINE ma_bool32 ma_has_avx2(void) +{ +#if defined(MA_SUPPORT_AVX2) +#if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_AVX2) +#if defined(_AVX2_) || defined(__AVX2__) + return MA_TRUE; /* If the compiler is allowed to freely generate AVX2 code we can assume support. */ +#else + /* AVX2 requires both CPU and OS support. */ +#if defined(MA_NO_CPUID) || defined(MA_NO_XGETBV) + return MA_FALSE; +#else + int info1[4]; + int info7[4]; + ma_cpuid(info1, 1); + ma_cpuid(info7, 7); + if (((info1[2] & (1 << 27)) != 0) && ((info7[1] & (1 << 5)) != 0)) { + ma_uint64 xrc = ma_xgetbv(0); + if ((xrc & 0x06) == 0x06) { + return MA_TRUE; + } else { + return MA_FALSE; + } + } else { + return MA_FALSE; + } +#endif +#endif +#else + return MA_FALSE; /* AVX2 is only supported on x86 and x64 architectures. */ +#endif +#else + return MA_FALSE; /* No compiler support. */ +#endif +} + +static MA_INLINE ma_bool32 ma_has_neon(void) +{ +#if defined(MA_SUPPORT_NEON) +#if defined(MA_ARM) && !defined(MA_NO_NEON) +#if (defined(__ARM_NEON) || defined(__aarch64__) || defined(_M_ARM64)) + return MA_TRUE; /* If the compiler is allowed to freely generate NEON code we can assume support. */ +#else + /* TODO: Runtime check. */ + return MA_FALSE; +#endif +#else + return MA_FALSE; /* NEON is only supported on ARM architectures. */ +#endif +#else + return MA_FALSE; /* No compiler support. */ +#endif +} + +#if defined(__has_builtin) +#define MA_COMPILER_HAS_BUILTIN(x) __has_builtin(x) +#else +#define MA_COMPILER_HAS_BUILTIN(x) 0 +#endif + +#ifndef MA_ASSUME +#if MA_COMPILER_HAS_BUILTIN(__builtin_assume) +#define MA_ASSUME(x) __builtin_assume(x) +#elif MA_COMPILER_HAS_BUILTIN(__builtin_unreachable) +#define MA_ASSUME(x) do { if (!(x)) __builtin_unreachable(); } while (0) +#elif defined(_MSC_VER) +#define MA_ASSUME(x) __assume(x) +#else +#define MA_ASSUME(x) (void)(x) +#endif +#endif + +#ifndef MA_RESTRICT +#if defined(__clang__) || defined(__GNUC__) || defined(_MSC_VER) +#define MA_RESTRICT __restrict +#else +#define MA_RESTRICT +#endif +#endif + +#if defined(_MSC_VER) && _MSC_VER >= 1400 +#define MA_HAS_BYTESWAP16_INTRINSIC +#define MA_HAS_BYTESWAP32_INTRINSIC +#define MA_HAS_BYTESWAP64_INTRINSIC +#elif defined(__clang__) +#if MA_COMPILER_HAS_BUILTIN(__builtin_bswap16) +#define MA_HAS_BYTESWAP16_INTRINSIC +#endif +#if MA_COMPILER_HAS_BUILTIN(__builtin_bswap32) +#define MA_HAS_BYTESWAP32_INTRINSIC +#endif +#if MA_COMPILER_HAS_BUILTIN(__builtin_bswap64) +#define MA_HAS_BYTESWAP64_INTRINSIC +#endif +#elif defined(__GNUC__) +#if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) +#define MA_HAS_BYTESWAP32_INTRINSIC +#define MA_HAS_BYTESWAP64_INTRINSIC +#endif +#if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) +#define MA_HAS_BYTESWAP16_INTRINSIC +#endif +#endif + + +static MA_INLINE ma_bool32 ma_is_little_endian(void) +{ +#if defined(MA_X86) || defined(MA_X64) + return MA_TRUE; +#else + int n = 1; + return (*(char*)&n) == 1; +#endif +} + +static MA_INLINE ma_bool32 ma_is_big_endian(void) +{ + return !ma_is_little_endian(); +} + + +static MA_INLINE ma_uint32 ma_swap_endian_uint32(ma_uint32 n) +{ +#ifdef MA_HAS_BYTESWAP32_INTRINSIC +#if defined(_MSC_VER) + return _byteswap_ulong(n); +#elif defined(__GNUC__) || defined(__clang__) +#if defined(MA_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 6) && !defined(MA_64BIT) /* <-- 64-bit inline assembly has not been tested, so disabling for now. */ + /* Inline assembly optimized implementation for ARM. 
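The ma_has_*() helpers above exist so the implementation can pick a SIMD code path once at runtime. The sketch below shows that dispatch pattern under assumed names (ma_example_*); the SSE2/AVX2 branches return the scalar routine here because the intrinsic versions are not part of this example, and a real caller would live inside the library since the helpers are static.

/* Sketch of the runtime-dispatch pattern these helpers enable (ma_example_* names are hypothetical). */
typedef void (*ma_example_mix_proc)(float* dst, const float* src, ma_uint64 count);

static void ma_example_mix_scalar(float* dst, const float* src, ma_uint64 count)
{
    ma_uint64 i;
    for (i = 0; i < count; i += 1) {
        dst[i] += src[i];
    }
}

/* Pick the widest implementation the CPU supports, once, at initialisation time. */
static ma_example_mix_proc ma_example_choose_mix_proc(void)
{
    if (ma_has_avx2()) {
        return ma_example_mix_scalar; /* an AVX2 version would be returned here */
    }
    if (ma_has_sse2()) {
        return ma_example_mix_scalar; /* an SSE2 version would be returned here */
    }
    return ma_example_mix_scalar;
}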
In my testing, GCC does not generate optimized code with __builtin_bswap32(). */ + ma_uint32 r; + __asm__ __volatile__ ( +#if defined(MA_64BIT) + "rev %w[out], %w[in]" : [out]"=r"(r) : [in]"r"(n) /* <-- This is untested. If someone in the community could test this, that would be appreciated! */ +#else + "rev %[out], %[in]" : [out]"=r"(r) : [in]"r"(n) +#endif + ); + return r; +#else + return __builtin_bswap32(n); +#endif +#else +#error "This compiler does not support the byte swap intrinsic." +#endif +#else + return ((n & 0xFF000000) >> 24) | + ((n & 0x00FF0000) >> 8) | + ((n & 0x0000FF00) << 8) | + ((n & 0x000000FF) << 24); +#endif +} + + +#if !defined(MA_EMSCRIPTEN) +#ifdef MA_WIN32 +static void ma_sleep__win32(ma_uint32 milliseconds) +{ + Sleep((DWORD)milliseconds); +} +#endif +#ifdef MA_POSIX +static void ma_sleep__posix(ma_uint32 milliseconds) +{ +#ifdef MA_EMSCRIPTEN + (void)milliseconds; + MA_ASSERT(MA_FALSE); /* The Emscripten build should never sleep. */ +#else +#if (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 199309L) || defined(MA_NX) + struct timespec ts; + ts.tv_sec = milliseconds / 1000; + ts.tv_nsec = milliseconds % 1000 * 1000000; + nanosleep(&ts, NULL); +#else + struct timeval tv; + tv.tv_sec = milliseconds / 1000; + tv.tv_usec = milliseconds % 1000 * 1000; + select(0, NULL, NULL, NULL, &tv); +#endif +#endif +} +#endif + +static MA_INLINE void ma_sleep(ma_uint32 milliseconds) +{ +#ifdef MA_WIN32 + ma_sleep__win32(milliseconds); +#endif +#ifdef MA_POSIX + ma_sleep__posix(milliseconds); +#endif +} +#endif + +static MA_INLINE void ma_yield(void) +{ +#if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) + /* x86/x64 */ +#if (defined(_MSC_VER) || defined(__WATCOMC__) || defined(__DMC__)) && !defined(__clang__) +#if _MSC_VER >= 1400 + _mm_pause(); +#else +#if defined(__DMC__) + /* Digital Mars does not recognize the PAUSE opcode. Fall back to NOP. */ + __asm nop; +#else + __asm pause; +#endif +#endif +#else + __asm__ __volatile__ ("pause"); +#endif +#elif (defined(__arm__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7) || defined(_M_ARM64) || (defined(_M_ARM) && _M_ARM >= 7) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__) + /* ARM */ +#if defined(_MSC_VER) + /* Apparently there is a __yield() intrinsic that's compatible with ARM, but I cannot find documentation for it nor can I find where it's declared. */ + __yield(); +#else + __asm__ __volatile__ ("yield"); /* ARMv6K/ARMv6T2 and above. */ +#endif +#else + /* Unknown or unsupported architecture. No-op. */ +#endif +} + + +#define MA_MM_DENORMALS_ZERO_MASK 0x0040 +#define MA_MM_FLUSH_ZERO_MASK 0x8000 + +static MA_INLINE unsigned int ma_disable_denormals(void) +{ + unsigned int prevState; + +#if defined(_MSC_VER) + { + /* + Older versions of Visual Studio don't support the "safe" versions of _controlfp_s(). I don't + know which version of Visual Studio first added support for _controlfp_s(), but I do know + that VC6 lacks support. _MSC_VER = 1200 is VC6, but if you get compilation errors on older + versions of Visual Studio, let me know and I'll make the necessary adjustment. 
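The portable fallback of ma_swap_endian_uint32() is the classic mask-and-shift byte swap. A quick worked check of that arithmetic, as a standalone program with a local copy of the fallback:

/* Worked example: the mask-and-shift fallback turns 0x11223344 into 0x44332211, and swapping twice is a no-op. */
#include <assert.h>

static unsigned int example_swap32(unsigned int n)
{
    return ((n & 0xFF000000) >> 24) |
           ((n & 0x00FF0000) >>  8) |
           ((n & 0x0000FF00) <<  8) |
           ((n & 0x000000FF) << 24);
}

int main(void)
{
    assert(example_swap32(0x11223344u) == 0x44332211u);
    assert(example_swap32(example_swap32(0xDEADBEEFu)) == 0xDEADBEEFu);
    return 0;
}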
+ */ +#if _MSC_VER <= 1200 + { + prevState = _statusfp(); + _controlfp(prevState | _DN_FLUSH, _MCW_DN); + } +#else + { + unsigned int unused; + _controlfp_s(&prevState, 0, 0); + _controlfp_s(&unused, prevState | _DN_FLUSH, _MCW_DN); + } +#endif + } +#elif defined(MA_X86) || defined(MA_X64) + { +#if defined(__SSE2__) && !(defined(__TINYC__) || defined(__WATCOMC__) || defined(__COSMOPOLITAN__)) /* <-- Add compilers that lack support for _mm_getcsr() and _mm_setcsr() to this list. */ + { + prevState = _mm_getcsr(); + _mm_setcsr(prevState | MA_MM_DENORMALS_ZERO_MASK | MA_MM_FLUSH_ZERO_MASK); + } +#else + { + /* x88/64, but no support for _mm_getcsr()/_mm_setcsr(). May need to fall back to inlined assembly here. */ + prevState = 0; + } +#endif + } +#else + { + /* Unknown or unsupported architecture. No-op. */ + prevState = 0; + } +#endif + + return prevState; +} + +static MA_INLINE void ma_restore_denormals(unsigned int prevState) +{ +#if defined(_MSC_VER) + { + /* Older versions of Visual Studio do not support _controlfp_s(). See ma_disable_denormals(). */ +#if _MSC_VER <= 1200 + { + _controlfp(prevState, _MCW_DN); + } +#else + { + unsigned int unused; + _controlfp_s(&unused, prevState, _MCW_DN); + } +#endif + } +#elif defined(MA_X86) || defined(MA_X64) + { +#if defined(__SSE2__) && !(defined(__TINYC__) || defined(__WATCOMC__) || defined(__COSMOPOLITAN__)) /* <-- Add compilers that lack support for _mm_getcsr() and _mm_setcsr() to this list. */ + { + _mm_setcsr(prevState); + } +#else + { + /* x88/64, but no support for _mm_getcsr()/_mm_setcsr(). May need to fall back to inlined assembly here. */ + (void)prevState; + } +#endif + } +#else + { + /* Unknown or unsupported architecture. No-op. */ + (void)prevState; + } +#endif +} + + +#ifdef MA_ANDROID +#include + +int ma_android_sdk_version() +{ + char sdkVersion[PROP_VALUE_MAX + 1] = {0, }; + if (__system_property_get("ro.build.version.sdk", sdkVersion)) { + return atoi(sdkVersion); + } + + return 0; +} +#endif + + +#ifndef MA_COINIT_VALUE +#define MA_COINIT_VALUE 0 /* 0 = COINIT_MULTITHREADED */ +#endif + + +#ifndef MA_FLT_MAX +#ifdef FLT_MAX +#define MA_FLT_MAX FLT_MAX +#else +#define MA_FLT_MAX 3.402823466e+38F +#endif +#endif + + +#ifndef MA_PI +#define MA_PI 3.14159265358979323846264f +#endif +#ifndef MA_PI_D +#define MA_PI_D 3.14159265358979323846264 +#endif +#ifndef MA_TAU +#define MA_TAU 6.28318530717958647693f +#endif +#ifndef MA_TAU_D +#define MA_TAU_D 6.28318530717958647693 +#endif + + +/* The default format when ma_format_unknown (0) is requested when initializing a device. */ +#ifndef MA_DEFAULT_FORMAT +#define MA_DEFAULT_FORMAT ma_format_f32 +#endif + +/* The default channel count to use when 0 is used when initializing a device. */ +#ifndef MA_DEFAULT_CHANNELS +#define MA_DEFAULT_CHANNELS 2 +#endif + +/* The default sample rate to use when 0 is used when initializing a device. */ +#ifndef MA_DEFAULT_SAMPLE_RATE +#define MA_DEFAULT_SAMPLE_RATE 48000 +#endif + +/* Default periods when none is specified in ma_device_init(). More periods means more work on the CPU. */ +#ifndef MA_DEFAULT_PERIODS +#define MA_DEFAULT_PERIODS 3 +#endif + +/* The default period size in milliseconds for low latency mode. */ +#ifndef MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY +#define MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY 10 +#endif + +/* The default buffer size in milliseconds for conservative mode. 
*/ +#ifndef MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE +#define MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE 100 +#endif + +/* The default LPF filter order for linear resampling. Note that this is clamped to MA_MAX_FILTER_ORDER. */ +#ifndef MA_DEFAULT_RESAMPLER_LPF_ORDER +#if MA_MAX_FILTER_ORDER >= 4 +#define MA_DEFAULT_RESAMPLER_LPF_ORDER 4 +#else +#define MA_DEFAULT_RESAMPLER_LPF_ORDER MA_MAX_FILTER_ORDER +#endif +#endif + + +#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-variable" +#endif + +/* Standard sample rates, in order of priority. */ +static ma_uint32 g_maStandardSampleRatePriorities[] = { + (ma_uint32)ma_standard_sample_rate_48000, + (ma_uint32)ma_standard_sample_rate_44100, + + (ma_uint32)ma_standard_sample_rate_32000, + (ma_uint32)ma_standard_sample_rate_24000, + (ma_uint32)ma_standard_sample_rate_22050, + + (ma_uint32)ma_standard_sample_rate_88200, + (ma_uint32)ma_standard_sample_rate_96000, + (ma_uint32)ma_standard_sample_rate_176400, + (ma_uint32)ma_standard_sample_rate_192000, + + (ma_uint32)ma_standard_sample_rate_16000, + (ma_uint32)ma_standard_sample_rate_11025, + (ma_uint32)ma_standard_sample_rate_8000, + + (ma_uint32)ma_standard_sample_rate_352800, + (ma_uint32)ma_standard_sample_rate_384000 +}; + +static MA_INLINE ma_bool32 ma_is_standard_sample_rate(ma_uint32 sampleRate) +{ + ma_uint32 iSampleRate; + + for (iSampleRate = 0; iSampleRate < sizeof(g_maStandardSampleRatePriorities) / sizeof(g_maStandardSampleRatePriorities[0]); iSampleRate += 1) { + if (g_maStandardSampleRatePriorities[iSampleRate] == sampleRate) { + return MA_TRUE; + } + } + + /* Getting here means the sample rate is not supported. */ + return MA_FALSE; +} + + +static ma_format g_maFormatPriorities[] = { + ma_format_s16, /* Most common */ + ma_format_f32, + + /*ma_format_s24_32,*/ /* Clean alignment */ + ma_format_s32, + + ma_format_s24, /* Unclean alignment */ + + ma_format_u8 /* Low quality */ +}; +#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) +#pragma GCC diagnostic pop +#endif + + +MA_API void ma_version(ma_uint32* pMajor, ma_uint32* pMinor, ma_uint32* pRevision) +{ + if (pMajor) { + *pMajor = MA_VERSION_MAJOR; + } + + if (pMinor) { + *pMinor = MA_VERSION_MINOR; + } + + if (pRevision) { + *pRevision = MA_VERSION_REVISION; + } +} + +MA_API const char* ma_version_string(void) +{ + return MA_VERSION_STRING; +} + + +/****************************************************************************** + +Standard Library Stuff + +******************************************************************************/ +#ifndef MA_ASSERT +#define MA_ASSERT(condition) assert(condition) +#endif + +#ifndef MA_MALLOC +#define MA_MALLOC(sz) malloc((sz)) +#endif +#ifndef MA_REALLOC +#define MA_REALLOC(p, sz) realloc((p), (sz)) +#endif +#ifndef MA_FREE +#define MA_FREE(p) free((p)) +#endif + +static MA_INLINE void ma_zero_memory_default(void* p, size_t sz) +{ + if (p == NULL) { + MA_ASSERT(sz == 0); /* If this is triggered there's an error with the calling code. 
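ma_version() and ma_version_string() above are public API. A small sketch of querying them at runtime, assuming the header has been included by the caller:

/* Example: print the library version (ma_example_print_version is a hypothetical helper). */
#include <stdio.h>

static void ma_example_print_version(void)
{
    ma_uint32 major, minor, revision;
    ma_version(&major, &minor, &revision);
    printf("miniaudio %u.%u.%u (%s)\n",
           (unsigned int)major, (unsigned int)minor, (unsigned int)revision,
           ma_version_string());
}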
*/ + return; + } + + if (sz > 0) { + memset(p, 0, sz); + } +} + + +#ifndef MA_ZERO_MEMORY +#define MA_ZERO_MEMORY(p, sz) ma_zero_memory_default((p), (sz)) +#endif +#ifndef MA_COPY_MEMORY +#define MA_COPY_MEMORY(dst, src, sz) memcpy((dst), (src), (sz)) +#endif +#ifndef MA_MOVE_MEMORY +#define MA_MOVE_MEMORY(dst, src, sz) memmove((dst), (src), (sz)) +#endif + +#define MA_ZERO_OBJECT(p) MA_ZERO_MEMORY((p), sizeof(*(p))) + +#define ma_countof(x) (sizeof(x) / sizeof(x[0])) +#define ma_max(x, y) (((x) > (y)) ? (x) : (y)) +#define ma_min(x, y) (((x) < (y)) ? (x) : (y)) +#define ma_abs(x) (((x) > 0) ? (x) : -(x)) +#define ma_clamp(x, lo, hi) (ma_max(lo, ma_min(x, hi))) +#define ma_offset_ptr(p, offset) (((ma_uint8*)(p)) + (offset)) +#define ma_align(x, a) ((x + (a-1)) & ~(a-1)) +#define ma_align_64(x) ma_align(x, 8) + +#define ma_buffer_frame_capacity(buffer, channels, format) (sizeof(buffer) / ma_get_bytes_per_sample(format) / (channels)) + +static MA_INLINE double ma_sind(double x) +{ + /* TODO: Implement custom sin(x). */ + return sin(x); +} + +static MA_INLINE double ma_expd(double x) +{ + /* TODO: Implement custom exp(x). */ + return exp(x); +} + +static MA_INLINE double ma_logd(double x) +{ + /* TODO: Implement custom log(x). */ + return log(x); +} + +static MA_INLINE double ma_powd(double x, double y) +{ + /* TODO: Implement custom pow(x, y). */ + return pow(x, y); +} + +static MA_INLINE double ma_sqrtd(double x) +{ + /* TODO: Implement custom sqrt(x). */ + return sqrt(x); +} + + +static MA_INLINE float ma_rsqrtf(float x) +{ +#if defined(MA_SUPPORT_SSE2) && !defined(MA_NO_SSE2) && (defined(MA_X64) || (defined(_M_IX86_FP) && _M_IX86_FP == 2) || defined(__SSE2__)) + { + /* + For SSE we can use RSQRTSS. + + This Stack Overflow post suggests that compilers don't necessarily generate optimal code + when using intrinsics: + + https://web.archive.org/web/20221211012522/https://stackoverflow.com/questions/32687079/getting-fewest-instructions-for-rsqrtss-wrapper + + I'm going to do something similar here, but a bit simpler. + */ +#if defined(__GNUC__) || defined(__clang__) + { + float result; + __asm__ __volatile__("rsqrtss %1, %0" : "=x"(result) : "x"(x)); + return result; + } +#else + { + return _mm_cvtss_f32(_mm_rsqrt_ss(_mm_set_ps1(x))); + } +#endif + } +#else + { + return 1 / (float)ma_sqrtd(x); + } +#endif +} + + +static MA_INLINE float ma_sinf(float x) +{ + return (float)ma_sind((float)x); +} + +static MA_INLINE double ma_cosd(double x) +{ + return ma_sind((MA_PI_D*0.5) - x); +} + +static MA_INLINE float ma_cosf(float x) +{ + return (float)ma_cosd((float)x); +} + +static MA_INLINE double ma_log10d(double x) +{ + return ma_logd(x) * 0.43429448190325182765; +} + +static MA_INLINE float ma_powf(float x, float y) +{ + return (float)ma_powd((double)x, (double)y); +} + +static MA_INLINE float ma_log10f(float x) +{ + return (float)ma_log10d((double)x); +} + + +static MA_INLINE double ma_degrees_to_radians(double degrees) +{ + return degrees * 0.01745329252; +} + +static MA_INLINE double ma_radians_to_degrees(double radians) +{ + return radians * 57.295779512896; +} + +static MA_INLINE float ma_degrees_to_radians_f(float degrees) +{ + return degrees * 0.01745329252f; +} + +static MA_INLINE float ma_radians_to_degrees_f(float radians) +{ + return radians * 57.295779512896f; +} + + +/* +Return Values: +0: Success +22: EINVAL +34: ERANGE + +Not using symbolic constants for errors because I want to avoid #including errno.h + +These are marked as no-inline because of some bad code generation by Clang. 
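The trig and log helpers above lean on two identities: cos(x) = sin(pi/2 - x), and log10(x) = ln(x) * (1/ln 10) with 1/ln 10 ~= 0.43429448190325182765. A quick standalone check of both, using the standard library as the reference:

/* Worked check of the identities used by ma_cosd() and ma_log10d(). */
#include <assert.h>
#include <math.h>

int main(void)
{
    double x = 0.7;

    /* cos(x) == sin(pi/2 - x) */
    assert(fabs(cos(x) - sin(3.14159265358979323846264 * 0.5 - x)) < 1e-12);

    /* log10(1000) == ln(1000) * (1/ln 10) == 3 */
    assert(fabs(log(1000.0) * 0.43429448190325182765 - 3.0) < 1e-12);

    return 0;
}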
None of these functions +are used in any performance-critical code within miniaudio. +*/ +MA_API MA_NO_INLINE int ma_strcpy_s(char* dst, size_t dstSizeInBytes, const char* src) +{ + size_t i; + + if (dst == 0) { + return 22; + } + if (dstSizeInBytes == 0) { + return 34; + } + if (src == 0) { + dst[0] = '\0'; + return 22; + } + + for (i = 0; i < dstSizeInBytes && src[i] != '\0'; ++i) { + dst[i] = src[i]; + } + + if (i < dstSizeInBytes) { + dst[i] = '\0'; + return 0; + } + + dst[0] = '\0'; + return 34; +} + +MA_API MA_NO_INLINE int ma_wcscpy_s(wchar_t* dst, size_t dstCap, const wchar_t* src) +{ + size_t i; + + if (dst == 0) { + return 22; + } + if (dstCap == 0) { + return 34; + } + if (src == 0) { + dst[0] = '\0'; + return 22; + } + + for (i = 0; i < dstCap && src[i] != '\0'; ++i) { + dst[i] = src[i]; + } + + if (i < dstCap) { + dst[i] = '\0'; + return 0; + } + + dst[0] = '\0'; + return 34; +} + + +MA_API MA_NO_INLINE int ma_strncpy_s(char* dst, size_t dstSizeInBytes, const char* src, size_t count) +{ + size_t maxcount; + size_t i; + + if (dst == 0) { + return 22; + } + if (dstSizeInBytes == 0) { + return 34; + } + if (src == 0) { + dst[0] = '\0'; + return 22; + } + + maxcount = count; + if (count == ((size_t)-1) || count >= dstSizeInBytes) { /* -1 = _TRUNCATE */ + maxcount = dstSizeInBytes - 1; + } + + for (i = 0; i < maxcount && src[i] != '\0'; ++i) { + dst[i] = src[i]; + } + + if (src[i] == '\0' || i == count || count == ((size_t)-1)) { + dst[i] = '\0'; + return 0; + } + + dst[0] = '\0'; + return 34; +} + +MA_API MA_NO_INLINE int ma_strcat_s(char* dst, size_t dstSizeInBytes, const char* src) +{ + char* dstorig; + + if (dst == 0) { + return 22; + } + if (dstSizeInBytes == 0) { + return 34; + } + if (src == 0) { + dst[0] = '\0'; + return 22; + } + + dstorig = dst; + + while (dstSizeInBytes > 0 && dst[0] != '\0') { + dst += 1; + dstSizeInBytes -= 1; + } + + if (dstSizeInBytes == 0) { + return 22; /* Unterminated. */ + } + + + while (dstSizeInBytes > 0 && src[0] != '\0') { + *dst++ = *src++; + dstSizeInBytes -= 1; + } + + if (dstSizeInBytes > 0) { + dst[0] = '\0'; + } else { + dstorig[0] = '\0'; + return 34; + } + + return 0; +} + +MA_API MA_NO_INLINE int ma_strncat_s(char* dst, size_t dstSizeInBytes, const char* src, size_t count) +{ + char* dstorig; + + if (dst == 0) { + return 22; + } + if (dstSizeInBytes == 0) { + return 34; + } + if (src == 0) { + return 22; + } + + dstorig = dst; + + while (dstSizeInBytes > 0 && dst[0] != '\0') { + dst += 1; + dstSizeInBytes -= 1; + } + + if (dstSizeInBytes == 0) { + return 22; /* Unterminated. */ + } + + + if (count == ((size_t)-1)) { /* _TRUNCATE */ + count = dstSizeInBytes - 1; + } + + while (dstSizeInBytes > 0 && src[0] != '\0' && count > 0) { + *dst++ = *src++; + dstSizeInBytes -= 1; + count -= 1; + } + + if (dstSizeInBytes > 0) { + dst[0] = '\0'; + } else { + dstorig[0] = '\0'; + return 34; + } + + return 0; +} + +MA_API MA_NO_INLINE int ma_itoa_s(int value, char* dst, size_t dstSizeInBytes, int radix) +{ + int sign; + unsigned int valueU; + char* dstEnd; + + if (dst == NULL || dstSizeInBytes == 0) { + return 22; + } + if (radix < 2 || radix > 36) { + dst[0] = '\0'; + return 22; + } + + sign = (value < 0 && radix == 10) ? -1 : 1; /* The negative sign is only used when the base is 10. 
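A short illustration of the return-code convention described above, using ma_strcpy_s(): 0 on success, 22 (EINVAL) for bad arguments, 34 (ERANGE) when the destination is too small, in which case the destination is left as an empty string. The wrapper function name is hypothetical.

/* Example of the 0/22/34 return-code convention. */
static void ma_example_strcpy_results(void)
{
    char big[16];
    char small[4];

    int r1 = ma_strcpy_s(big,   sizeof(big),   "abc");      /* 0:  fits; big now holds "abc".                */
    int r2 = ma_strcpy_s(small, sizeof(small), "too long"); /* 34: destination too small; small[0] == '\0'. */
    int r3 = ma_strcpy_s(NULL,  0,             "abc");      /* 22: invalid destination pointer.              */

    (void)r1; (void)r2; (void)r3;
}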
*/ + + if (value < 0) { + valueU = -value; + } else { + valueU = value; + } + + dstEnd = dst; + do + { + int remainder = valueU % radix; + if (remainder > 9) { + *dstEnd = (char)((remainder - 10) + 'a'); + } else { + *dstEnd = (char)(remainder + '0'); + } + + dstEnd += 1; + dstSizeInBytes -= 1; + valueU /= radix; + } while (dstSizeInBytes > 0 && valueU > 0); + + if (dstSizeInBytes == 0) { + dst[0] = '\0'; + return 22; /* Ran out of room in the output buffer. */ + } + + if (sign < 0) { + *dstEnd++ = '-'; + dstSizeInBytes -= 1; + } + + if (dstSizeInBytes == 0) { + dst[0] = '\0'; + return 22; /* Ran out of room in the output buffer. */ + } + + *dstEnd = '\0'; + + + /* At this point the string will be reversed. */ + dstEnd -= 1; + while (dst < dstEnd) { + char temp = *dst; + *dst = *dstEnd; + *dstEnd = temp; + + dst += 1; + dstEnd -= 1; + } + + return 0; +} + +MA_API MA_NO_INLINE int ma_strcmp(const char* str1, const char* str2) +{ + if (str1 == str2) return 0; + + /* These checks differ from the standard implementation. It's not important, but I prefer it just for sanity. */ + if (str1 == NULL) return -1; + if (str2 == NULL) return 1; + + for (;;) { + if (str1[0] == '\0') { + break; + } + if (str1[0] != str2[0]) { + break; + } + + str1 += 1; + str2 += 1; + } + + return ((unsigned char*)str1)[0] - ((unsigned char*)str2)[0]; +} + +MA_API MA_NO_INLINE int ma_strappend(char* dst, size_t dstSize, const char* srcA, const char* srcB) +{ + int result; + + result = ma_strncpy_s(dst, dstSize, srcA, (size_t)-1); + if (result != 0) { + return result; + } + + result = ma_strncat_s(dst, dstSize, srcB, (size_t)-1); + if (result != 0) { + return result; + } + + return result; +} + +MA_API MA_NO_INLINE char* ma_copy_string(const char* src, const ma_allocation_callbacks* pAllocationCallbacks) +{ + size_t sz; + char* dst; + + if (src == NULL) { + return NULL; + } + + sz = strlen(src)+1; + dst = (char*)ma_malloc(sz, pAllocationCallbacks); + if (dst == NULL) { + return NULL; + } + + ma_strcpy_s(dst, sz, src); + + return dst; +} + +MA_API MA_NO_INLINE wchar_t* ma_copy_string_w(const wchar_t* src, const ma_allocation_callbacks* pAllocationCallbacks) +{ + size_t sz = wcslen(src)+1; + wchar_t* dst = (wchar_t*)ma_malloc(sz * sizeof(*dst), pAllocationCallbacks); + if (dst == NULL) { + return NULL; + } + + ma_wcscpy_s(dst, sz, src); + + return dst; +} + + + +#include +static ma_result ma_result_from_errno(int e) +{ + if (e == 0) { + return MA_SUCCESS; + } +#ifdef EPERM + else if (e == EPERM) { return MA_INVALID_OPERATION; } +#endif +#ifdef ENOENT + else if (e == ENOENT) { return MA_DOES_NOT_EXIST; } +#endif +#ifdef ESRCH + else if (e == ESRCH) { return MA_DOES_NOT_EXIST; } +#endif +#ifdef EINTR + else if (e == EINTR) { return MA_INTERRUPT; } +#endif +#ifdef EIO + else if (e == EIO) { return MA_IO_ERROR; } +#endif +#ifdef ENXIO + else if (e == ENXIO) { return MA_DOES_NOT_EXIST; } +#endif +#ifdef E2BIG + else if (e == E2BIG) { return MA_INVALID_ARGS; } +#endif +#ifdef ENOEXEC + else if (e == ENOEXEC) { return MA_INVALID_FILE; } +#endif +#ifdef EBADF + else if (e == EBADF) { return MA_INVALID_FILE; } +#endif +#ifdef ECHILD + else if (e == ECHILD) { return MA_ERROR; } +#endif +#ifdef EAGAIN + else if (e == EAGAIN) { return MA_UNAVAILABLE; } +#endif +#ifdef ENOMEM + else if (e == ENOMEM) { return MA_OUT_OF_MEMORY; } +#endif +#ifdef EACCES + else if (e == EACCES) { return MA_ACCESS_DENIED; } +#endif +#ifdef EFAULT + else if (e == EFAULT) { return MA_BAD_ADDRESS; } +#endif +#ifdef ENOTBLK + else if (e == ENOTBLK) { return 
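ma_itoa_s() above writes the digits in reverse and then flips the buffer, emitting the '-' sign only for base 10. A small usage sketch (the wrapper name is hypothetical):

/* Example of ma_itoa_s() output for a few radixes. */
static void ma_example_itoa(void)
{
    char buf[32];

    ma_itoa_s(-1234, buf, sizeof(buf), 10); /* buf == "-1234" */
    ma_itoa_s(255,   buf, sizeof(buf), 16); /* buf == "ff"    */
    ma_itoa_s(5,     buf, sizeof(buf), 2);  /* buf == "101"   */
}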
MA_ERROR; } +#endif +#ifdef EBUSY + else if (e == EBUSY) { return MA_BUSY; } +#endif +#ifdef EEXIST + else if (e == EEXIST) { return MA_ALREADY_EXISTS; } +#endif +#ifdef EXDEV + else if (e == EXDEV) { return MA_ERROR; } +#endif +#ifdef ENODEV + else if (e == ENODEV) { return MA_DOES_NOT_EXIST; } +#endif +#ifdef ENOTDIR + else if (e == ENOTDIR) { return MA_NOT_DIRECTORY; } +#endif +#ifdef EISDIR + else if (e == EISDIR) { return MA_IS_DIRECTORY; } +#endif +#ifdef EINVAL + else if (e == EINVAL) { return MA_INVALID_ARGS; } +#endif +#ifdef ENFILE + else if (e == ENFILE) { return MA_TOO_MANY_OPEN_FILES; } +#endif +#ifdef EMFILE + else if (e == EMFILE) { return MA_TOO_MANY_OPEN_FILES; } +#endif +#ifdef ENOTTY + else if (e == ENOTTY) { return MA_INVALID_OPERATION; } +#endif +#ifdef ETXTBSY + else if (e == ETXTBSY) { return MA_BUSY; } +#endif +#ifdef EFBIG + else if (e == EFBIG) { return MA_TOO_BIG; } +#endif +#ifdef ENOSPC + else if (e == ENOSPC) { return MA_NO_SPACE; } +#endif +#ifdef ESPIPE + else if (e == ESPIPE) { return MA_BAD_SEEK; } +#endif +#ifdef EROFS + else if (e == EROFS) { return MA_ACCESS_DENIED; } +#endif +#ifdef EMLINK + else if (e == EMLINK) { return MA_TOO_MANY_LINKS; } +#endif +#ifdef EPIPE + else if (e == EPIPE) { return MA_BAD_PIPE; } +#endif +#ifdef EDOM + else if (e == EDOM) { return MA_OUT_OF_RANGE; } +#endif +#ifdef ERANGE + else if (e == ERANGE) { return MA_OUT_OF_RANGE; } +#endif +#ifdef EDEADLK + else if (e == EDEADLK) { return MA_DEADLOCK; } +#endif +#ifdef ENAMETOOLONG + else if (e == ENAMETOOLONG) { return MA_PATH_TOO_LONG; } +#endif +#ifdef ENOLCK + else if (e == ENOLCK) { return MA_ERROR; } +#endif +#ifdef ENOSYS + else if (e == ENOSYS) { return MA_NOT_IMPLEMENTED; } +#endif +#ifdef ENOTEMPTY + else if (e == ENOTEMPTY) { return MA_DIRECTORY_NOT_EMPTY; } +#endif +#ifdef ELOOP + else if (e == ELOOP) { return MA_TOO_MANY_LINKS; } +#endif +#ifdef ENOMSG + else if (e == ENOMSG) { return MA_NO_MESSAGE; } +#endif +#ifdef EIDRM + else if (e == EIDRM) { return MA_ERROR; } +#endif +#ifdef ECHRNG + else if (e == ECHRNG) { return MA_ERROR; } +#endif +#ifdef EL2NSYNC + else if (e == EL2NSYNC) { return MA_ERROR; } +#endif +#ifdef EL3HLT + else if (e == EL3HLT) { return MA_ERROR; } +#endif +#ifdef EL3RST + else if (e == EL3RST) { return MA_ERROR; } +#endif +#ifdef ELNRNG + else if (e == ELNRNG) { return MA_OUT_OF_RANGE; } +#endif +#ifdef EUNATCH + else if (e == EUNATCH) { return MA_ERROR; } +#endif +#ifdef ENOCSI + else if (e == ENOCSI) { return MA_ERROR; } +#endif +#ifdef EL2HLT + else if (e == EL2HLT) { return MA_ERROR; } +#endif +#ifdef EBADE + else if (e == EBADE) { return MA_ERROR; } +#endif +#ifdef EBADR + else if (e == EBADR) { return MA_ERROR; } +#endif +#ifdef EXFULL + else if (e == EXFULL) { return MA_ERROR; } +#endif +#ifdef ENOANO + else if (e == ENOANO) { return MA_ERROR; } +#endif +#ifdef EBADRQC + else if (e == EBADRQC) { return MA_ERROR; } +#endif +#ifdef EBADSLT + else if (e == EBADSLT) { return MA_ERROR; } +#endif +#ifdef EBFONT + else if (e == EBFONT) { return MA_INVALID_FILE; } +#endif +#ifdef ENOSTR + else if (e == ENOSTR) { return MA_ERROR; } +#endif +#ifdef ENODATA + else if (e == ENODATA) { return MA_NO_DATA_AVAILABLE; } +#endif +#ifdef ETIME + else if (e == ETIME) { return MA_TIMEOUT; } +#endif +#ifdef ENOSR + else if (e == ENOSR) { return MA_NO_DATA_AVAILABLE; } +#endif +#ifdef ENONET + else if (e == ENONET) { return MA_NO_NETWORK; } +#endif +#ifdef ENOPKG + else if (e == ENOPKG) { return MA_ERROR; } +#endif +#ifdef EREMOTE + else if (e == EREMOTE) { 
return MA_ERROR; } +#endif +#ifdef ENOLINK + else if (e == ENOLINK) { return MA_ERROR; } +#endif +#ifdef EADV + else if (e == EADV) { return MA_ERROR; } +#endif +#ifdef ESRMNT + else if (e == ESRMNT) { return MA_ERROR; } +#endif +#ifdef ECOMM + else if (e == ECOMM) { return MA_ERROR; } +#endif +#ifdef EPROTO + else if (e == EPROTO) { return MA_ERROR; } +#endif +#ifdef EMULTIHOP + else if (e == EMULTIHOP) { return MA_ERROR; } +#endif +#ifdef EDOTDOT + else if (e == EDOTDOT) { return MA_ERROR; } +#endif +#ifdef EBADMSG + else if (e == EBADMSG) { return MA_BAD_MESSAGE; } +#endif +#ifdef EOVERFLOW + else if (e == EOVERFLOW) { return MA_TOO_BIG; } +#endif +#ifdef ENOTUNIQ + else if (e == ENOTUNIQ) { return MA_NOT_UNIQUE; } +#endif +#ifdef EBADFD + else if (e == EBADFD) { return MA_ERROR; } +#endif +#ifdef EREMCHG + else if (e == EREMCHG) { return MA_ERROR; } +#endif +#ifdef ELIBACC + else if (e == ELIBACC) { return MA_ACCESS_DENIED; } +#endif +#ifdef ELIBBAD + else if (e == ELIBBAD) { return MA_INVALID_FILE; } +#endif +#ifdef ELIBSCN + else if (e == ELIBSCN) { return MA_INVALID_FILE; } +#endif +#ifdef ELIBMAX + else if (e == ELIBMAX) { return MA_ERROR; } +#endif +#ifdef ELIBEXEC + else if (e == ELIBEXEC) { return MA_ERROR; } +#endif +#ifdef EILSEQ + else if (e == EILSEQ) { return MA_INVALID_DATA; } +#endif +#ifdef ERESTART + else if (e == ERESTART) { return MA_ERROR; } +#endif +#ifdef ESTRPIPE + else if (e == ESTRPIPE) { return MA_ERROR; } +#endif +#ifdef EUSERS + else if (e == EUSERS) { return MA_ERROR; } +#endif +#ifdef ENOTSOCK + else if (e == ENOTSOCK) { return MA_NOT_SOCKET; } +#endif +#ifdef EDESTADDRREQ + else if (e == EDESTADDRREQ) { return MA_NO_ADDRESS; } +#endif +#ifdef EMSGSIZE + else if (e == EMSGSIZE) { return MA_TOO_BIG; } +#endif +#ifdef EPROTOTYPE + else if (e == EPROTOTYPE) { return MA_BAD_PROTOCOL; } +#endif +#ifdef ENOPROTOOPT + else if (e == ENOPROTOOPT) { return MA_PROTOCOL_UNAVAILABLE; } +#endif +#ifdef EPROTONOSUPPORT + else if (e == EPROTONOSUPPORT) { return MA_PROTOCOL_NOT_SUPPORTED; } +#endif +#ifdef ESOCKTNOSUPPORT + else if (e == ESOCKTNOSUPPORT) { return MA_SOCKET_NOT_SUPPORTED; } +#endif +#ifdef EOPNOTSUPP + else if (e == EOPNOTSUPP) { return MA_INVALID_OPERATION; } +#endif +#ifdef EPFNOSUPPORT + else if (e == EPFNOSUPPORT) { return MA_PROTOCOL_FAMILY_NOT_SUPPORTED; } +#endif +#ifdef EAFNOSUPPORT + else if (e == EAFNOSUPPORT) { return MA_ADDRESS_FAMILY_NOT_SUPPORTED; } +#endif +#ifdef EADDRINUSE + else if (e == EADDRINUSE) { return MA_ALREADY_IN_USE; } +#endif +#ifdef EADDRNOTAVAIL + else if (e == EADDRNOTAVAIL) { return MA_ERROR; } +#endif +#ifdef ENETDOWN + else if (e == ENETDOWN) { return MA_NO_NETWORK; } +#endif +#ifdef ENETUNREACH + else if (e == ENETUNREACH) { return MA_NO_NETWORK; } +#endif +#ifdef ENETRESET + else if (e == ENETRESET) { return MA_NO_NETWORK; } +#endif +#ifdef ECONNABORTED + else if (e == ECONNABORTED) { return MA_NO_NETWORK; } +#endif +#ifdef ECONNRESET + else if (e == ECONNRESET) { return MA_CONNECTION_RESET; } +#endif +#ifdef ENOBUFS + else if (e == ENOBUFS) { return MA_NO_SPACE; } +#endif +#ifdef EISCONN + else if (e == EISCONN) { return MA_ALREADY_CONNECTED; } +#endif +#ifdef ENOTCONN + else if (e == ENOTCONN) { return MA_NOT_CONNECTED; } +#endif +#ifdef ESHUTDOWN + else if (e == ESHUTDOWN) { return MA_ERROR; } +#endif +#ifdef ETOOMANYREFS + else if (e == ETOOMANYREFS) { return MA_ERROR; } +#endif +#ifdef ETIMEDOUT + else if (e == ETIMEDOUT) { return MA_TIMEOUT; } +#endif +#ifdef ECONNREFUSED + else if (e == ECONNREFUSED) { return 
MA_CONNECTION_REFUSED; } +#endif +#ifdef EHOSTDOWN + else if (e == EHOSTDOWN) { return MA_NO_HOST; } +#endif +#ifdef EHOSTUNREACH + else if (e == EHOSTUNREACH) { return MA_NO_HOST; } +#endif +#ifdef EALREADY + else if (e == EALREADY) { return MA_IN_PROGRESS; } +#endif +#ifdef EINPROGRESS + else if (e == EINPROGRESS) { return MA_IN_PROGRESS; } +#endif +#ifdef ESTALE + else if (e == ESTALE) { return MA_INVALID_FILE; } +#endif +#ifdef EUCLEAN + else if (e == EUCLEAN) { return MA_ERROR; } +#endif +#ifdef ENOTNAM + else if (e == ENOTNAM) { return MA_ERROR; } +#endif +#ifdef ENAVAIL + else if (e == ENAVAIL) { return MA_ERROR; } +#endif +#ifdef EISNAM + else if (e == EISNAM) { return MA_ERROR; } +#endif +#ifdef EREMOTEIO + else if (e == EREMOTEIO) { return MA_IO_ERROR; } +#endif +#ifdef EDQUOT + else if (e == EDQUOT) { return MA_NO_SPACE; } +#endif +#ifdef ENOMEDIUM + else if (e == ENOMEDIUM) { return MA_DOES_NOT_EXIST; } +#endif +#ifdef EMEDIUMTYPE + else if (e == EMEDIUMTYPE) { return MA_ERROR; } +#endif +#ifdef ECANCELED + else if (e == ECANCELED) { return MA_CANCELLED; } +#endif +#ifdef ENOKEY + else if (e == ENOKEY) { return MA_ERROR; } +#endif +#ifdef EKEYEXPIRED + else if (e == EKEYEXPIRED) { return MA_ERROR; } +#endif +#ifdef EKEYREVOKED + else if (e == EKEYREVOKED) { return MA_ERROR; } +#endif +#ifdef EKEYREJECTED + else if (e == EKEYREJECTED) { return MA_ERROR; } +#endif +#ifdef EOWNERDEAD + else if (e == EOWNERDEAD) { return MA_ERROR; } +#endif +#ifdef ENOTRECOVERABLE + else if (e == ENOTRECOVERABLE) { return MA_ERROR; } +#endif +#ifdef ERFKILL + else if (e == ERFKILL) { return MA_ERROR; } +#endif +#ifdef EHWPOISON + else if (e == EHWPOISON) { return MA_ERROR; } +#endif + else { + return MA_ERROR; + } +} + +MA_API ma_result ma_fopen(FILE** ppFile, const char* pFilePath, const char* pOpenMode) +{ +#if defined(_MSC_VER) && _MSC_VER >= 1400 + errno_t err; +#endif + + if (ppFile != NULL) { + *ppFile = NULL; /* Safety. */ + } + + if (pFilePath == NULL || pOpenMode == NULL || ppFile == NULL) { + return MA_INVALID_ARGS; + } + +#if defined(_MSC_VER) && _MSC_VER >= 1400 + err = fopen_s(ppFile, pFilePath, pOpenMode); + if (err != 0) { + return ma_result_from_errno(err); + } +#else +#if defined(_WIN32) || defined(__APPLE__) + *ppFile = fopen(pFilePath, pOpenMode); +#else +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS == 64 && defined(_LARGEFILE64_SOURCE) + *ppFile = fopen64(pFilePath, pOpenMode); +#else + *ppFile = fopen(pFilePath, pOpenMode); +#endif +#endif + if (*ppFile == NULL) { + ma_result result = ma_result_from_errno(errno); + if (result == MA_SUCCESS) { + result = MA_ERROR; /* Just a safety check to make sure we never ever return success when pFile == NULL. */ + } + + return result; + } +#endif + + return MA_SUCCESS; +} + + + +/* +_wfopen() isn't always available in all compilation environments. + +* Windows only. +* MSVC seems to support it universally as far back as VC6 from what I can tell (haven't checked further back). +* MinGW-64 (both 32- and 64-bit) seems to support it. +* MinGW wraps it in !defined(__STRICT_ANSI__). +* OpenWatcom wraps it in !defined(_NO_EXT_KEYS). + +This can be reviewed as compatibility issues arise. The preference is to use _wfopen_s() and _wfopen() as opposed to the wcsrtombs() +fallback, so if you notice your compiler not detecting this properly I'm happy to look at adding support. 
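ma_fopen() above has the same shape as fopen() but reports failure as a ma_result (mapped from errno by ma_result_from_errno()) instead of leaving the caller to inspect errno. A usage sketch; "data.raw" and the wrapper name are hypothetical.

/* Example: open a file and surface the error as a ma_result. */
#include <stdio.h>

static ma_result ma_example_read_file(void)
{
    FILE* pFile;
    ma_result result = ma_fopen(&pFile, "data.raw", "rb");
    if (result != MA_SUCCESS) {
        return result;   /* e.g. MA_DOES_NOT_EXIST when errno was ENOENT. */
    }

    /* ...read from pFile... */

    fclose(pFile);
    return MA_SUCCESS;
}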
+*/ +#if defined(_WIN32) +#if defined(_MSC_VER) || defined(__MINGW64__) || (!defined(__STRICT_ANSI__) && !defined(_NO_EXT_KEYS)) +#define MA_HAS_WFOPEN +#endif +#endif + +MA_API ma_result ma_wfopen(FILE** ppFile, const wchar_t* pFilePath, const wchar_t* pOpenMode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (ppFile != NULL) { + *ppFile = NULL; /* Safety. */ + } + + if (pFilePath == NULL || pOpenMode == NULL || ppFile == NULL) { + return MA_INVALID_ARGS; + } + +#if defined(MA_HAS_WFOPEN) + { + /* Use _wfopen() on Windows. */ +#if defined(_MSC_VER) && _MSC_VER >= 1400 + errno_t err = _wfopen_s(ppFile, pFilePath, pOpenMode); + if (err != 0) { + return ma_result_from_errno(err); + } +#else + *ppFile = _wfopen(pFilePath, pOpenMode); + if (*ppFile == NULL) { + return ma_result_from_errno(errno); + } +#endif + (void)pAllocationCallbacks; + } +#else + /* + Use fopen() on anything other than Windows. Requires a conversion. This is annoying because fopen() is locale specific. The only real way I can + think of to do this is with wcsrtombs(). Note that wcstombs() is apparently not thread-safe because it uses a static global mbstate_t object for + maintaining state. I've checked this with -std=c89 and it works, but if somebody get's a compiler error I'll look into improving compatibility. + */ + { + mbstate_t mbs; + size_t lenMB; + const wchar_t* pFilePathTemp = pFilePath; + char* pFilePathMB = NULL; + char pOpenModeMB[32] = {0}; + + /* Get the length first. */ + MA_ZERO_OBJECT(&mbs); + lenMB = wcsrtombs(NULL, &pFilePathTemp, 0, &mbs); + if (lenMB == (size_t)-1) { + return ma_result_from_errno(errno); + } + + pFilePathMB = (char*)ma_malloc(lenMB + 1, pAllocationCallbacks); + if (pFilePathMB == NULL) { + return MA_OUT_OF_MEMORY; + } + + pFilePathTemp = pFilePath; + MA_ZERO_OBJECT(&mbs); + wcsrtombs(pFilePathMB, &pFilePathTemp, lenMB + 1, &mbs); + + /* The open mode should always consist of ASCII characters so we should be able to do a trivial conversion. */ + { + size_t i = 0; + for (;;) { + if (pOpenMode[i] == 0) { + pOpenModeMB[i] = '\0'; + break; + } + + pOpenModeMB[i] = (char)pOpenMode[i]; + i += 1; + } + } + + *ppFile = fopen(pFilePathMB, pOpenModeMB); + + ma_free(pFilePathMB, pAllocationCallbacks); + } + + if (*ppFile == NULL) { + return MA_ERROR; + } +#endif + + return MA_SUCCESS; +} + + + +static MA_INLINE void ma_copy_memory_64(void* dst, const void* src, ma_uint64 sizeInBytes) +{ +#if 0xFFFFFFFFFFFFFFFF <= MA_SIZE_MAX + MA_COPY_MEMORY(dst, src, (size_t)sizeInBytes); +#else + while (sizeInBytes > 0) { + ma_uint64 bytesToCopyNow = sizeInBytes; + if (bytesToCopyNow > MA_SIZE_MAX) { + bytesToCopyNow = MA_SIZE_MAX; + } + + MA_COPY_MEMORY(dst, src, (size_t)bytesToCopyNow); /* Safe cast to size_t. */ + + sizeInBytes -= bytesToCopyNow; + dst = ( void*)(( ma_uint8*)dst + bytesToCopyNow); + src = (const void*)((const ma_uint8*)src + bytesToCopyNow); + } +#endif +} + +static MA_INLINE void ma_zero_memory_64(void* dst, ma_uint64 sizeInBytes) +{ +#if 0xFFFFFFFFFFFFFFFF <= MA_SIZE_MAX + MA_ZERO_MEMORY(dst, (size_t)sizeInBytes); +#else + while (sizeInBytes > 0) { + ma_uint64 bytesToZeroNow = sizeInBytes; + if (bytesToZeroNow > MA_SIZE_MAX) { + bytesToZeroNow = MA_SIZE_MAX; + } + + MA_ZERO_MEMORY(dst, (size_t)bytesToZeroNow); /* Safe cast to size_t. 
*/ + + sizeInBytes -= bytesToZeroNow; + dst = (void*)((ma_uint8*)dst + bytesToZeroNow); + } +#endif +} + + +/* Thanks to good old Bit Twiddling Hacks for this one: http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 */ +static MA_INLINE unsigned int ma_next_power_of_2(unsigned int x) +{ + x--; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + x++; + + return x; +} + +static MA_INLINE unsigned int ma_prev_power_of_2(unsigned int x) +{ + return ma_next_power_of_2(x) >> 1; +} + +static MA_INLINE unsigned int ma_round_to_power_of_2(unsigned int x) +{ + unsigned int prev = ma_prev_power_of_2(x); + unsigned int next = ma_next_power_of_2(x); + if ((next - x) > (x - prev)) { + return prev; + } else { + return next; + } +} + +static MA_INLINE unsigned int ma_count_set_bits(unsigned int x) +{ + unsigned int count = 0; + while (x != 0) { + if (x & 1) { + count += 1; + } + + x = x >> 1; + } + + return count; +} + + + +/************************************************************************************************************************************************************** + +Allocation Callbacks + +**************************************************************************************************************************************************************/ +static void* ma__malloc_default(size_t sz, void* pUserData) +{ + (void)pUserData; + return MA_MALLOC(sz); +} + +static void* ma__realloc_default(void* p, size_t sz, void* pUserData) +{ + (void)pUserData; + return MA_REALLOC(p, sz); +} + +static void ma__free_default(void* p, void* pUserData) +{ + (void)pUserData; + MA_FREE(p); +} + +static ma_allocation_callbacks ma_allocation_callbacks_init_default(void) +{ + ma_allocation_callbacks callbacks; + callbacks.pUserData = NULL; + callbacks.onMalloc = ma__malloc_default; + callbacks.onRealloc = ma__realloc_default; + callbacks.onFree = ma__free_default; + + return callbacks; +} + +static ma_result ma_allocation_callbacks_init_copy(ma_allocation_callbacks* pDst, const ma_allocation_callbacks* pSrc) +{ + if (pDst == NULL) { + return MA_INVALID_ARGS; + } + + if (pSrc == NULL) { + *pDst = ma_allocation_callbacks_init_default(); + } else { + if (pSrc->pUserData == NULL && pSrc->onFree == NULL && pSrc->onMalloc == NULL && pSrc->onRealloc == NULL) { + *pDst = ma_allocation_callbacks_init_default(); + } else { + if (pSrc->onFree == NULL || (pSrc->onMalloc == NULL && pSrc->onRealloc == NULL)) { + return MA_INVALID_ARGS; /* Invalid allocation callbacks. */ + } else { + *pDst = *pSrc; + } + } + } + + return MA_SUCCESS; +} + + + + +/************************************************************************************************************************************************************** + +Logging + +**************************************************************************************************************************************************************/ +MA_API const char* ma_log_level_to_string(ma_uint32 logLevel) +{ + switch (logLevel) + { + case MA_LOG_LEVEL_DEBUG: return "DEBUG"; + case MA_LOG_LEVEL_INFO: return "INFO"; + case MA_LOG_LEVEL_WARNING: return "WARNING"; + case MA_LOG_LEVEL_ERROR: return "ERROR"; + default: return "ERROR"; + } +} + +#if defined(MA_DEBUG_OUTPUT) +#if defined(MA_ANDROID) +#include +#endif + +/* Customize this to use a specific tag in __android_log_print() for debug output messages. 
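A sketch of supplying custom allocation callbacks of the shape validated above: onFree plus at least one of onMalloc/onRealloc must be set, otherwise ma_allocation_callbacks_init_copy() rejects them with MA_INVALID_ARGS. The counting user data and ma_example_* names are purely illustrative.

/* Example: allocation callbacks that count allocations via pUserData. */
#include <stdlib.h>

typedef struct { size_t allocationCount; } ma_example_alloc_stats;

static void* ma_example_on_malloc(size_t sz, void* pUserData)
{
    ((ma_example_alloc_stats*)pUserData)->allocationCount += 1;
    return malloc(sz);
}

static void* ma_example_on_realloc(void* p, size_t sz, void* pUserData)
{
    (void)pUserData;
    return realloc(p, sz);
}

static void ma_example_on_free(void* p, void* pUserData)
{
    (void)pUserData;
    free(p);
}

static ma_allocation_callbacks ma_example_allocation_callbacks(ma_example_alloc_stats* pStats)
{
    ma_allocation_callbacks callbacks;
    callbacks.pUserData = pStats;
    callbacks.onMalloc  = ma_example_on_malloc;
    callbacks.onRealloc = ma_example_on_realloc;
    callbacks.onFree    = ma_example_on_free;
    return callbacks;
}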
*/ +#ifndef MA_ANDROID_LOG_TAG +#define MA_ANDROID_LOG_TAG "miniaudio" +#endif + +void ma_log_callback_debug(void* pUserData, ma_uint32 level, const char* pMessage) +{ + (void)pUserData; + + /* Special handling for some platforms. */ +#if defined(MA_ANDROID) + { + /* Android. */ + __android_log_print(ANDROID_LOG_DEBUG, MA_ANDROID_LOG_TAG, "%s: %s", ma_log_level_to_string(level), pMessage); + } +#else + { + /* Everything else. */ + printf("%s: %s", ma_log_level_to_string(level), pMessage); + } +#endif +} +#endif + +MA_API ma_log_callback ma_log_callback_init(ma_log_callback_proc onLog, void* pUserData) +{ + ma_log_callback callback; + + MA_ZERO_OBJECT(&callback); + callback.onLog = onLog; + callback.pUserData = pUserData; + + return callback; +} + + +MA_API ma_result ma_log_init(const ma_allocation_callbacks* pAllocationCallbacks, ma_log* pLog) +{ + if (pLog == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pLog); + ma_allocation_callbacks_init_copy(&pLog->allocationCallbacks, pAllocationCallbacks); + + /* We need a mutex for thread safety. */ +#ifndef MA_NO_THREADING + { + ma_result result = ma_mutex_init(&pLog->lock); + if (result != MA_SUCCESS) { + return result; + } + } +#endif + + /* If we're using debug output, enable it. */ +#if defined(MA_DEBUG_OUTPUT) + { + ma_log_register_callback(pLog, ma_log_callback_init(ma_log_callback_debug, NULL)); /* Doesn't really matter if this fails. */ + } +#endif + + return MA_SUCCESS; +} + +MA_API void ma_log_uninit(ma_log* pLog) +{ + if (pLog == NULL) { + return; + } + +#ifndef MA_NO_THREADING + ma_mutex_uninit(&pLog->lock); +#endif +} + +static void ma_log_lock(ma_log* pLog) +{ +#ifndef MA_NO_THREADING + ma_mutex_lock(&pLog->lock); +#else + (void)pLog; +#endif +} + +static void ma_log_unlock(ma_log* pLog) +{ +#ifndef MA_NO_THREADING + ma_mutex_unlock(&pLog->lock); +#else + (void)pLog; +#endif +} + +MA_API ma_result ma_log_register_callback(ma_log* pLog, ma_log_callback callback) +{ + ma_result result = MA_SUCCESS; + + if (pLog == NULL || callback.onLog == NULL) { + return MA_INVALID_ARGS; + } + + ma_log_lock(pLog); + { + if (pLog->callbackCount == ma_countof(pLog->callbacks)) { + result = MA_OUT_OF_MEMORY; /* Reached the maximum allowed log callbacks. */ + } else { + pLog->callbacks[pLog->callbackCount] = callback; + pLog->callbackCount += 1; + } + } + ma_log_unlock(pLog); + + return result; +} + +MA_API ma_result ma_log_unregister_callback(ma_log* pLog, ma_log_callback callback) +{ + if (pLog == NULL) { + return MA_INVALID_ARGS; + } + + ma_log_lock(pLog); + { + ma_uint32 iLog; + for (iLog = 0; iLog < pLog->callbackCount; ) { + if (pLog->callbacks[iLog].onLog == callback.onLog) { + /* Found. Move everything down a slot. */ + ma_uint32 jLog; + for (jLog = iLog; jLog < pLog->callbackCount-1; jLog += 1) { + pLog->callbacks[jLog] = pLog->callbacks[jLog + 1]; + } + + pLog->callbackCount -= 1; + } else { + /* Not found. */ + iLog += 1; + } + } + } + ma_log_unlock(pLog); + + return MA_SUCCESS; +} + +MA_API ma_result ma_log_post(ma_log* pLog, ma_uint32 level, const char* pMessage) +{ + if (pLog == NULL || pMessage == NULL) { + return MA_INVALID_ARGS; + } + + ma_log_lock(pLog); + { + ma_uint32 iLog; + for (iLog = 0; iLog < pLog->callbackCount; iLog += 1) { + if (pLog->callbacks[iLog].onLog) { + pLog->callbacks[iLog].onLog(pLog->callbacks[iLog].pUserData, level, pMessage); + } + } + } + ma_log_unlock(pLog); + + return MA_SUCCESS; +} + + +/* +We need to emulate _vscprintf() for the VC6 build. 
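A usage sketch of the logging API above: initialise a ma_log, register a custom callback built with ma_log_callback_init(), and post a message to it. The callback and wrapper names are hypothetical.

/* Example: route log messages through a custom callback. */
#include <stdio.h>

static void ma_example_on_log(void* pUserData, ma_uint32 level, const char* pMessage)
{
    (void)pUserData;
    printf("[%s] %s", ma_log_level_to_string(level), pMessage);
}

static void ma_example_logging(void)
{
    ma_log log;

    if (ma_log_init(NULL, &log) != MA_SUCCESS) {
        return;
    }

    ma_log_register_callback(&log, ma_log_callback_init(ma_example_on_log, NULL));
    ma_log_post(&log, MA_LOG_LEVEL_INFO, "Engine initialised.\n");

    ma_log_uninit(&log);
}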
This can be more efficient, but since it's only VC6, and it's just a +logging function, I'm happy to keep this simple. In the VC6 build we can implement this in terms of _vsnprintf(). +*/ +#if defined(_MSC_VER) && _MSC_VER < 1900 +static int ma_vscprintf(const ma_allocation_callbacks* pAllocationCallbacks, const char* format, va_list args) +{ +#if _MSC_VER > 1200 + return _vscprintf(format, args); +#else + int result; + char* pTempBuffer = NULL; + size_t tempBufferCap = 1024; + + if (format == NULL) { + errno = EINVAL; + return -1; + } + + for (;;) { + char* pNewTempBuffer = (char*)ma_realloc(pTempBuffer, tempBufferCap, pAllocationCallbacks); + if (pNewTempBuffer == NULL) { + ma_free(pTempBuffer, pAllocationCallbacks); + errno = ENOMEM; + return -1; /* Out of memory. */ + } + + pTempBuffer = pNewTempBuffer; + + result = _vsnprintf(pTempBuffer, tempBufferCap, format, args); + ma_free(pTempBuffer, NULL); + + if (result != -1) { + break; /* Got it. */ + } + + /* Buffer wasn't big enough. Ideally it'd be nice to use an error code to know the reason for sure, but this is reliable enough. */ + tempBufferCap *= 2; + } + + return result; +#endif +} +#endif + +MA_API ma_result ma_log_postv(ma_log* pLog, ma_uint32 level, const char* pFormat, va_list args) +{ + if (pLog == NULL || pFormat == NULL) { + return MA_INVALID_ARGS; + } + +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || ((!defined(_MSC_VER) || _MSC_VER >= 1900) && !defined(__STRICT_ANSI__) && !defined(_NO_EXT_KEYS)) || (defined(__cplusplus) && __cplusplus >= 201103L) + { + ma_result result; + int length; + char pFormattedMessageStack[1024]; + char* pFormattedMessageHeap = NULL; + + /* First try formatting into our fixed sized stack allocated buffer. If this is too small we'll fallback to a heap allocation. */ + length = vsnprintf(pFormattedMessageStack, sizeof(pFormattedMessageStack), pFormat, args); + if (length < 0) { + return MA_INVALID_OPERATION; /* An error occured when trying to convert the buffer. */ + } + + if ((size_t)length < sizeof(pFormattedMessageStack)) { + /* The string was written to the stack. */ + result = ma_log_post(pLog, level, pFormattedMessageStack); + } else { + /* The stack buffer was too small, try the heap. */ + pFormattedMessageHeap = (char*)ma_malloc(length + 1, &pLog->allocationCallbacks); + if (pFormattedMessageHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + + length = vsnprintf(pFormattedMessageHeap, length + 1, pFormat, args); + if (length < 0) { + ma_free(pFormattedMessageHeap, &pLog->allocationCallbacks); + return MA_INVALID_OPERATION; + } + + result = ma_log_post(pLog, level, pFormattedMessageHeap); + ma_free(pFormattedMessageHeap, &pLog->allocationCallbacks); + } + + return result; + } +#else + { + /* + Without snprintf() we need to first measure the string and then heap allocate it. I'm only aware of Visual Studio having support for this without snprintf(), so we'll + need to restrict this branch to Visual Studio. For other compilers we need to just not support formatted logging because I don't want the security risk of overflowing + a fixed sized stack allocated buffer. 
+ */ +#if defined(_MSC_VER) && _MSC_VER >= 1200 /* 1200 = VC6 */ + { + ma_result result; + int formattedLen; + char* pFormattedMessage = NULL; + va_list args2; + +#if _MSC_VER >= 1800 + { + va_copy(args2, args); + } +#else + { + args2 = args; + } +#endif + + formattedLen = ma_vscprintf(&pLog->allocationCallbacks, pFormat, args2); + va_end(args2); + + if (formattedLen <= 0) { + return MA_INVALID_OPERATION; + } + + pFormattedMessage = (char*)ma_malloc(formattedLen + 1, &pLog->allocationCallbacks); + if (pFormattedMessage == NULL) { + return MA_OUT_OF_MEMORY; + } + + /* We'll get errors on newer versions of Visual Studio if we try to use vsprintf(). */ +#if _MSC_VER >= 1400 /* 1400 = Visual Studio 2005 */ + { + vsprintf_s(pFormattedMessage, formattedLen + 1, pFormat, args); + } +#else + { + vsprintf(pFormattedMessage, pFormat, args); + } +#endif + + result = ma_log_post(pLog, level, pFormattedMessage); + ma_free(pFormattedMessage, &pLog->allocationCallbacks); + + return result; + } +#else + { + /* Can't do anything because we don't have a safe way of to emulate vsnprintf() without a manual solution. */ + (void)level; + (void)args; + + return MA_INVALID_OPERATION; + } +#endif + } +#endif +} + +MA_API ma_result ma_log_postf(ma_log* pLog, ma_uint32 level, const char* pFormat, ...) +{ + ma_result result; + va_list args; + + if (pLog == NULL || pFormat == NULL) { + return MA_INVALID_ARGS; + } + + va_start(args, pFormat); + { + result = ma_log_postv(pLog, level, pFormat, args); + } + va_end(args); + + return result; +} + + + +static MA_INLINE ma_uint8 ma_clip_u8(ma_int32 x) +{ + return (ma_uint8)(ma_clamp(x, -128, 127) + 128); +} + +static MA_INLINE ma_int16 ma_clip_s16(ma_int32 x) +{ + return (ma_int16)ma_clamp(x, -32768, 32767); +} + +static MA_INLINE ma_int64 ma_clip_s24(ma_int64 x) +{ + return (ma_int64)ma_clamp(x, -8388608, 8388607); +} + +static MA_INLINE ma_int32 ma_clip_s32(ma_int64 x) +{ + /* This dance is to silence warnings with -std=c89. A good compiler should be able to optimize this away. */ + ma_int64 clipMin; + ma_int64 clipMax; + clipMin = -((ma_int64)2147483647 + 1); + clipMax = (ma_int64)2147483647; + + return (ma_int32)ma_clamp(x, clipMin, clipMax); +} + +static MA_INLINE float ma_clip_f32(float x) +{ + if (x < -1) return -1; + if (x > +1) return +1; + return x; +} + + +static MA_INLINE float ma_mix_f32(float x, float y, float a) +{ + return x*(1-a) + y*a; +} +static MA_INLINE float ma_mix_f32_fast(float x, float y, float a) +{ + float r0 = (y - x); + float r1 = r0*a; + return x + r1; + /*return x + (y - x)*a;*/ +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE __m128 ma_mix_f32_fast__sse2(__m128 x, __m128 y, __m128 a) +{ + return _mm_add_ps(x, _mm_mul_ps(_mm_sub_ps(y, x), a)); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE __m256 ma_mix_f32_fast__avx2(__m256 x, __m256 y, __m256 a) +{ + return _mm256_add_ps(x, _mm256_mul_ps(_mm256_sub_ps(y, x), a)); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE float32x4_t ma_mix_f32_fast__neon(float32x4_t x, float32x4_t y, float32x4_t a) +{ + return vaddq_f32(x, vmulq_f32(vsubq_f32(y, x), a)); +} +#endif + + +static MA_INLINE double ma_mix_f64(double x, double y, double a) +{ + return x*(1-a) + y*a; +} +static MA_INLINE double ma_mix_f64_fast(double x, double y, double a) +{ + return x + (y - x)*a; +} + +static MA_INLINE float ma_scale_to_range_f32(float x, float lo, float hi) +{ + return lo + x*(hi-lo); +} + + +/* +Greatest common factor using Euclid's algorithm iteratively. 
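+For example, ma_gcf_u32(48000, 44100) walks the pairs (48000, 44100) -> (44100, 3900) -> (3900, 1200) -> (1200, 300) -> (300, 0) and returns 300, the sort of reduction that is useful when simplifying a ratio of sample rates (48000:44100 reduces to 160:147).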
+*/ +static MA_INLINE ma_uint32 ma_gcf_u32(ma_uint32 a, ma_uint32 b) +{ + for (;;) { + if (b == 0) { + break; + } else { + ma_uint32 t = a; + a = b; + b = t % a; + } + } + + return a; +} + + +static ma_uint32 ma_ffs_32(ma_uint32 x) +{ + ma_uint32 i; + + /* Just a naive implementation just to get things working for now. Will optimize this later. */ + for (i = 0; i < 32; i += 1) { + if ((x & (1 << i)) != 0) { + return i; + } + } + + return i; +} + +static MA_INLINE ma_int16 ma_float_to_fixed_16(float x) +{ + return (ma_int16)(x * (1 << 8)); +} + + + +/* +Random Number Generation + +miniaudio uses the LCG random number generation algorithm. This is good enough for audio. + +Note that miniaudio's global LCG implementation uses global state which is _not_ thread-local. When this is called across +multiple threads, results will be unpredictable. However, it won't crash and results will still be random enough for +miniaudio's purposes. +*/ +#ifndef MA_DEFAULT_LCG_SEED +#define MA_DEFAULT_LCG_SEED 4321 +#endif + +#define MA_LCG_M 2147483647 +#define MA_LCG_A 48271 +#define MA_LCG_C 0 + +static ma_lcg g_maLCG = {MA_DEFAULT_LCG_SEED}; /* Non-zero initial seed. Use ma_seed() to use an explicit seed. */ + +static MA_INLINE void ma_lcg_seed(ma_lcg* pLCG, ma_int32 seed) +{ + MA_ASSERT(pLCG != NULL); + pLCG->state = seed; +} + +static MA_INLINE ma_int32 ma_lcg_rand_s32(ma_lcg* pLCG) +{ + pLCG->state = (MA_LCG_A * pLCG->state + MA_LCG_C) % MA_LCG_M; + return pLCG->state; +} + +static MA_INLINE ma_uint32 ma_lcg_rand_u32(ma_lcg* pLCG) +{ + return (ma_uint32)ma_lcg_rand_s32(pLCG); +} + +static MA_INLINE ma_int16 ma_lcg_rand_s16(ma_lcg* pLCG) +{ + return (ma_int16)(ma_lcg_rand_s32(pLCG) & 0xFFFF); +} + +static MA_INLINE double ma_lcg_rand_f64(ma_lcg* pLCG) +{ + return ma_lcg_rand_s32(pLCG) / (double)0x7FFFFFFF; +} + +static MA_INLINE float ma_lcg_rand_f32(ma_lcg* pLCG) +{ + return (float)ma_lcg_rand_f64(pLCG); +} + +static MA_INLINE float ma_lcg_rand_range_f32(ma_lcg* pLCG, float lo, float hi) +{ + return ma_scale_to_range_f32(ma_lcg_rand_f32(pLCG), lo, hi); +} + +static MA_INLINE ma_int32 ma_lcg_rand_range_s32(ma_lcg* pLCG, ma_int32 lo, ma_int32 hi) +{ + if (lo == hi) { + return lo; + } + + return lo + ma_lcg_rand_u32(pLCG) / (0xFFFFFFFF / (hi - lo + 1) + 1); +} + + + +static MA_INLINE void ma_seed(ma_int32 seed) +{ + ma_lcg_seed(&g_maLCG, seed); +} + +static MA_INLINE ma_int32 ma_rand_s32(void) +{ + return ma_lcg_rand_s32(&g_maLCG); +} + +static MA_INLINE ma_uint32 ma_rand_u32(void) +{ + return ma_lcg_rand_u32(&g_maLCG); +} + +static MA_INLINE double ma_rand_f64(void) +{ + return ma_lcg_rand_f64(&g_maLCG); +} + +static MA_INLINE float ma_rand_f32(void) +{ + return ma_lcg_rand_f32(&g_maLCG); +} + +static MA_INLINE float ma_rand_range_f32(float lo, float hi) +{ + return ma_lcg_rand_range_f32(&g_maLCG, lo, hi); +} + +static MA_INLINE ma_int32 ma_rand_range_s32(ma_int32 lo, ma_int32 hi) +{ + return ma_lcg_rand_range_s32(&g_maLCG, lo, hi); +} + + +static MA_INLINE float ma_dither_f32_rectangle(float ditherMin, float ditherMax) +{ + return ma_rand_range_f32(ditherMin, ditherMax); +} + +static MA_INLINE float ma_dither_f32_triangle(float ditherMin, float ditherMax) +{ + float a = ma_rand_range_f32(ditherMin, 0); + float b = ma_rand_range_f32(0, ditherMax); + return a + b; +} + +static MA_INLINE float ma_dither_f32(ma_dither_mode ditherMode, float ditherMin, float ditherMax) +{ + if (ditherMode == ma_dither_mode_rectangle) { + return ma_dither_f32_rectangle(ditherMin, ditherMax); + } + if (ditherMode == 
ma_dither_mode_triangle) { + return ma_dither_f32_triangle(ditherMin, ditherMax); + } + + return 0; +} + +static MA_INLINE ma_int32 ma_dither_s32(ma_dither_mode ditherMode, ma_int32 ditherMin, ma_int32 ditherMax) +{ + if (ditherMode == ma_dither_mode_rectangle) { + ma_int32 a = ma_rand_range_s32(ditherMin, ditherMax); + return a; + } + if (ditherMode == ma_dither_mode_triangle) { + ma_int32 a = ma_rand_range_s32(ditherMin, 0); + ma_int32 b = ma_rand_range_s32(0, ditherMax); + return a + b; + } + + return 0; +} + + +/************************************************************************************************************************************************************** + +Atomics + +**************************************************************************************************************************************************************/ +/* ma_atomic.h begin */ +#ifndef ma_atomic_h +#if defined(__cplusplus) +extern "C" { +#endif +#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wlong-long" +#if defined(__clang__) +#pragma GCC diagnostic ignored "-Wc++11-long-long" +#endif +#endif + typedef int ma_atomic_memory_order; +#define MA_ATOMIC_HAS_8 +#define MA_ATOMIC_HAS_16 +#define MA_ATOMIC_HAS_32 +#define MA_ATOMIC_HAS_64 +#if (defined(_MSC_VER) ) || defined(__WATCOMC__) || defined(__DMC__) +#define MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, intrin, ma_atomicType, msvcType) \ + ma_atomicType result; \ + switch (order) \ + { \ + case ma_atomic_memory_order_relaxed: \ + { \ + result = (ma_atomicType)intrin##_nf((volatile msvcType*)dst, (msvcType)src); \ + } break; \ + case ma_atomic_memory_order_consume: \ + case ma_atomic_memory_order_acquire: \ + { \ + result = (ma_atomicType)intrin##_acq((volatile msvcType*)dst, (msvcType)src); \ + } break; \ + case ma_atomic_memory_order_release: \ + { \ + result = (ma_atomicType)intrin##_rel((volatile msvcType*)dst, (msvcType)src); \ + } break; \ + case ma_atomic_memory_order_acq_rel: \ + case ma_atomic_memory_order_seq_cst: \ + default: \ + { \ + result = (ma_atomicType)intrin((volatile msvcType*)dst, (msvcType)src); \ + } break; \ + } \ + return result; +#define MA_ATOMIC_MSVC_ARM_INTRINSIC_COMPARE_EXCHANGE(ptr, expected, desired, order, intrin, ma_atomicType, msvcType) \ + ma_atomicType result; \ + switch (order) \ + { \ + case ma_atomic_memory_order_relaxed: \ + { \ + result = (ma_atomicType)intrin##_nf((volatile msvcType*)ptr, (msvcType)expected, (msvcType)desired); \ + } break; \ + case ma_atomic_memory_order_consume: \ + case ma_atomic_memory_order_acquire: \ + { \ + result = (ma_atomicType)intrin##_acq((volatile msvcType*)ptr, (msvcType)expected, (msvcType)desired); \ + } break; \ + case ma_atomic_memory_order_release: \ + { \ + result = (ma_atomicType)intrin##_rel((volatile msvcType*)ptr, (msvcType)expected, (msvcType)desired); \ + } break; \ + case ma_atomic_memory_order_acq_rel: \ + case ma_atomic_memory_order_seq_cst: \ + default: \ + { \ + result = (ma_atomicType)intrin((volatile msvcType*)ptr, (msvcType)expected, (msvcType)desired); \ + } break; \ + } \ + return result; +#define ma_atomic_memory_order_relaxed 0 +#define ma_atomic_memory_order_consume 1 +#define ma_atomic_memory_order_acquire 2 +#define ma_atomic_memory_order_release 3 +#define ma_atomic_memory_order_acq_rel 4 +#define ma_atomic_memory_order_seq_cst 5 +#if _MSC_VER < 1600 && defined(MA_X86) +#define MA_ATOMIC_MSVC_USE_INLINED_ASSEMBLY +#endif +#if 
_MSC_VER < 1600 +#undef MA_ATOMIC_HAS_8 +#undef MA_ATOMIC_HAS_16 +#endif +#if !defined(MA_ATOMIC_MSVC_USE_INLINED_ASSEMBLY) +#include <intrin.h> +#endif +#if defined(MA_ATOMIC_MSVC_USE_INLINED_ASSEMBLY) +#if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 __stdcall ma_atomic_compare_and_swap_8(volatile ma_uint8* dst, ma_uint8 expected, ma_uint8 desired) + { + ma_uint8 result = 0; + __asm { + mov ecx, dst + mov al, expected + mov dl, desired + lock cmpxchg [ecx], dl + mov result, al + } + return result; + } +#endif +#if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_uint16 __stdcall ma_atomic_compare_and_swap_16(volatile ma_uint16* dst, ma_uint16 expected, ma_uint16 desired) + { + ma_uint16 result = 0; + __asm { + mov ecx, dst + mov ax, expected + mov dx, desired + lock cmpxchg [ecx], dx + mov result, ax + } + return result; + } +#endif +#if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 __stdcall ma_atomic_compare_and_swap_32(volatile ma_uint32* dst, ma_uint32 expected, ma_uint32 desired) + { + ma_uint32 result = 0; + __asm { + mov ecx, dst + mov eax, expected + mov edx, desired + lock cmpxchg [ecx], edx + mov result, eax + } + return result; + } +#endif +#if defined(MA_ATOMIC_HAS_64) + static MA_INLINE ma_uint64 __stdcall ma_atomic_compare_and_swap_64(volatile ma_uint64* dst, ma_uint64 expected, ma_uint64 desired) + { + ma_uint32 resultEAX = 0; + ma_uint32 resultEDX = 0; + __asm { + mov esi, dst + mov eax, dword ptr expected + mov edx, dword ptr expected + 4 + mov ebx, dword ptr desired + mov ecx, dword ptr desired + 4 + lock cmpxchg8b qword ptr [esi] + mov resultEAX, eax + mov resultEDX, edx + } + return ((ma_uint64)resultEDX << 32) | resultEAX; + } +#endif +#else +#if defined(MA_ATOMIC_HAS_8) +#define ma_atomic_compare_and_swap_8( dst, expected, desired) (ma_uint8 )_InterlockedCompareExchange8((volatile char*)dst, (char)desired, (char)expected) +#endif +#if defined(MA_ATOMIC_HAS_16) +#define ma_atomic_compare_and_swap_16(dst, expected, desired) (ma_uint16)_InterlockedCompareExchange16((volatile short*)dst, (short)desired, (short)expected) +#endif +#if defined(MA_ATOMIC_HAS_32) +#define ma_atomic_compare_and_swap_32(dst, expected, desired) (ma_uint32)_InterlockedCompareExchange((volatile long*)dst, (long)desired, (long)expected) +#endif +#if defined(MA_ATOMIC_HAS_64) +#define ma_atomic_compare_and_swap_64(dst, expected, desired) (ma_uint64)_InterlockedCompareExchange64((volatile ma_int64*)dst, (ma_int64)desired, (ma_int64)expected) +#endif +#endif +#if defined(MA_ATOMIC_MSVC_USE_INLINED_ASSEMBLY) +#if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 __stdcall ma_atomic_exchange_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + ma_uint8 result = 0; + (void)order; + __asm { + mov ecx, dst + mov al, src + lock xchg [ecx], al + mov result, al + } + return result; + } +#endif +#if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_uint16 __stdcall ma_atomic_exchange_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 result = 0; + (void)order; + __asm { + mov ecx, dst + mov ax, src + lock xchg [ecx], ax + mov result, ax + } + return result; + } +#endif +#if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 __stdcall ma_atomic_exchange_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 result = 0; + (void)order; + __asm { + mov ecx, dst + mov eax, src + lock xchg [ecx], eax + mov result, eax + } + return result; + } +#endif +#else +#if
defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 __stdcall ma_atomic_exchange_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedExchange8, ma_uint8, char); +#else + (void)order; + return (ma_uint8)_InterlockedExchange8((volatile char*)dst, (char)src); +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_uint16 __stdcall ma_atomic_exchange_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedExchange16, ma_uint16, short); +#else + (void)order; + return (ma_uint16)_InterlockedExchange16((volatile short*)dst, (short)src); +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 __stdcall ma_atomic_exchange_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedExchange, ma_uint32, long); +#else + (void)order; + return (ma_uint32)_InterlockedExchange((volatile long*)dst, (long)src); +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_64) && defined(MA_64BIT) + static MA_INLINE ma_uint64 __stdcall ma_atomic_exchange_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedExchange64, ma_uint64, long long); +#else + (void)order; + return (ma_uint64)_InterlockedExchange64((volatile long long*)dst, (long long)src); +#endif + } +#else +#endif +#endif +#if defined(MA_ATOMIC_HAS_64) && !defined(MA_64BIT) + static MA_INLINE ma_uint64 __stdcall ma_atomic_exchange_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 oldValue; + do { + oldValue = *dst; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, src) != oldValue); + (void)order; + return oldValue; + } +#endif +#if defined(MA_ATOMIC_MSVC_USE_INLINED_ASSEMBLY) +#if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 __stdcall ma_atomic_fetch_add_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + ma_uint8 result = 0; + (void)order; + __asm { + mov ecx, dst + mov al, src + lock xadd [ecx], al + mov result, al + } + return result; + } +#endif +#if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_uint16 __stdcall ma_atomic_fetch_add_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 result = 0; + (void)order; + __asm { + mov ecx, dst + mov ax, src + lock xadd [ecx], ax + mov result, ax + } + return result; + } +#endif +#if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 __stdcall ma_atomic_fetch_add_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 result = 0; + (void)order; + __asm { + mov ecx, dst + mov eax, src + lock xadd [ecx], eax + mov result, eax + } + return result; + } +#endif +#else +#if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 __stdcall ma_atomic_fetch_add_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedExchangeAdd8, ma_uint8, char); +#else + (void)order; + return (ma_uint8)_InterlockedExchangeAdd8((volatile char*)dst, (char)src); +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_uint16 __stdcall 
ma_atomic_fetch_add_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedExchangeAdd16, ma_uint16, short); +#else + (void)order; + return (ma_uint16)_InterlockedExchangeAdd16((volatile short*)dst, (short)src); +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 __stdcall ma_atomic_fetch_add_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedExchangeAdd, ma_uint32, long); +#else + (void)order; + return (ma_uint32)_InterlockedExchangeAdd((volatile long*)dst, (long)src); +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_64) && defined(MA_64BIT) + static MA_INLINE ma_uint64 __stdcall ma_atomic_fetch_add_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedExchangeAdd64, ma_uint64, long long); +#else + (void)order; + return (ma_uint64)_InterlockedExchangeAdd64((volatile long long*)dst, (long long)src); +#endif + } +#else +#endif +#endif +#if defined(MA_ATOMIC_HAS_64) && !defined(MA_64BIT) + static MA_INLINE ma_uint64 __stdcall ma_atomic_fetch_add_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 oldValue; + ma_uint64 newValue; + do { + oldValue = *dst; + newValue = oldValue + src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } +#endif +#if defined(MA_ATOMIC_MSVC_USE_INLINED_ASSEMBLY) + static MA_INLINE void __stdcall ma_atomic_thread_fence(ma_atomic_memory_order order) + { + (void)order; + __asm { + lock add [esp], 0 + } + } +#else +#if defined(MA_X64) +#define ma_atomic_thread_fence(order) __faststorefence(), (void)order +#elif defined(MA_ARM64) +#define ma_atomic_thread_fence(order) __dmb(_ARM64_BARRIER_ISH), (void)order +#else + static MA_INLINE void ma_atomic_thread_fence(ma_atomic_memory_order order) + { + volatile ma_uint32 barrier = 0; + ma_atomic_fetch_add_explicit_32(&barrier, 0, order); + } +#endif +#endif +#define ma_atomic_compiler_fence() ma_atomic_thread_fence(ma_atomic_memory_order_seq_cst) +#define ma_atomic_signal_fence(order) ma_atomic_thread_fence(order) +#if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 ma_atomic_load_explicit_8(volatile const ma_uint8* ptr, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC_COMPARE_EXCHANGE(ptr, 0, 0, order, _InterlockedCompareExchange8, ma_uint8, char); +#else + (void)order; + return ma_atomic_compare_and_swap_8((volatile ma_uint8*)ptr, 0, 0); +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_uint16 ma_atomic_load_explicit_16(volatile const ma_uint16* ptr, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC_COMPARE_EXCHANGE(ptr, 0, 0, order, _InterlockedCompareExchange16, ma_uint16, short); +#else + (void)order; + return ma_atomic_compare_and_swap_16((volatile ma_uint16*)ptr, 0, 0); +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 ma_atomic_load_explicit_32(volatile const ma_uint32* ptr, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC_COMPARE_EXCHANGE(ptr, 0, 0, order, _InterlockedCompareExchange, ma_uint32, long); +#else + (void)order; + return ma_atomic_compare_and_swap_32((volatile 
ma_uint32*)ptr, 0, 0); +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_64) + static MA_INLINE ma_uint64 ma_atomic_load_explicit_64(volatile const ma_uint64* ptr, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC_COMPARE_EXCHANGE(ptr, 0, 0, order, _InterlockedCompareExchange64, ma_uint64, long long); +#else + (void)order; + return ma_atomic_compare_and_swap_64((volatile ma_uint64*)ptr, 0, 0); +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_8) +#define ma_atomic_store_explicit_8( dst, src, order) (void)ma_atomic_exchange_explicit_8 (dst, src, order) +#endif +#if defined(MA_ATOMIC_HAS_16) +#define ma_atomic_store_explicit_16(dst, src, order) (void)ma_atomic_exchange_explicit_16(dst, src, order) +#endif +#if defined(MA_ATOMIC_HAS_32) +#define ma_atomic_store_explicit_32(dst, src, order) (void)ma_atomic_exchange_explicit_32(dst, src, order) +#endif +#if defined(MA_ATOMIC_HAS_64) +#define ma_atomic_store_explicit_64(dst, src, order) (void)ma_atomic_exchange_explicit_64(dst, src, order) +#endif +#if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 __stdcall ma_atomic_fetch_sub_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + ma_uint8 oldValue; + ma_uint8 newValue; + do { + oldValue = *dst; + newValue = (ma_uint8)(oldValue - src); + } while (ma_atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } +#endif +#if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_uint16 __stdcall ma_atomic_fetch_sub_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 oldValue; + ma_uint16 newValue; + do { + oldValue = *dst; + newValue = (ma_uint16)(oldValue - src); + } while (ma_atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } +#endif +#if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 __stdcall ma_atomic_fetch_sub_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 oldValue; + ma_uint32 newValue; + do { + oldValue = *dst; + newValue = oldValue - src; + } while (ma_atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } +#endif +#if defined(MA_ATOMIC_HAS_64) + static MA_INLINE ma_uint64 __stdcall ma_atomic_fetch_sub_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 oldValue; + ma_uint64 newValue; + do { + oldValue = *dst; + newValue = oldValue - src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } +#endif +#if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 __stdcall ma_atomic_fetch_and_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedAnd8, ma_uint8, char); +#else + ma_uint8 oldValue; + ma_uint8 newValue; + do { + oldValue = *dst; + newValue = (ma_uint8)(oldValue & src); + } while (ma_atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_uint16 __stdcall ma_atomic_fetch_and_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedAnd16, ma_uint16, short); +#else + ma_uint16 oldValue; + ma_uint16 newValue; + do { + oldValue = *dst; + newValue = 
(ma_uint16)(oldValue & src); + } while (ma_atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 __stdcall ma_atomic_fetch_and_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedAnd, ma_uint32, long); +#else + ma_uint32 oldValue; + ma_uint32 newValue; + do { + oldValue = *dst; + newValue = oldValue & src; + } while (ma_atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_64) + static MA_INLINE ma_uint64 __stdcall ma_atomic_fetch_and_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedAnd64, ma_uint64, long long); +#else + ma_uint64 oldValue; + ma_uint64 newValue; + do { + oldValue = *dst; + newValue = oldValue & src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 __stdcall ma_atomic_fetch_xor_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedXor8, ma_uint8, char); +#else + ma_uint8 oldValue; + ma_uint8 newValue; + do { + oldValue = *dst; + newValue = (ma_uint8)(oldValue ^ src); + } while (ma_atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_uint16 __stdcall ma_atomic_fetch_xor_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedXor16, ma_uint16, short); +#else + ma_uint16 oldValue; + ma_uint16 newValue; + do { + oldValue = *dst; + newValue = (ma_uint16)(oldValue ^ src); + } while (ma_atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 __stdcall ma_atomic_fetch_xor_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedXor, ma_uint32, long); +#else + ma_uint32 oldValue; + ma_uint32 newValue; + do { + oldValue = *dst; + newValue = oldValue ^ src; + } while (ma_atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_64) + static MA_INLINE ma_uint64 __stdcall ma_atomic_fetch_xor_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedXor64, ma_uint64, long long); +#else + ma_uint64 oldValue; + ma_uint64 newValue; + do { + oldValue = *dst; + newValue = oldValue ^ src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 __stdcall ma_atomic_fetch_or_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, 
order, _InterlockedOr8, ma_uint8, char); +#else + ma_uint8 oldValue; + ma_uint8 newValue; + do { + oldValue = *dst; + newValue = (ma_uint8)(oldValue | src); + } while (ma_atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_uint16 __stdcall ma_atomic_fetch_or_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedOr16, ma_uint16, short); +#else + ma_uint16 oldValue; + ma_uint16 newValue; + do { + oldValue = *dst; + newValue = (ma_uint16)(oldValue | src); + } while (ma_atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 __stdcall ma_atomic_fetch_or_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedOr, ma_uint32, long); +#else + ma_uint32 oldValue; + ma_uint32 newValue; + do { + oldValue = *dst; + newValue = oldValue | src; + } while (ma_atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_64) + static MA_INLINE ma_uint64 __stdcall ma_atomic_fetch_or_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { +#if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedOr64, ma_uint64, long long); +#else + ma_uint64 oldValue; + ma_uint64 newValue; + do { + oldValue = *dst; + newValue = oldValue | src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; +#endif + } +#endif +#if defined(MA_ATOMIC_HAS_8) +#define ma_atomic_test_and_set_explicit_8( dst, order) ma_atomic_exchange_explicit_8 (dst, 1, order) +#endif +#if defined(MA_ATOMIC_HAS_16) +#define ma_atomic_test_and_set_explicit_16(dst, order) ma_atomic_exchange_explicit_16(dst, 1, order) +#endif +#if defined(MA_ATOMIC_HAS_32) +#define ma_atomic_test_and_set_explicit_32(dst, order) ma_atomic_exchange_explicit_32(dst, 1, order) +#endif +#if defined(MA_ATOMIC_HAS_64) +#define ma_atomic_test_and_set_explicit_64(dst, order) ma_atomic_exchange_explicit_64(dst, 1, order) +#endif +#if defined(MA_ATOMIC_HAS_8) +#define ma_atomic_clear_explicit_8( dst, order) ma_atomic_store_explicit_8 (dst, 0, order) +#endif +#if defined(MA_ATOMIC_HAS_16) +#define ma_atomic_clear_explicit_16(dst, order) ma_atomic_store_explicit_16(dst, 0, order) +#endif +#if defined(MA_ATOMIC_HAS_32) +#define ma_atomic_clear_explicit_32(dst, order) ma_atomic_store_explicit_32(dst, 0, order) +#endif +#if defined(MA_ATOMIC_HAS_64) +#define ma_atomic_clear_explicit_64(dst, order) ma_atomic_store_explicit_64(dst, 0, order) +#endif +#if defined(MA_ATOMIC_HAS_8) + typedef ma_uint8 ma_atomic_flag; +#define ma_atomic_flag_test_and_set_explicit(ptr, order) (ma_bool32)ma_atomic_test_and_set_explicit_8(ptr, order) +#define ma_atomic_flag_clear_explicit(ptr, order) ma_atomic_clear_explicit_8(ptr, order) +#define c89atoimc_flag_load_explicit(ptr, order) ma_atomic_load_explicit_8(ptr, order) +#else + typedef ma_uint32 ma_atomic_flag; +#define ma_atomic_flag_test_and_set_explicit(ptr, order) (ma_bool32)ma_atomic_test_and_set_explicit_32(ptr, order) +#define ma_atomic_flag_clear_explicit(ptr, order) ma_atomic_clear_explicit_32(ptr, 
order) +#define c89atoimc_flag_load_explicit(ptr, order) ma_atomic_load_explicit_32(ptr, order) +#endif +#elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))) +#define MA_ATOMIC_HAS_NATIVE_COMPARE_EXCHANGE +#define MA_ATOMIC_HAS_NATIVE_IS_LOCK_FREE +#define ma_atomic_memory_order_relaxed __ATOMIC_RELAXED +#define ma_atomic_memory_order_consume __ATOMIC_CONSUME +#define ma_atomic_memory_order_acquire __ATOMIC_ACQUIRE +#define ma_atomic_memory_order_release __ATOMIC_RELEASE +#define ma_atomic_memory_order_acq_rel __ATOMIC_ACQ_REL +#define ma_atomic_memory_order_seq_cst __ATOMIC_SEQ_CST +#define ma_atomic_compiler_fence() __asm__ __volatile__("":::"memory") +#define ma_atomic_thread_fence(order) __atomic_thread_fence(order) +#define ma_atomic_signal_fence(order) __atomic_signal_fence(order) +#define ma_atomic_is_lock_free_8(ptr) __atomic_is_lock_free(1, ptr) +#define ma_atomic_is_lock_free_16(ptr) __atomic_is_lock_free(2, ptr) +#define ma_atomic_is_lock_free_32(ptr) __atomic_is_lock_free(4, ptr) +#define ma_atomic_is_lock_free_64(ptr) __atomic_is_lock_free(8, ptr) +#define ma_atomic_test_and_set_explicit_8( dst, order) __atomic_exchange_n(dst, 1, order) +#define ma_atomic_test_and_set_explicit_16(dst, order) __atomic_exchange_n(dst, 1, order) +#define ma_atomic_test_and_set_explicit_32(dst, order) __atomic_exchange_n(dst, 1, order) +#define ma_atomic_test_and_set_explicit_64(dst, order) __atomic_exchange_n(dst, 1, order) +#define ma_atomic_clear_explicit_8( dst, order) __atomic_store_n(dst, 0, order) +#define ma_atomic_clear_explicit_16(dst, order) __atomic_store_n(dst, 0, order) +#define ma_atomic_clear_explicit_32(dst, order) __atomic_store_n(dst, 0, order) +#define ma_atomic_clear_explicit_64(dst, order) __atomic_store_n(dst, 0, order) +#define ma_atomic_store_explicit_8( dst, src, order) __atomic_store_n(dst, src, order) +#define ma_atomic_store_explicit_16(dst, src, order) __atomic_store_n(dst, src, order) +#define ma_atomic_store_explicit_32(dst, src, order) __atomic_store_n(dst, src, order) +#define ma_atomic_store_explicit_64(dst, src, order) __atomic_store_n(dst, src, order) +#define ma_atomic_load_explicit_8( dst, order) __atomic_load_n(dst, order) +#define ma_atomic_load_explicit_16(dst, order) __atomic_load_n(dst, order) +#define ma_atomic_load_explicit_32(dst, order) __atomic_load_n(dst, order) +#define ma_atomic_load_explicit_64(dst, order) __atomic_load_n(dst, order) +#define ma_atomic_exchange_explicit_8( dst, src, order) __atomic_exchange_n(dst, src, order) +#define ma_atomic_exchange_explicit_16(dst, src, order) __atomic_exchange_n(dst, src, order) +#define ma_atomic_exchange_explicit_32(dst, src, order) __atomic_exchange_n(dst, src, order) +#define ma_atomic_exchange_explicit_64(dst, src, order) __atomic_exchange_n(dst, src, order) +#define ma_atomic_compare_exchange_strong_explicit_8( dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 0, successOrder, failureOrder) +#define ma_atomic_compare_exchange_strong_explicit_16(dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 0, successOrder, failureOrder) +#define ma_atomic_compare_exchange_strong_explicit_32(dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 0, successOrder, failureOrder) +#define ma_atomic_compare_exchange_strong_explicit_64(dst, expected, desired, successOrder, failureOrder) 
__atomic_compare_exchange_n(dst, expected, desired, 0, successOrder, failureOrder) +#define ma_atomic_compare_exchange_weak_explicit_8( dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 1, successOrder, failureOrder) +#define ma_atomic_compare_exchange_weak_explicit_16(dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 1, successOrder, failureOrder) +#define ma_atomic_compare_exchange_weak_explicit_32(dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 1, successOrder, failureOrder) +#define ma_atomic_compare_exchange_weak_explicit_64(dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 1, successOrder, failureOrder) +#define ma_atomic_fetch_add_explicit_8( dst, src, order) __atomic_fetch_add(dst, src, order) +#define ma_atomic_fetch_add_explicit_16(dst, src, order) __atomic_fetch_add(dst, src, order) +#define ma_atomic_fetch_add_explicit_32(dst, src, order) __atomic_fetch_add(dst, src, order) +#define ma_atomic_fetch_add_explicit_64(dst, src, order) __atomic_fetch_add(dst, src, order) +#define ma_atomic_fetch_sub_explicit_8( dst, src, order) __atomic_fetch_sub(dst, src, order) +#define ma_atomic_fetch_sub_explicit_16(dst, src, order) __atomic_fetch_sub(dst, src, order) +#define ma_atomic_fetch_sub_explicit_32(dst, src, order) __atomic_fetch_sub(dst, src, order) +#define ma_atomic_fetch_sub_explicit_64(dst, src, order) __atomic_fetch_sub(dst, src, order) +#define ma_atomic_fetch_or_explicit_8( dst, src, order) __atomic_fetch_or(dst, src, order) +#define ma_atomic_fetch_or_explicit_16(dst, src, order) __atomic_fetch_or(dst, src, order) +#define ma_atomic_fetch_or_explicit_32(dst, src, order) __atomic_fetch_or(dst, src, order) +#define ma_atomic_fetch_or_explicit_64(dst, src, order) __atomic_fetch_or(dst, src, order) +#define ma_atomic_fetch_xor_explicit_8( dst, src, order) __atomic_fetch_xor(dst, src, order) +#define ma_atomic_fetch_xor_explicit_16(dst, src, order) __atomic_fetch_xor(dst, src, order) +#define ma_atomic_fetch_xor_explicit_32(dst, src, order) __atomic_fetch_xor(dst, src, order) +#define ma_atomic_fetch_xor_explicit_64(dst, src, order) __atomic_fetch_xor(dst, src, order) +#define ma_atomic_fetch_and_explicit_8( dst, src, order) __atomic_fetch_and(dst, src, order) +#define ma_atomic_fetch_and_explicit_16(dst, src, order) __atomic_fetch_and(dst, src, order) +#define ma_atomic_fetch_and_explicit_32(dst, src, order) __atomic_fetch_and(dst, src, order) +#define ma_atomic_fetch_and_explicit_64(dst, src, order) __atomic_fetch_and(dst, src, order) + static MA_INLINE ma_uint8 ma_atomic_compare_and_swap_8(volatile ma_uint8* dst, ma_uint8 expected, ma_uint8 desired) + { + __atomic_compare_exchange_n(dst, &expected, desired, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + return expected; + } + static MA_INLINE ma_uint16 ma_atomic_compare_and_swap_16(volatile ma_uint16* dst, ma_uint16 expected, ma_uint16 desired) + { + __atomic_compare_exchange_n(dst, &expected, desired, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + return expected; + } + static MA_INLINE ma_uint32 ma_atomic_compare_and_swap_32(volatile ma_uint32* dst, ma_uint32 expected, ma_uint32 desired) + { + __atomic_compare_exchange_n(dst, &expected, desired, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + return expected; + } + static MA_INLINE ma_uint64 ma_atomic_compare_and_swap_64(volatile ma_uint64* dst, ma_uint64 expected, ma_uint64 
desired) + { + __atomic_compare_exchange_n(dst, &expected, desired, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + return expected; + } + typedef ma_uint8 ma_atomic_flag; +#define ma_atomic_flag_test_and_set_explicit(dst, order) (ma_bool32)__atomic_test_and_set(dst, order) +#define ma_atomic_flag_clear_explicit(dst, order) __atomic_clear(dst, order) +#define c89atoimc_flag_load_explicit(ptr, order) ma_atomic_load_explicit_8(ptr, order) +#else +#define ma_atomic_memory_order_relaxed 1 +#define ma_atomic_memory_order_consume 2 +#define ma_atomic_memory_order_acquire 3 +#define ma_atomic_memory_order_release 4 +#define ma_atomic_memory_order_acq_rel 5 +#define ma_atomic_memory_order_seq_cst 6 +#define ma_atomic_compiler_fence() __asm__ __volatile__("":::"memory") +#if defined(__GNUC__) +#define ma_atomic_thread_fence(order) __sync_synchronize(), (void)order + static MA_INLINE ma_uint8 ma_atomic_exchange_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + if (order > ma_atomic_memory_order_acquire) { + __sync_synchronize(); + } + return __sync_lock_test_and_set(dst, src); + } + static MA_INLINE ma_uint16 ma_atomic_exchange_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 oldValue; + do { + oldValue = *dst; + } while (__sync_val_compare_and_swap(dst, oldValue, src) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint32 ma_atomic_exchange_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 oldValue; + do { + oldValue = *dst; + } while (__sync_val_compare_and_swap(dst, oldValue, src) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint64 ma_atomic_exchange_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 oldValue; + do { + oldValue = *dst; + } while (__sync_val_compare_and_swap(dst, oldValue, src) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint8 ma_atomic_fetch_add_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_add(dst, src); + } + static MA_INLINE ma_uint16 ma_atomic_fetch_add_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_add(dst, src); + } + static MA_INLINE ma_uint32 ma_atomic_fetch_add_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_add(dst, src); + } + static MA_INLINE ma_uint64 ma_atomic_fetch_add_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_add(dst, src); + } + static MA_INLINE ma_uint8 ma_atomic_fetch_sub_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_sub(dst, src); + } + static MA_INLINE ma_uint16 ma_atomic_fetch_sub_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_sub(dst, src); + } + static MA_INLINE ma_uint32 ma_atomic_fetch_sub_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_sub(dst, src); + } + static MA_INLINE ma_uint64 ma_atomic_fetch_sub_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_sub(dst, src); + } + 
static MA_INLINE ma_uint8 ma_atomic_fetch_or_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_or(dst, src); + } + static MA_INLINE ma_uint16 ma_atomic_fetch_or_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_or(dst, src); + } + static MA_INLINE ma_uint32 ma_atomic_fetch_or_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_or(dst, src); + } + static MA_INLINE ma_uint64 ma_atomic_fetch_or_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_or(dst, src); + } + static MA_INLINE ma_uint8 ma_atomic_fetch_xor_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_xor(dst, src); + } + static MA_INLINE ma_uint16 ma_atomic_fetch_xor_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_xor(dst, src); + } + static MA_INLINE ma_uint32 ma_atomic_fetch_xor_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_xor(dst, src); + } + static MA_INLINE ma_uint64 ma_atomic_fetch_xor_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_xor(dst, src); + } + static MA_INLINE ma_uint8 ma_atomic_fetch_and_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_and(dst, src); + } + static MA_INLINE ma_uint16 ma_atomic_fetch_and_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_and(dst, src); + } + static MA_INLINE ma_uint32 ma_atomic_fetch_and_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_and(dst, src); + } + static MA_INLINE ma_uint64 ma_atomic_fetch_and_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_and(dst, src); + } +#define ma_atomic_compare_and_swap_8( dst, expected, desired) __sync_val_compare_and_swap(dst, expected, desired) +#define ma_atomic_compare_and_swap_16(dst, expected, desired) __sync_val_compare_and_swap(dst, expected, desired) +#define ma_atomic_compare_and_swap_32(dst, expected, desired) __sync_val_compare_and_swap(dst, expected, desired) +#define ma_atomic_compare_and_swap_64(dst, expected, desired) __sync_val_compare_and_swap(dst, expected, desired) +#else +#if defined(MA_X86) +#define ma_atomic_thread_fence(order) __asm__ __volatile__("lock; addl $0, (%%esp)" ::: "memory", "cc") +#elif defined(MA_X64) +#define ma_atomic_thread_fence(order) __asm__ __volatile__("lock; addq $0, (%%rsp)" ::: "memory", "cc") +#else +#error Unsupported architecture. Please submit a feature request. +#endif + static MA_INLINE ma_uint8 ma_atomic_compare_and_swap_8(volatile ma_uint8* dst, ma_uint8 expected, ma_uint8 desired) + { + ma_uint8 result; +#if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; cmpxchg %3, %0" : "+m"(*dst), "=a"(result) : "a"(expected), "d"(desired) : "cc"); +#else +#error Unsupported architecture. Please submit a feature request. 
+#endif + return result; + } + static MA_INLINE ma_uint16 ma_atomic_compare_and_swap_16(volatile ma_uint16* dst, ma_uint16 expected, ma_uint16 desired) + { + ma_uint16 result; +#if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; cmpxchg %3, %0" : "+m"(*dst), "=a"(result) : "a"(expected), "d"(desired) : "cc"); +#else +#error Unsupported architecture. Please submit a feature request. +#endif + return result; + } + static MA_INLINE ma_uint32 ma_atomic_compare_and_swap_32(volatile ma_uint32* dst, ma_uint32 expected, ma_uint32 desired) + { + ma_uint32 result; +#if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; cmpxchg %3, %0" : "+m"(*dst), "=a"(result) : "a"(expected), "d"(desired) : "cc"); +#else +#error Unsupported architecture. Please submit a feature request. +#endif + return result; + } + static MA_INLINE ma_uint64 ma_atomic_compare_and_swap_64(volatile ma_uint64* dst, ma_uint64 expected, ma_uint64 desired) + { + volatile ma_uint64 result; +#if defined(MA_X86) + ma_uint32 resultEAX; + ma_uint32 resultEDX; + __asm__ __volatile__("push %%ebx; xchg %5, %%ebx; lock; cmpxchg8b %0; pop %%ebx" : "+m"(*dst), "=a"(resultEAX), "=d"(resultEDX) : "a"(expected & 0xFFFFFFFF), "d"(expected >> 32), "r"(desired & 0xFFFFFFFF), "c"(desired >> 32) : "cc"); + result = ((ma_uint64)resultEDX << 32) | resultEAX; +#elif defined(MA_X64) + __asm__ __volatile__("lock; cmpxchg %3, %0" : "+m"(*dst), "=a"(result) : "a"(expected), "d"(desired) : "cc"); +#else +#error Unsupported architecture. Please submit a feature request. +#endif + return result; + } + static MA_INLINE ma_uint8 ma_atomic_exchange_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + ma_uint8 result = 0; + (void)order; +#if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; xchg %1, %0" : "+m"(*dst), "=a"(result) : "a"(src)); +#else +#error Unsupported architecture. Please submit a feature request. +#endif + return result; + } + static MA_INLINE ma_uint16 ma_atomic_exchange_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 result = 0; + (void)order; +#if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; xchg %1, %0" : "+m"(*dst), "=a"(result) : "a"(src)); +#else +#error Unsupported architecture. Please submit a feature request. +#endif + return result; + } + static MA_INLINE ma_uint32 ma_atomic_exchange_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 result; + (void)order; +#if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; xchg %1, %0" : "+m"(*dst), "=a"(result) : "a"(src)); +#else +#error Unsupported architecture. Please submit a feature request. +#endif + return result; + } + static MA_INLINE ma_uint64 ma_atomic_exchange_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 result; + (void)order; +#if defined(MA_X86) + do { + result = *dst; + } while (ma_atomic_compare_and_swap_64(dst, result, src) != result); +#elif defined(MA_X64) + __asm__ __volatile__("lock; xchg %1, %0" : "+m"(*dst), "=a"(result) : "a"(src)); +#else +#error Unsupported architecture. Please submit a feature request. 
+#endif + return result; + } + static MA_INLINE ma_uint8 ma_atomic_fetch_add_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + ma_uint8 result; + (void)order; +#if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; xadd %1, %0" : "+m"(*dst), "=a"(result) : "a"(src) : "cc"); +#else +#error Unsupported architecture. Please submit a feature request. +#endif + return result; + } + static MA_INLINE ma_uint16 ma_atomic_fetch_add_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 result; + (void)order; +#if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; xadd %1, %0" : "+m"(*dst), "=a"(result) : "a"(src) : "cc"); +#else +#error Unsupported architecture. Please submit a feature request. +#endif + return result; + } + static MA_INLINE ma_uint32 ma_atomic_fetch_add_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 result; + (void)order; +#if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; xadd %1, %0" : "+m"(*dst), "=a"(result) : "a"(src) : "cc"); +#else +#error Unsupported architecture. Please submit a feature request. +#endif + return result; + } + static MA_INLINE ma_uint64 ma_atomic_fetch_add_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { +#if defined(MA_X86) + ma_uint64 oldValue; + ma_uint64 newValue; + (void)order; + do { + oldValue = *dst; + newValue = oldValue + src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + return oldValue; +#elif defined(MA_X64) + ma_uint64 result; + (void)order; + __asm__ __volatile__("lock; xadd %1, %0" : "+m"(*dst), "=a"(result) : "a"(src) : "cc"); + return result; +#endif + } + static MA_INLINE ma_uint8 ma_atomic_fetch_sub_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + ma_uint8 oldValue; + ma_uint8 newValue; + do { + oldValue = *dst; + newValue = (ma_uint8)(oldValue - src); + } while (ma_atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint16 ma_atomic_fetch_sub_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 oldValue; + ma_uint16 newValue; + do { + oldValue = *dst; + newValue = (ma_uint16)(oldValue - src); + } while (ma_atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint32 ma_atomic_fetch_sub_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 oldValue; + ma_uint32 newValue; + do { + oldValue = *dst; + newValue = oldValue - src; + } while (ma_atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint64 ma_atomic_fetch_sub_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 oldValue; + ma_uint64 newValue; + do { + oldValue = *dst; + newValue = oldValue - src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint8 ma_atomic_fetch_and_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + ma_uint8 oldValue; + ma_uint8 newValue; + do { + oldValue = *dst; + newValue = (ma_uint8)(oldValue & src); + } while (ma_atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue); + (void)order; + return 
oldValue; + } + static MA_INLINE ma_uint16 ma_atomic_fetch_and_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 oldValue; + ma_uint16 newValue; + do { + oldValue = *dst; + newValue = (ma_uint16)(oldValue & src); + } while (ma_atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint32 ma_atomic_fetch_and_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 oldValue; + ma_uint32 newValue; + do { + oldValue = *dst; + newValue = oldValue & src; + } while (ma_atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint64 ma_atomic_fetch_and_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 oldValue; + ma_uint64 newValue; + do { + oldValue = *dst; + newValue = oldValue & src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint8 ma_atomic_fetch_xor_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + ma_uint8 oldValue; + ma_uint8 newValue; + do { + oldValue = *dst; + newValue = (ma_uint8)(oldValue ^ src); + } while (ma_atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint16 ma_atomic_fetch_xor_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 oldValue; + ma_uint16 newValue; + do { + oldValue = *dst; + newValue = (ma_uint16)(oldValue ^ src); + } while (ma_atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint32 ma_atomic_fetch_xor_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 oldValue; + ma_uint32 newValue; + do { + oldValue = *dst; + newValue = oldValue ^ src; + } while (ma_atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint64 ma_atomic_fetch_xor_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 oldValue; + ma_uint64 newValue; + do { + oldValue = *dst; + newValue = oldValue ^ src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint8 ma_atomic_fetch_or_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + ma_uint8 oldValue; + ma_uint8 newValue; + do { + oldValue = *dst; + newValue = (ma_uint8)(oldValue | src); + } while (ma_atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint16 ma_atomic_fetch_or_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 oldValue; + ma_uint16 newValue; + do { + oldValue = *dst; + newValue = (ma_uint16)(oldValue | src); + } while (ma_atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint32 ma_atomic_fetch_or_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 oldValue; + ma_uint32 newValue; + do { + oldValue = *dst; + newValue = oldValue | src; + } while (ma_atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue); + 
(void)order; + return oldValue; + } + static MA_INLINE ma_uint64 ma_atomic_fetch_or_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 oldValue; + ma_uint64 newValue; + do { + oldValue = *dst; + newValue = oldValue | src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } +#endif +#define ma_atomic_signal_fence(order) ma_atomic_thread_fence(order) + static MA_INLINE ma_uint8 ma_atomic_load_explicit_8(volatile const ma_uint8* ptr, ma_atomic_memory_order order) + { + (void)order; + return ma_atomic_compare_and_swap_8((ma_uint8*)ptr, 0, 0); + } + static MA_INLINE ma_uint16 ma_atomic_load_explicit_16(volatile const ma_uint16* ptr, ma_atomic_memory_order order) + { + (void)order; + return ma_atomic_compare_and_swap_16((ma_uint16*)ptr, 0, 0); + } + static MA_INLINE ma_uint32 ma_atomic_load_explicit_32(volatile const ma_uint32* ptr, ma_atomic_memory_order order) + { + (void)order; + return ma_atomic_compare_and_swap_32((ma_uint32*)ptr, 0, 0); + } + static MA_INLINE ma_uint64 ma_atomic_load_explicit_64(volatile const ma_uint64* ptr, ma_atomic_memory_order order) + { + (void)order; + return ma_atomic_compare_and_swap_64((ma_uint64*)ptr, 0, 0); + } +#define ma_atomic_store_explicit_8( dst, src, order) (void)ma_atomic_exchange_explicit_8 (dst, src, order) +#define ma_atomic_store_explicit_16(dst, src, order) (void)ma_atomic_exchange_explicit_16(dst, src, order) +#define ma_atomic_store_explicit_32(dst, src, order) (void)ma_atomic_exchange_explicit_32(dst, src, order) +#define ma_atomic_store_explicit_64(dst, src, order) (void)ma_atomic_exchange_explicit_64(dst, src, order) +#define ma_atomic_test_and_set_explicit_8( dst, order) ma_atomic_exchange_explicit_8 (dst, 1, order) +#define ma_atomic_test_and_set_explicit_16(dst, order) ma_atomic_exchange_explicit_16(dst, 1, order) +#define ma_atomic_test_and_set_explicit_32(dst, order) ma_atomic_exchange_explicit_32(dst, 1, order) +#define ma_atomic_test_and_set_explicit_64(dst, order) ma_atomic_exchange_explicit_64(dst, 1, order) +#define ma_atomic_clear_explicit_8( dst, order) ma_atomic_store_explicit_8 (dst, 0, order) +#define ma_atomic_clear_explicit_16(dst, order) ma_atomic_store_explicit_16(dst, 0, order) +#define ma_atomic_clear_explicit_32(dst, order) ma_atomic_store_explicit_32(dst, 0, order) +#define ma_atomic_clear_explicit_64(dst, order) ma_atomic_store_explicit_64(dst, 0, order) + typedef ma_uint8 ma_atomic_flag; +#define ma_atomic_flag_test_and_set_explicit(ptr, order) (ma_bool32)ma_atomic_test_and_set_explicit_8(ptr, order) +#define ma_atomic_flag_clear_explicit(ptr, order) ma_atomic_clear_explicit_8(ptr, order) +#define c89atoimc_flag_load_explicit(ptr, order) ma_atomic_load_explicit_8(ptr, order) +#endif +#if !defined(MA_ATOMIC_HAS_NATIVE_COMPARE_EXCHANGE) +#if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_strong_explicit_8(volatile ma_uint8* dst, ma_uint8* expected, ma_uint8 desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + ma_uint8 expectedValue; + ma_uint8 result; + (void)successOrder; + (void)failureOrder; + expectedValue = ma_atomic_load_explicit_8(expected, ma_atomic_memory_order_seq_cst); + result = ma_atomic_compare_and_swap_8(dst, expectedValue, desired); + if (result == expectedValue) { + return 1; + } else { + ma_atomic_store_explicit_8(expected, result, failureOrder); + return 0; + } + } +#endif +#if defined(MA_ATOMIC_HAS_16) + static MA_INLINE 
ma_bool32 ma_atomic_compare_exchange_strong_explicit_16(volatile ma_uint16* dst, ma_uint16* expected, ma_uint16 desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + ma_uint16 expectedValue; + ma_uint16 result; + (void)successOrder; + (void)failureOrder; + expectedValue = ma_atomic_load_explicit_16(expected, ma_atomic_memory_order_seq_cst); + result = ma_atomic_compare_and_swap_16(dst, expectedValue, desired); + if (result == expectedValue) { + return 1; + } else { + ma_atomic_store_explicit_16(expected, result, failureOrder); + return 0; + } + } +#endif +#if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_strong_explicit_32(volatile ma_uint32* dst, ma_uint32* expected, ma_uint32 desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + ma_uint32 expectedValue; + ma_uint32 result; + (void)successOrder; + (void)failureOrder; + expectedValue = ma_atomic_load_explicit_32(expected, ma_atomic_memory_order_seq_cst); + result = ma_atomic_compare_and_swap_32(dst, expectedValue, desired); + if (result == expectedValue) { + return 1; + } else { + ma_atomic_store_explicit_32(expected, result, failureOrder); + return 0; + } + } +#endif +#if defined(MA_ATOMIC_HAS_64) + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_strong_explicit_64(volatile ma_uint64* dst, volatile ma_uint64* expected, ma_uint64 desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + ma_uint64 expectedValue; + ma_uint64 result; + (void)successOrder; + (void)failureOrder; + expectedValue = ma_atomic_load_explicit_64(expected, ma_atomic_memory_order_seq_cst); + result = ma_atomic_compare_and_swap_64(dst, expectedValue, desired); + if (result == expectedValue) { + return 1; + } else { + ma_atomic_store_explicit_64(expected, result, failureOrder); + return 0; + } + } +#endif +#define ma_atomic_compare_exchange_weak_explicit_8( dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_strong_explicit_8 (dst, expected, desired, successOrder, failureOrder) +#define ma_atomic_compare_exchange_weak_explicit_16(dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_strong_explicit_16(dst, expected, desired, successOrder, failureOrder) +#define ma_atomic_compare_exchange_weak_explicit_32(dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_strong_explicit_32(dst, expected, desired, successOrder, failureOrder) +#define ma_atomic_compare_exchange_weak_explicit_64(dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_strong_explicit_64(dst, expected, desired, successOrder, failureOrder) +#endif +#if !defined(MA_ATOMIC_HAS_NATIVE_IS_LOCK_FREE) + static MA_INLINE ma_bool32 ma_atomic_is_lock_free_8(volatile void* ptr) + { + (void)ptr; + return 1; + } + static MA_INLINE ma_bool32 ma_atomic_is_lock_free_16(volatile void* ptr) + { + (void)ptr; + return 1; + } + static MA_INLINE ma_bool32 ma_atomic_is_lock_free_32(volatile void* ptr) + { + (void)ptr; + return 1; + } + static MA_INLINE ma_bool32 ma_atomic_is_lock_free_64(volatile void* ptr) + { + (void)ptr; +#if defined(MA_64BIT) + return 1; +#else +#if defined(MA_X86) || defined(MA_X64) + return 1; +#else + return 0; +#endif +#endif + } +#endif +#if defined(MA_64BIT) + static MA_INLINE ma_bool32 ma_atomic_is_lock_free_ptr(volatile void** ptr) + { + return ma_atomic_is_lock_free_64((volatile ma_uint64*)ptr); + } + static MA_INLINE void* ma_atomic_load_explicit_ptr(volatile 
void** ptr, ma_atomic_memory_order order) + { + return (void*)ma_atomic_load_explicit_64((volatile ma_uint64*)ptr, order); + } + static MA_INLINE void ma_atomic_store_explicit_ptr(volatile void** dst, void* src, ma_atomic_memory_order order) + { + ma_atomic_store_explicit_64((volatile ma_uint64*)dst, (ma_uint64)src, order); + } + static MA_INLINE void* ma_atomic_exchange_explicit_ptr(volatile void** dst, void* src, ma_atomic_memory_order order) + { + return (void*)ma_atomic_exchange_explicit_64((volatile ma_uint64*)dst, (ma_uint64)src, order); + } + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_strong_explicit_ptr(volatile void** dst, void** expected, void* desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + return ma_atomic_compare_exchange_strong_explicit_64((volatile ma_uint64*)dst, (ma_uint64*)expected, (ma_uint64)desired, successOrder, failureOrder); + } + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_weak_explicit_ptr(volatile void** dst, void** expected, void* desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + return ma_atomic_compare_exchange_weak_explicit_64((volatile ma_uint64*)dst, (ma_uint64*)expected, (ma_uint64)desired, successOrder, failureOrder); + } + static MA_INLINE void* ma_atomic_compare_and_swap_ptr(volatile void** dst, void* expected, void* desired) + { + return (void*)ma_atomic_compare_and_swap_64((volatile ma_uint64*)dst, (ma_uint64)expected, (ma_uint64)desired); + } +#elif defined(MA_32BIT) + static MA_INLINE ma_bool32 ma_atomic_is_lock_free_ptr(volatile void** ptr) + { + return ma_atomic_is_lock_free_32((volatile ma_uint32*)ptr); + } + static MA_INLINE void* ma_atomic_load_explicit_ptr(volatile void** ptr, ma_atomic_memory_order order) + { + return (void*)ma_atomic_load_explicit_32((volatile ma_uint32*)ptr, order); + } + static MA_INLINE void ma_atomic_store_explicit_ptr(volatile void** dst, void* src, ma_atomic_memory_order order) + { + ma_atomic_store_explicit_32((volatile ma_uint32*)dst, (ma_uint32)src, order); + } + static MA_INLINE void* ma_atomic_exchange_explicit_ptr(volatile void** dst, void* src, ma_atomic_memory_order order) + { + return (void*)ma_atomic_exchange_explicit_32((volatile ma_uint32*)dst, (ma_uint32)src, order); + } + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_strong_explicit_ptr(volatile void** dst, void** expected, void* desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + return ma_atomic_compare_exchange_strong_explicit_32((volatile ma_uint32*)dst, (ma_uint32*)expected, (ma_uint32)desired, successOrder, failureOrder); + } + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_weak_explicit_ptr(volatile void** dst, void** expected, void* desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + return ma_atomic_compare_exchange_weak_explicit_32((volatile ma_uint32*)dst, (ma_uint32*)expected, (ma_uint32)desired, successOrder, failureOrder); + } + static MA_INLINE void* ma_atomic_compare_and_swap_ptr(volatile void** dst, void* expected, void* desired) + { + return (void*)ma_atomic_compare_and_swap_32((volatile ma_uint32*)dst, (ma_uint32)expected, (ma_uint32)desired); + } +#else +#error Unsupported architecture. 
+#endif +#define ma_atomic_flag_test_and_set(ptr) ma_atomic_flag_test_and_set_explicit(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_flag_clear(ptr) ma_atomic_flag_clear_explicit(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_ptr(dst, src) ma_atomic_store_explicit_ptr((volatile void**)dst, (void*)src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_ptr(ptr) ma_atomic_load_explicit_ptr((volatile void**)ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_ptr(dst, src) ma_atomic_exchange_explicit_ptr((volatile void**)dst, (void*)src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_ptr(dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_ptr((volatile void**)dst, (void**)expected, (void*)desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_ptr(dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_ptr((volatile void**)dst, (void**)expected, (void*)desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_test_and_set_8( ptr) ma_atomic_test_and_set_explicit_8( ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_test_and_set_16(ptr) ma_atomic_test_and_set_explicit_16(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_test_and_set_32(ptr) ma_atomic_test_and_set_explicit_32(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_test_and_set_64(ptr) ma_atomic_test_and_set_explicit_64(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_8( ptr) ma_atomic_clear_explicit_8( ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_16(ptr) ma_atomic_clear_explicit_16(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_32(ptr) ma_atomic_clear_explicit_32(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_64(ptr) ma_atomic_clear_explicit_64(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_8( dst, src) ma_atomic_store_explicit_8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_16(dst, src) ma_atomic_store_explicit_16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_32(dst, src) ma_atomic_store_explicit_32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_64(dst, src) ma_atomic_store_explicit_64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_8( ptr) ma_atomic_load_explicit_8( ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_16(ptr) ma_atomic_load_explicit_16(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_32(ptr) ma_atomic_load_explicit_32(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_64(ptr) ma_atomic_load_explicit_64(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_8( dst, src) ma_atomic_exchange_explicit_8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_16(dst, src) ma_atomic_exchange_explicit_16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_32(dst, src) ma_atomic_exchange_explicit_32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_64(dst, src) ma_atomic_exchange_explicit_64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_8( dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_8( dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_16(dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_16(dst, expected, desired, 
ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_32(dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_32(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_64(dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_64(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_8( dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_8( dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_16( dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_16(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_32( dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_32(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_64( dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_64(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_8( dst, src) ma_atomic_fetch_add_explicit_8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_16(dst, src) ma_atomic_fetch_add_explicit_16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_32(dst, src) ma_atomic_fetch_add_explicit_32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_64(dst, src) ma_atomic_fetch_add_explicit_64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_8( dst, src) ma_atomic_fetch_sub_explicit_8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_16(dst, src) ma_atomic_fetch_sub_explicit_16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_32(dst, src) ma_atomic_fetch_sub_explicit_32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_64(dst, src) ma_atomic_fetch_sub_explicit_64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_8( dst, src) ma_atomic_fetch_or_explicit_8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_16(dst, src) ma_atomic_fetch_or_explicit_16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_32(dst, src) ma_atomic_fetch_or_explicit_32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_64(dst, src) ma_atomic_fetch_or_explicit_64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_8( dst, src) ma_atomic_fetch_xor_explicit_8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_16(dst, src) ma_atomic_fetch_xor_explicit_16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_32(dst, src) ma_atomic_fetch_xor_explicit_32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_64(dst, src) ma_atomic_fetch_xor_explicit_64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_8( dst, src) ma_atomic_fetch_and_explicit_8 (dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_16(dst, src) ma_atomic_fetch_and_explicit_16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_32(dst, src) ma_atomic_fetch_and_explicit_32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_64(dst, src) ma_atomic_fetch_and_explicit_64(dst, src, 
ma_atomic_memory_order_seq_cst) +#define ma_atomic_test_and_set_explicit_i8( ptr, order) (ma_int8 )ma_atomic_test_and_set_explicit_8( (ma_uint8* )ptr, order) +#define ma_atomic_test_and_set_explicit_i16(ptr, order) (ma_int16)ma_atomic_test_and_set_explicit_16((ma_uint16*)ptr, order) +#define ma_atomic_test_and_set_explicit_i32(ptr, order) (ma_int32)ma_atomic_test_and_set_explicit_32((ma_uint32*)ptr, order) +#define ma_atomic_test_and_set_explicit_i64(ptr, order) (ma_int64)ma_atomic_test_and_set_explicit_64((ma_uint64*)ptr, order) +#define ma_atomic_clear_explicit_i8( ptr, order) ma_atomic_clear_explicit_8( (ma_uint8* )ptr, order) +#define ma_atomic_clear_explicit_i16(ptr, order) ma_atomic_clear_explicit_16((ma_uint16*)ptr, order) +#define ma_atomic_clear_explicit_i32(ptr, order) ma_atomic_clear_explicit_32((ma_uint32*)ptr, order) +#define ma_atomic_clear_explicit_i64(ptr, order) ma_atomic_clear_explicit_64((ma_uint64*)ptr, order) +#define ma_atomic_store_explicit_i8( dst, src, order) ma_atomic_store_explicit_8( (ma_uint8* )dst, (ma_uint8 )src, order) +#define ma_atomic_store_explicit_i16(dst, src, order) ma_atomic_store_explicit_16((ma_uint16*)dst, (ma_uint16)src, order) +#define ma_atomic_store_explicit_i32(dst, src, order) ma_atomic_store_explicit_32((ma_uint32*)dst, (ma_uint32)src, order) +#define ma_atomic_store_explicit_i64(dst, src, order) ma_atomic_store_explicit_64((ma_uint64*)dst, (ma_uint64)src, order) +#define ma_atomic_load_explicit_i8( ptr, order) (ma_int8 )ma_atomic_load_explicit_8( (ma_uint8* )ptr, order) +#define ma_atomic_load_explicit_i16(ptr, order) (ma_int16)ma_atomic_load_explicit_16((ma_uint16*)ptr, order) +#define ma_atomic_load_explicit_i32(ptr, order) (ma_int32)ma_atomic_load_explicit_32((ma_uint32*)ptr, order) +#define ma_atomic_load_explicit_i64(ptr, order) (ma_int64)ma_atomic_load_explicit_64((ma_uint64*)ptr, order) +#define ma_atomic_exchange_explicit_i8( dst, src, order) (ma_int8 )ma_atomic_exchange_explicit_8 ((ma_uint8* )dst, (ma_uint8 )src, order) +#define ma_atomic_exchange_explicit_i16(dst, src, order) (ma_int16)ma_atomic_exchange_explicit_16((ma_uint16*)dst, (ma_uint16)src, order) +#define ma_atomic_exchange_explicit_i32(dst, src, order) (ma_int32)ma_atomic_exchange_explicit_32((ma_uint32*)dst, (ma_uint32)src, order) +#define ma_atomic_exchange_explicit_i64(dst, src, order) (ma_int64)ma_atomic_exchange_explicit_64((ma_uint64*)dst, (ma_uint64)src, order) +#define ma_atomic_compare_exchange_strong_explicit_i8( dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_strong_explicit_8( (ma_uint8* )dst, (ma_uint8* )expected, (ma_uint8 )desired, successOrder, failureOrder) +#define ma_atomic_compare_exchange_strong_explicit_i16(dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_strong_explicit_16((ma_uint16*)dst, (ma_uint16*)expected, (ma_uint16)desired, successOrder, failureOrder) +#define ma_atomic_compare_exchange_strong_explicit_i32(dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_strong_explicit_32((ma_uint32*)dst, (ma_uint32*)expected, (ma_uint32)desired, successOrder, failureOrder) +#define ma_atomic_compare_exchange_strong_explicit_i64(dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_strong_explicit_64((ma_uint64*)dst, (ma_uint64*)expected, (ma_uint64)desired, successOrder, failureOrder) +#define ma_atomic_compare_exchange_weak_explicit_i8( dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_weak_explicit_8( 
(ma_uint8* )dst, (ma_uint8* )expected, (ma_uint8 )desired, successOrder, failureOrder) +#define ma_atomic_compare_exchange_weak_explicit_i16(dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_weak_explicit_16((ma_uint16*)dst, (ma_uint16*)expected, (ma_uint16)desired, successOrder, failureOrder) +#define ma_atomic_compare_exchange_weak_explicit_i32(dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_weak_explicit_32((ma_uint32*)dst, (ma_uint32*)expected, (ma_uint32)desired, successOrder, failureOrder) +#define ma_atomic_compare_exchange_weak_explicit_i64(dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_weak_explicit_64((ma_uint64*)dst, (ma_uint64*)expected, (ma_uint64)desired, successOrder, failureOrder) +#define ma_atomic_fetch_add_explicit_i8( dst, src, order) (ma_int8 )ma_atomic_fetch_add_explicit_8( (ma_uint8* )dst, (ma_uint8 )src, order) +#define ma_atomic_fetch_add_explicit_i16(dst, src, order) (ma_int16)ma_atomic_fetch_add_explicit_16((ma_uint16*)dst, (ma_uint16)src, order) +#define ma_atomic_fetch_add_explicit_i32(dst, src, order) (ma_int32)ma_atomic_fetch_add_explicit_32((ma_uint32*)dst, (ma_uint32)src, order) +#define ma_atomic_fetch_add_explicit_i64(dst, src, order) (ma_int64)ma_atomic_fetch_add_explicit_64((ma_uint64*)dst, (ma_uint64)src, order) +#define ma_atomic_fetch_sub_explicit_i8( dst, src, order) (ma_int8 )ma_atomic_fetch_sub_explicit_8( (ma_uint8* )dst, (ma_uint8 )src, order) +#define ma_atomic_fetch_sub_explicit_i16(dst, src, order) (ma_int16)ma_atomic_fetch_sub_explicit_16((ma_uint16*)dst, (ma_uint16)src, order) +#define ma_atomic_fetch_sub_explicit_i32(dst, src, order) (ma_int32)ma_atomic_fetch_sub_explicit_32((ma_uint32*)dst, (ma_uint32)src, order) +#define ma_atomic_fetch_sub_explicit_i64(dst, src, order) (ma_int64)ma_atomic_fetch_sub_explicit_64((ma_uint64*)dst, (ma_uint64)src, order) +#define ma_atomic_fetch_or_explicit_i8( dst, src, order) (ma_int8 )ma_atomic_fetch_or_explicit_8( (ma_uint8* )dst, (ma_uint8 )src, order) +#define ma_atomic_fetch_or_explicit_i16(dst, src, order) (ma_int16)ma_atomic_fetch_or_explicit_16((ma_uint16*)dst, (ma_uint16)src, order) +#define ma_atomic_fetch_or_explicit_i32(dst, src, order) (ma_int32)ma_atomic_fetch_or_explicit_32((ma_uint32*)dst, (ma_uint32)src, order) +#define ma_atomic_fetch_or_explicit_i64(dst, src, order) (ma_int64)ma_atomic_fetch_or_explicit_64((ma_uint64*)dst, (ma_uint64)src, order) +#define ma_atomic_fetch_xor_explicit_i8( dst, src, order) (ma_int8 )ma_atomic_fetch_xor_explicit_8( (ma_uint8* )dst, (ma_uint8 )src, order) +#define ma_atomic_fetch_xor_explicit_i16(dst, src, order) (ma_int16)ma_atomic_fetch_xor_explicit_16((ma_uint16*)dst, (ma_uint16)src, order) +#define ma_atomic_fetch_xor_explicit_i32(dst, src, order) (ma_int32)ma_atomic_fetch_xor_explicit_32((ma_uint32*)dst, (ma_uint32)src, order) +#define ma_atomic_fetch_xor_explicit_i64(dst, src, order) (ma_int64)ma_atomic_fetch_xor_explicit_64((ma_uint64*)dst, (ma_uint64)src, order) +#define ma_atomic_fetch_and_explicit_i8( dst, src, order) (ma_int8 )ma_atomic_fetch_and_explicit_8( (ma_uint8* )dst, (ma_uint8 )src, order) +#define ma_atomic_fetch_and_explicit_i16(dst, src, order) (ma_int16)ma_atomic_fetch_and_explicit_16((ma_uint16*)dst, (ma_uint16)src, order) +#define ma_atomic_fetch_and_explicit_i32(dst, src, order) (ma_int32)ma_atomic_fetch_and_explicit_32((ma_uint32*)dst, (ma_uint32)src, order) +#define ma_atomic_fetch_and_explicit_i64(dst, src, order) 
(ma_int64)ma_atomic_fetch_and_explicit_64((ma_uint64*)dst, (ma_uint64)src, order) +#define ma_atomic_test_and_set_i8( ptr) ma_atomic_test_and_set_explicit_i8( ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_test_and_set_i16(ptr) ma_atomic_test_and_set_explicit_i16(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_test_and_set_i32(ptr) ma_atomic_test_and_set_explicit_i32(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_test_and_set_i64(ptr) ma_atomic_test_and_set_explicit_i64(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_i8( ptr) ma_atomic_clear_explicit_i8( ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_i16(ptr) ma_atomic_clear_explicit_i16(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_i32(ptr) ma_atomic_clear_explicit_i32(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_i64(ptr) ma_atomic_clear_explicit_i64(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_i8( dst, src) ma_atomic_store_explicit_i8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_i16(dst, src) ma_atomic_store_explicit_i16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_i32(dst, src) ma_atomic_store_explicit_i32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_i64(dst, src) ma_atomic_store_explicit_i64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_i8( ptr) ma_atomic_load_explicit_i8( ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_i16(ptr) ma_atomic_load_explicit_i16(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_i32(ptr) ma_atomic_load_explicit_i32(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_i64(ptr) ma_atomic_load_explicit_i64(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_i8( dst, src) ma_atomic_exchange_explicit_i8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_i16(dst, src) ma_atomic_exchange_explicit_i16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_i32(dst, src) ma_atomic_exchange_explicit_i32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_i64(dst, src) ma_atomic_exchange_explicit_i64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_i8( dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_i8( dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_i16(dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_i16(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_i32(dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_i32(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_i64(dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_i64(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_i8( dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_i8( dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_i16(dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_i16(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_i32(dst, expected, desired) 
ma_atomic_compare_exchange_weak_explicit_i32(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_i64(dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_i64(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_i8( dst, src) ma_atomic_fetch_add_explicit_i8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_i16(dst, src) ma_atomic_fetch_add_explicit_i16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_i32(dst, src) ma_atomic_fetch_add_explicit_i32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_i64(dst, src) ma_atomic_fetch_add_explicit_i64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_i8( dst, src) ma_atomic_fetch_sub_explicit_i8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_i16(dst, src) ma_atomic_fetch_sub_explicit_i16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_i32(dst, src) ma_atomic_fetch_sub_explicit_i32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_i64(dst, src) ma_atomic_fetch_sub_explicit_i64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_i8( dst, src) ma_atomic_fetch_or_explicit_i8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_i16(dst, src) ma_atomic_fetch_or_explicit_i16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_i32(dst, src) ma_atomic_fetch_or_explicit_i32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_i64(dst, src) ma_atomic_fetch_or_explicit_i64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_i8( dst, src) ma_atomic_fetch_xor_explicit_i8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_i16(dst, src) ma_atomic_fetch_xor_explicit_i16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_i32(dst, src) ma_atomic_fetch_xor_explicit_i32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_i64(dst, src) ma_atomic_fetch_xor_explicit_i64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_i8( dst, src) ma_atomic_fetch_and_explicit_i8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_i16(dst, src) ma_atomic_fetch_and_explicit_i16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_i32(dst, src) ma_atomic_fetch_and_explicit_i32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_i64(dst, src) ma_atomic_fetch_and_explicit_i64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_and_swap_i8( dst, expected, dedsired) (ma_int8 )ma_atomic_compare_and_swap_8( (ma_uint8* )dst, (ma_uint8 )expected, (ma_uint8 )dedsired) +#define ma_atomic_compare_and_swap_i16(dst, expected, dedsired) (ma_int16)ma_atomic_compare_and_swap_16((ma_uint16*)dst, (ma_uint16)expected, (ma_uint16)dedsired) +#define ma_atomic_compare_and_swap_i32(dst, expected, dedsired) (ma_int32)ma_atomic_compare_and_swap_32((ma_uint32*)dst, (ma_uint32)expected, (ma_uint32)dedsired) +#define ma_atomic_compare_and_swap_i64(dst, expected, dedsired) (ma_int64)ma_atomic_compare_and_swap_64((ma_uint64*)dst, (ma_uint64)expected, (ma_uint64)dedsired) + typedef union + { + ma_uint32 i; + float f; + } ma_atomic_if32; + typedef union + { + ma_uint64 i; + double f; + } ma_atomic_if64; +#define ma_atomic_clear_explicit_f32(ptr, order) 
ma_atomic_clear_explicit_32((ma_uint32*)ptr, order) +#define ma_atomic_clear_explicit_f64(ptr, order) ma_atomic_clear_explicit_64((ma_uint64*)ptr, order) + static MA_INLINE void ma_atomic_store_explicit_f32(volatile float* dst, float src, ma_atomic_memory_order order) + { + ma_atomic_if32 x; + x.f = src; + ma_atomic_store_explicit_32((volatile ma_uint32*)dst, x.i, order); + } + static MA_INLINE void ma_atomic_store_explicit_f64(volatile double* dst, double src, ma_atomic_memory_order order) + { + ma_atomic_if64 x; + x.f = src; + ma_atomic_store_explicit_64((volatile ma_uint64*)dst, x.i, order); + } + static MA_INLINE float ma_atomic_load_explicit_f32(volatile const float* ptr, ma_atomic_memory_order order) + { + ma_atomic_if32 r; + r.i = ma_atomic_load_explicit_32((volatile const ma_uint32*)ptr, order); + return r.f; + } + static MA_INLINE double ma_atomic_load_explicit_f64(volatile const double* ptr, ma_atomic_memory_order order) + { + ma_atomic_if64 r; + r.i = ma_atomic_load_explicit_64((volatile const ma_uint64*)ptr, order); + return r.f; + } + static MA_INLINE float ma_atomic_exchange_explicit_f32(volatile float* dst, float src, ma_atomic_memory_order order) + { + ma_atomic_if32 r; + ma_atomic_if32 x; + x.f = src; + r.i = ma_atomic_exchange_explicit_32((volatile ma_uint32*)dst, x.i, order); + return r.f; + } + static MA_INLINE double ma_atomic_exchange_explicit_f64(volatile double* dst, double src, ma_atomic_memory_order order) + { + ma_atomic_if64 r; + ma_atomic_if64 x; + x.f = src; + r.i = ma_atomic_exchange_explicit_64((volatile ma_uint64*)dst, x.i, order); + return r.f; + } + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_strong_explicit_f32(volatile float* dst, float* expected, float desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + ma_atomic_if32 d; + d.f = desired; + return ma_atomic_compare_exchange_strong_explicit_32((volatile ma_uint32*)dst, (ma_uint32*)expected, d.i, successOrder, failureOrder); + } + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_strong_explicit_f64(volatile double* dst, double* expected, double desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + ma_atomic_if64 d; + d.f = desired; + return ma_atomic_compare_exchange_strong_explicit_64((volatile ma_uint64*)dst, (ma_uint64*)expected, d.i, successOrder, failureOrder); + } + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_weak_explicit_f32(volatile float* dst, float* expected, float desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + ma_atomic_if32 d; + d.f = desired; + return ma_atomic_compare_exchange_weak_explicit_32((volatile ma_uint32*)dst, (ma_uint32*)expected, d.i, successOrder, failureOrder); + } + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_weak_explicit_f64(volatile double* dst, double* expected, double desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + ma_atomic_if64 d; + d.f = desired; + return ma_atomic_compare_exchange_weak_explicit_64((volatile ma_uint64*)dst, (ma_uint64*)expected, d.i, successOrder, failureOrder); + } + static MA_INLINE float ma_atomic_fetch_add_explicit_f32(volatile float* dst, float src, ma_atomic_memory_order order) + { + ma_atomic_if32 r; + ma_atomic_if32 x; + x.f = src; + r.i = ma_atomic_fetch_add_explicit_32((volatile ma_uint32*)dst, x.i, order); + return r.f; + } + static MA_INLINE double ma_atomic_fetch_add_explicit_f64(volatile double* dst, double src, ma_atomic_memory_order order) + { + 
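+    /*
+    As with the other floating-point helpers in this section, the value is passed through the
+    ma_atomic_if64 union so the operation itself is carried out by the 64-bit integer atomic on
+    the raw bit pattern of the double, with the result reinterpreted back to a double on return.
+    */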
ma_atomic_if64 r; + ma_atomic_if64 x; + x.f = src; + r.i = ma_atomic_fetch_add_explicit_64((volatile ma_uint64*)dst, x.i, order); + return r.f; + } + static MA_INLINE float ma_atomic_fetch_sub_explicit_f32(volatile float* dst, float src, ma_atomic_memory_order order) + { + ma_atomic_if32 r; + ma_atomic_if32 x; + x.f = src; + r.i = ma_atomic_fetch_sub_explicit_32((volatile ma_uint32*)dst, x.i, order); + return r.f; + } + static MA_INLINE double ma_atomic_fetch_sub_explicit_f64(volatile double* dst, double src, ma_atomic_memory_order order) + { + ma_atomic_if64 r; + ma_atomic_if64 x; + x.f = src; + r.i = ma_atomic_fetch_sub_explicit_64((volatile ma_uint64*)dst, x.i, order); + return r.f; + } + static MA_INLINE float ma_atomic_fetch_or_explicit_f32(volatile float* dst, float src, ma_atomic_memory_order order) + { + ma_atomic_if32 r; + ma_atomic_if32 x; + x.f = src; + r.i = ma_atomic_fetch_or_explicit_32((volatile ma_uint32*)dst, x.i, order); + return r.f; + } + static MA_INLINE double ma_atomic_fetch_or_explicit_f64(volatile double* dst, double src, ma_atomic_memory_order order) + { + ma_atomic_if64 r; + ma_atomic_if64 x; + x.f = src; + r.i = ma_atomic_fetch_or_explicit_64((volatile ma_uint64*)dst, x.i, order); + return r.f; + } + static MA_INLINE float ma_atomic_fetch_xor_explicit_f32(volatile float* dst, float src, ma_atomic_memory_order order) + { + ma_atomic_if32 r; + ma_atomic_if32 x; + x.f = src; + r.i = ma_atomic_fetch_xor_explicit_32((volatile ma_uint32*)dst, x.i, order); + return r.f; + } + static MA_INLINE double ma_atomic_fetch_xor_explicit_f64(volatile double* dst, double src, ma_atomic_memory_order order) + { + ma_atomic_if64 r; + ma_atomic_if64 x; + x.f = src; + r.i = ma_atomic_fetch_xor_explicit_64((volatile ma_uint64*)dst, x.i, order); + return r.f; + } + static MA_INLINE float ma_atomic_fetch_and_explicit_f32(volatile float* dst, float src, ma_atomic_memory_order order) + { + ma_atomic_if32 r; + ma_atomic_if32 x; + x.f = src; + r.i = ma_atomic_fetch_and_explicit_32((volatile ma_uint32*)dst, x.i, order); + return r.f; + } + static MA_INLINE double ma_atomic_fetch_and_explicit_f64(volatile double* dst, double src, ma_atomic_memory_order order) + { + ma_atomic_if64 r; + ma_atomic_if64 x; + x.f = src; + r.i = ma_atomic_fetch_and_explicit_64((volatile ma_uint64*)dst, x.i, order); + return r.f; + } +#define ma_atomic_clear_f32(ptr) (float )ma_atomic_clear_explicit_f32(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_f64(ptr) (double)ma_atomic_clear_explicit_f64(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_f32(dst, src) ma_atomic_store_explicit_f32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_f64(dst, src) ma_atomic_store_explicit_f64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_f32(ptr) (float )ma_atomic_load_explicit_f32(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_f64(ptr) (double)ma_atomic_load_explicit_f64(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_f32(dst, src) (float )ma_atomic_exchange_explicit_f32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_f64(dst, src) (double)ma_atomic_exchange_explicit_f64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_f32(dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_f32(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_f64(dst, expected, desired) 
ma_atomic_compare_exchange_strong_explicit_f64(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_f32(dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_f32(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_f64(dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_f64(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_f32(dst, src) ma_atomic_fetch_add_explicit_f32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_f64(dst, src) ma_atomic_fetch_add_explicit_f64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_f32(dst, src) ma_atomic_fetch_sub_explicit_f32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_f64(dst, src) ma_atomic_fetch_sub_explicit_f64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_f32(dst, src) ma_atomic_fetch_or_explicit_f32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_f64(dst, src) ma_atomic_fetch_or_explicit_f64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_f32(dst, src) ma_atomic_fetch_xor_explicit_f32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_f64(dst, src) ma_atomic_fetch_xor_explicit_f64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_f32(dst, src) ma_atomic_fetch_and_explicit_f32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_f64(dst, src) ma_atomic_fetch_and_explicit_f64(dst, src, ma_atomic_memory_order_seq_cst) + static MA_INLINE float ma_atomic_compare_and_swap_f32(volatile float* dst, float expected, float desired) + { + ma_atomic_if32 r; + ma_atomic_if32 e, d; + e.f = expected; + d.f = desired; + r.i = ma_atomic_compare_and_swap_32((volatile ma_uint32*)dst, e.i, d.i); + return r.f; + } + static MA_INLINE double ma_atomic_compare_and_swap_f64(volatile double* dst, double expected, double desired) + { + ma_atomic_if64 r; + ma_atomic_if64 e, d; + e.f = expected; + d.f = desired; + r.i = ma_atomic_compare_and_swap_64((volatile ma_uint64*)dst, e.i, d.i); + return r.f; + } + typedef ma_atomic_flag ma_atomic_spinlock; + static MA_INLINE void ma_atomic_spinlock_lock(volatile ma_atomic_spinlock* pSpinlock) + { + for (;;) { + if (ma_atomic_flag_test_and_set_explicit(pSpinlock, ma_atomic_memory_order_acquire) == 0) { + break; + } + while (c89atoimc_flag_load_explicit(pSpinlock, ma_atomic_memory_order_relaxed) == 1) { + } + } + } + static MA_INLINE void ma_atomic_spinlock_unlock(volatile ma_atomic_spinlock* pSpinlock) + { + ma_atomic_flag_clear_explicit(pSpinlock, ma_atomic_memory_order_release); + } +#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) +#pragma GCC diagnostic pop +#endif +#if defined(__cplusplus) +} +#endif +#endif +/* ma_atomic.h end */ + +#define MA_ATOMIC_SAFE_TYPE_IMPL(c89TypeExtension, type) \ + static MA_INLINE ma_##type ma_atomic_##type##_get(ma_atomic_##type* x) \ + { \ + return (ma_##type)ma_atomic_load_##c89TypeExtension(&x->value); \ + } \ + static MA_INLINE void ma_atomic_##type##_set(ma_atomic_##type* x, ma_##type value) \ + { \ + ma_atomic_store_##c89TypeExtension(&x->value, value); \ + } \ + static MA_INLINE ma_##type ma_atomic_##type##_exchange(ma_atomic_##type* x, ma_##type value) \ + { \ + return 
(ma_##type)ma_atomic_exchange_##c89TypeExtension(&x->value, value); \ + } \ + static MA_INLINE ma_bool32 ma_atomic_##type##_compare_exchange(ma_atomic_##type* x, ma_##type* expected, ma_##type desired) \ + { \ + return ma_atomic_compare_exchange_weak_##c89TypeExtension(&x->value, expected, desired); \ + } \ + static MA_INLINE ma_##type ma_atomic_##type##_fetch_add(ma_atomic_##type* x, ma_##type y) \ + { \ + return (ma_##type)ma_atomic_fetch_add_##c89TypeExtension(&x->value, y); \ + } \ + static MA_INLINE ma_##type ma_atomic_##type##_fetch_sub(ma_atomic_##type* x, ma_##type y) \ + { \ + return (ma_##type)ma_atomic_fetch_sub_##c89TypeExtension(&x->value, y); \ + } \ + static MA_INLINE ma_##type ma_atomic_##type##_fetch_or(ma_atomic_##type* x, ma_##type y) \ + { \ + return (ma_##type)ma_atomic_fetch_or_##c89TypeExtension(&x->value, y); \ + } \ + static MA_INLINE ma_##type ma_atomic_##type##_fetch_xor(ma_atomic_##type* x, ma_##type y) \ + { \ + return (ma_##type)ma_atomic_fetch_xor_##c89TypeExtension(&x->value, y); \ + } \ + static MA_INLINE ma_##type ma_atomic_##type##_fetch_and(ma_atomic_##type* x, ma_##type y) \ + { \ + return (ma_##type)ma_atomic_fetch_and_##c89TypeExtension(&x->value, y); \ + } \ + static MA_INLINE ma_##type ma_atomic_##type##_compare_and_swap(ma_atomic_##type* x, ma_##type expected, ma_##type desired) \ + { \ + return (ma_##type)ma_atomic_compare_and_swap_##c89TypeExtension(&x->value, expected, desired); \ + } \ + +#define MA_ATOMIC_SAFE_TYPE_IMPL_PTR(type) \ + static MA_INLINE ma_##type* ma_atomic_ptr_##type##_get(ma_atomic_ptr_##type* x) \ + { \ + return ma_atomic_load_ptr((void**)&x->value); \ + } \ + static MA_INLINE void ma_atomic_ptr_##type##_set(ma_atomic_ptr_##type* x, ma_##type* value) \ + { \ + ma_atomic_store_ptr((void**)&x->value, (void*)value); \ + } \ + static MA_INLINE ma_##type* ma_atomic_ptr_##type##_exchange(ma_atomic_ptr_##type* x, ma_##type* value) \ + { \ + return ma_atomic_exchange_ptr((void**)&x->value, (void*)value); \ + } \ + static MA_INLINE ma_bool32 ma_atomic_ptr_##type##_compare_exchange(ma_atomic_ptr_##type* x, ma_##type** expected, ma_##type* desired) \ + { \ + return ma_atomic_compare_exchange_weak_ptr((void**)&x->value, (void*)expected, (void*)desired); \ + } \ + static MA_INLINE ma_##type* ma_atomic_ptr_##type##_compare_and_swap(ma_atomic_ptr_##type* x, ma_##type* expected, ma_##type* desired) \ + { \ + return (ma_##type*)ma_atomic_compare_and_swap_ptr((void**)&x->value, (void*)expected, (void*)desired); \ + } \ + +MA_ATOMIC_SAFE_TYPE_IMPL(32, uint32) +MA_ATOMIC_SAFE_TYPE_IMPL(i32, int32) +MA_ATOMIC_SAFE_TYPE_IMPL(64, uint64) +MA_ATOMIC_SAFE_TYPE_IMPL(f32, float) +MA_ATOMIC_SAFE_TYPE_IMPL(32, bool32) + +#if !defined(MA_NO_DEVICE_IO) +MA_ATOMIC_SAFE_TYPE_IMPL(i32, device_state) +#endif + + +MA_API ma_uint64 ma_calculate_frame_count_after_resampling(ma_uint32 sampleRateOut, ma_uint32 sampleRateIn, ma_uint64 frameCountIn) +{ + /* This is based on the calculation in ma_linear_resampler_get_expected_output_frame_count(). 
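+    As a rough worked example (illustrative only): converting 1000 input frames from 44100 Hz to
+    48000 Hz gives outputFrameCount = (1000 * 48000) / 44100 = 1088 after truncation, and the
+    check further below may then add one extra frame to account for that truncation.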
*/ + ma_uint64 outputFrameCount; + ma_uint64 preliminaryInputFrameCountFromFrac; + ma_uint64 preliminaryInputFrameCount; + + if (sampleRateIn == 0 || sampleRateOut == 0 || frameCountIn == 0) { + return 0; + } + + if (sampleRateOut == sampleRateIn) { + return frameCountIn; + } + + outputFrameCount = (frameCountIn * sampleRateOut) / sampleRateIn; + + preliminaryInputFrameCountFromFrac = (outputFrameCount * (sampleRateIn / sampleRateOut)) / sampleRateOut; + preliminaryInputFrameCount = (outputFrameCount * (sampleRateIn % sampleRateOut)) + preliminaryInputFrameCountFromFrac; + + if (preliminaryInputFrameCount <= frameCountIn) { + outputFrameCount += 1; + } + + return outputFrameCount; +} + +#ifndef MA_DATA_CONVERTER_STACK_BUFFER_SIZE +#define MA_DATA_CONVERTER_STACK_BUFFER_SIZE 4096 +#endif + + + +#if defined(MA_WIN32) +static ma_result ma_result_from_GetLastError(DWORD error) +{ + switch (error) + { + case ERROR_SUCCESS: return MA_SUCCESS; + case ERROR_PATH_NOT_FOUND: return MA_DOES_NOT_EXIST; + case ERROR_TOO_MANY_OPEN_FILES: return MA_TOO_MANY_OPEN_FILES; + case ERROR_NOT_ENOUGH_MEMORY: return MA_OUT_OF_MEMORY; + case ERROR_DISK_FULL: return MA_NO_SPACE; + case ERROR_HANDLE_EOF: return MA_AT_END; + case ERROR_NEGATIVE_SEEK: return MA_BAD_SEEK; + case ERROR_INVALID_PARAMETER: return MA_INVALID_ARGS; + case ERROR_ACCESS_DENIED: return MA_ACCESS_DENIED; + case ERROR_SEM_TIMEOUT: return MA_TIMEOUT; + case ERROR_FILE_NOT_FOUND: return MA_DOES_NOT_EXIST; + default: break; + } + + return MA_ERROR; +} +#endif /* MA_WIN32 */ + + +/******************************************************************************* + +Threading + +*******************************************************************************/ +static MA_INLINE ma_result ma_spinlock_lock_ex(volatile ma_spinlock* pSpinlock, ma_bool32 yield) +{ + if (pSpinlock == NULL) { + return MA_INVALID_ARGS; + } + + for (;;) { + if (ma_atomic_exchange_explicit_32(pSpinlock, 1, ma_atomic_memory_order_acquire) == 0) { + break; + } + + while (ma_atomic_load_explicit_32(pSpinlock, ma_atomic_memory_order_relaxed) == 1) { + if (yield) { + ma_yield(); + } + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_spinlock_lock(volatile ma_spinlock* pSpinlock) +{ + return ma_spinlock_lock_ex(pSpinlock, MA_TRUE); +} + +MA_API ma_result ma_spinlock_lock_noyield(volatile ma_spinlock* pSpinlock) +{ + return ma_spinlock_lock_ex(pSpinlock, MA_FALSE); +} + +MA_API ma_result ma_spinlock_unlock(volatile ma_spinlock* pSpinlock) +{ + if (pSpinlock == NULL) { + return MA_INVALID_ARGS; + } + + ma_atomic_store_explicit_32(pSpinlock, 0, ma_atomic_memory_order_release); + return MA_SUCCESS; +} + + +#ifndef MA_NO_THREADING +#if defined(MA_POSIX) +#define MA_THREADCALL +typedef void* ma_thread_result; +#elif defined(MA_WIN32) +#define MA_THREADCALL WINAPI +typedef unsigned long ma_thread_result; +#endif + +typedef ma_thread_result (MA_THREADCALL * ma_thread_entry_proc)(void* pData); + +#ifdef MA_POSIX +static ma_result ma_thread_create__posix(ma_thread* pThread, ma_thread_priority priority, size_t stackSize, ma_thread_entry_proc entryProc, void* pData) +{ + int result; + pthread_attr_t* pAttr = NULL; + +#if !defined(__EMSCRIPTEN__) + /* Try setting the thread priority. It's not critical if anything fails here. */ + pthread_attr_t attr; + if (pthread_attr_init(&attr) == 0) { + int scheduler = -1; + + /* We successfully initialized our attributes object so we can assign the pointer so it's passed into pthread_create(). */ + pAttr = &attr; + + /* We need to set the scheduler policy. 
Only do this if the OS supports pthread_attr_setschedpolicy() */ +#if !defined(MA_BEOS) + { + if (priority == ma_thread_priority_idle) { +#ifdef SCHED_IDLE + if (pthread_attr_setschedpolicy(&attr, SCHED_IDLE) == 0) { + scheduler = SCHED_IDLE; + } +#endif + } else if (priority == ma_thread_priority_realtime) { +#ifdef SCHED_FIFO + if (pthread_attr_setschedpolicy(&attr, SCHED_FIFO) == 0) { + scheduler = SCHED_FIFO; + } +#endif +#ifdef MA_LINUX + } else { + scheduler = sched_getscheduler(0); +#endif + } + } +#endif + + if (stackSize > 0) { + pthread_attr_setstacksize(&attr, stackSize); + } + + if (scheduler != -1) { + int priorityMin = sched_get_priority_min(scheduler); + int priorityMax = sched_get_priority_max(scheduler); + int priorityStep = (priorityMax - priorityMin) / 7; /* 7 = number of priorities supported by miniaudio. */ + + struct sched_param sched; + if (pthread_attr_getschedparam(&attr, &sched) == 0) { + if (priority == ma_thread_priority_idle) { + sched.sched_priority = priorityMin; + } else if (priority == ma_thread_priority_realtime) { + sched.sched_priority = priorityMax; + } else { + sched.sched_priority += ((int)priority + 5) * priorityStep; /* +5 because the lowest priority is -5. */ + if (sched.sched_priority < priorityMin) { + sched.sched_priority = priorityMin; + } + if (sched.sched_priority > priorityMax) { + sched.sched_priority = priorityMax; + } + } + + /* I'm not treating a failure of setting the priority as a critical error so not checking the return value here. */ + pthread_attr_setschedparam(&attr, &sched); + } + } + } +#else + /* It's the emscripten build. We'll have a few unused parameters. */ + (void)priority; + (void)stackSize; +#endif + + result = pthread_create((pthread_t*)pThread, pAttr, entryProc, pData); + + /* The thread attributes object is no longer required. */ + if (pAttr != NULL) { + pthread_attr_destroy(pAttr); + } + + if (result != 0) { + return ma_result_from_errno(result); + } + + return MA_SUCCESS; +} + +static void ma_thread_wait__posix(ma_thread* pThread) +{ + pthread_join((pthread_t)*pThread, NULL); +} + + +static ma_result ma_mutex_init__posix(ma_mutex* pMutex) +{ + int result = pthread_mutex_init((pthread_mutex_t*)pMutex, NULL); + if (result != 0) { + return ma_result_from_errno(result); + } + + return MA_SUCCESS; +} + +static void ma_mutex_uninit__posix(ma_mutex* pMutex) +{ + pthread_mutex_destroy((pthread_mutex_t*)pMutex); +} + +static void ma_mutex_lock__posix(ma_mutex* pMutex) +{ + pthread_mutex_lock((pthread_mutex_t*)pMutex); +} + +static void ma_mutex_unlock__posix(ma_mutex* pMutex) +{ + pthread_mutex_unlock((pthread_mutex_t*)pMutex); +} + + +static ma_result ma_event_init__posix(ma_event* pEvent) +{ + int result; + + result = pthread_mutex_init((pthread_mutex_t*)&pEvent->lock, NULL); + if (result != 0) { + return ma_result_from_errno(result); + } + + result = pthread_cond_init((pthread_cond_t*)&pEvent->cond, NULL); + if (result != 0) { + pthread_mutex_destroy((pthread_mutex_t*)&pEvent->lock); + return ma_result_from_errno(result); + } + + pEvent->value = 0; + return MA_SUCCESS; +} + +static void ma_event_uninit__posix(ma_event* pEvent) +{ + pthread_cond_destroy((pthread_cond_t*)&pEvent->cond); + pthread_mutex_destroy((pthread_mutex_t*)&pEvent->lock); +} + +static ma_result ma_event_wait__posix(ma_event* pEvent) +{ + pthread_mutex_lock((pthread_mutex_t*)&pEvent->lock); + { + while (pEvent->value == 0) { + pthread_cond_wait((pthread_cond_t*)&pEvent->cond, (pthread_mutex_t*)&pEvent->lock); + } + pEvent->value = 0; /* Auto-reset. 
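+        Clearing the value here makes the event behave like an auto-reset event: each signal
+        releases at most one pending ma_event_wait(), and later waits block again until the next
+        ma_event_signal(). The while loop above re-checks pEvent->value because
+        pthread_cond_wait() is allowed to wake up spuriously.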
*/ + } + pthread_mutex_unlock((pthread_mutex_t*)&pEvent->lock); + + return MA_SUCCESS; +} + +static ma_result ma_event_signal__posix(ma_event* pEvent) +{ + pthread_mutex_lock((pthread_mutex_t*)&pEvent->lock); + { + pEvent->value = 1; + pthread_cond_signal((pthread_cond_t*)&pEvent->cond); + } + pthread_mutex_unlock((pthread_mutex_t*)&pEvent->lock); + + return MA_SUCCESS; +} + + +static ma_result ma_semaphore_init__posix(int initialValue, ma_semaphore* pSemaphore) +{ + int result; + + if (pSemaphore == NULL) { + return MA_INVALID_ARGS; + } + + pSemaphore->value = initialValue; + + result = pthread_mutex_init((pthread_mutex_t*)&pSemaphore->lock, NULL); + if (result != 0) { + return ma_result_from_errno(result); /* Failed to create mutex. */ + } + + result = pthread_cond_init((pthread_cond_t*)&pSemaphore->cond, NULL); + if (result != 0) { + pthread_mutex_destroy((pthread_mutex_t*)&pSemaphore->lock); + return ma_result_from_errno(result); /* Failed to create condition variable. */ + } + + return MA_SUCCESS; +} + +static void ma_semaphore_uninit__posix(ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + return; + } + + pthread_cond_destroy((pthread_cond_t*)&pSemaphore->cond); + pthread_mutex_destroy((pthread_mutex_t*)&pSemaphore->lock); +} + +static ma_result ma_semaphore_wait__posix(ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + return MA_INVALID_ARGS; + } + + pthread_mutex_lock((pthread_mutex_t*)&pSemaphore->lock); + { + /* We need to wait on a condition variable before escaping. We can't return from this function until the semaphore has been signaled. */ + while (pSemaphore->value == 0) { + pthread_cond_wait((pthread_cond_t*)&pSemaphore->cond, (pthread_mutex_t*)&pSemaphore->lock); + } + + pSemaphore->value -= 1; + } + pthread_mutex_unlock((pthread_mutex_t*)&pSemaphore->lock); + + return MA_SUCCESS; +} + +static ma_result ma_semaphore_release__posix(ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + return MA_INVALID_ARGS; + } + + pthread_mutex_lock((pthread_mutex_t*)&pSemaphore->lock); + { + pSemaphore->value += 1; + pthread_cond_signal((pthread_cond_t*)&pSemaphore->cond); + } + pthread_mutex_unlock((pthread_mutex_t*)&pSemaphore->lock); + + return MA_SUCCESS; +} +#elif defined(MA_WIN32) +static int ma_thread_priority_to_win32(ma_thread_priority priority) +{ + switch (priority) { + case ma_thread_priority_idle: return THREAD_PRIORITY_IDLE; + case ma_thread_priority_lowest: return THREAD_PRIORITY_LOWEST; + case ma_thread_priority_low: return THREAD_PRIORITY_BELOW_NORMAL; + case ma_thread_priority_normal: return THREAD_PRIORITY_NORMAL; + case ma_thread_priority_high: return THREAD_PRIORITY_ABOVE_NORMAL; + case ma_thread_priority_highest: return THREAD_PRIORITY_HIGHEST; + case ma_thread_priority_realtime: return THREAD_PRIORITY_TIME_CRITICAL; + default: return THREAD_PRIORITY_NORMAL; + } +} + +static ma_result ma_thread_create__win32(ma_thread* pThread, ma_thread_priority priority, size_t stackSize, ma_thread_entry_proc entryProc, void* pData) +{ + DWORD threadID; /* Not used. Only used for passing into CreateThread() so it doesn't fail on Windows 98. 
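+    The handle returned by CreateThread() below is stored in *pThread and is later waited on and
+    closed by ma_thread_wait__win32().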
*/ + + *pThread = CreateThread(NULL, stackSize, entryProc, pData, 0, &threadID); + if (*pThread == NULL) { + return ma_result_from_GetLastError(GetLastError()); + } + + SetThreadPriority((HANDLE)*pThread, ma_thread_priority_to_win32(priority)); + + return MA_SUCCESS; +} + +static void ma_thread_wait__win32(ma_thread* pThread) +{ + WaitForSingleObject((HANDLE)*pThread, INFINITE); + CloseHandle((HANDLE)*pThread); +} + + +static ma_result ma_mutex_init__win32(ma_mutex* pMutex) +{ + *pMutex = CreateEventA(NULL, FALSE, TRUE, NULL); + if (*pMutex == NULL) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + +static void ma_mutex_uninit__win32(ma_mutex* pMutex) +{ + CloseHandle((HANDLE)*pMutex); +} + +static void ma_mutex_lock__win32(ma_mutex* pMutex) +{ + WaitForSingleObject((HANDLE)*pMutex, INFINITE); +} + +static void ma_mutex_unlock__win32(ma_mutex* pMutex) +{ + SetEvent((HANDLE)*pMutex); +} + + +static ma_result ma_event_init__win32(ma_event* pEvent) +{ + *pEvent = CreateEventA(NULL, FALSE, FALSE, NULL); + if (*pEvent == NULL) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + +static void ma_event_uninit__win32(ma_event* pEvent) +{ + CloseHandle((HANDLE)*pEvent); +} + +static ma_result ma_event_wait__win32(ma_event* pEvent) +{ + DWORD result = WaitForSingleObject((HANDLE)*pEvent, INFINITE); + if (result == WAIT_OBJECT_0) { + return MA_SUCCESS; + } + + if (result == WAIT_TIMEOUT) { + return MA_TIMEOUT; + } + + return ma_result_from_GetLastError(GetLastError()); +} + +static ma_result ma_event_signal__win32(ma_event* pEvent) +{ + BOOL result = SetEvent((HANDLE)*pEvent); + if (result == 0) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + + +static ma_result ma_semaphore_init__win32(int initialValue, ma_semaphore* pSemaphore) +{ + *pSemaphore = CreateSemaphoreW(NULL, (LONG)initialValue, LONG_MAX, NULL); + if (*pSemaphore == NULL) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + +static void ma_semaphore_uninit__win32(ma_semaphore* pSemaphore) +{ + CloseHandle((HANDLE)*pSemaphore); +} + +static ma_result ma_semaphore_wait__win32(ma_semaphore* pSemaphore) +{ + DWORD result = WaitForSingleObject((HANDLE)*pSemaphore, INFINITE); + if (result == WAIT_OBJECT_0) { + return MA_SUCCESS; + } + + if (result == WAIT_TIMEOUT) { + return MA_TIMEOUT; + } + + return ma_result_from_GetLastError(GetLastError()); +} + +static ma_result ma_semaphore_release__win32(ma_semaphore* pSemaphore) +{ + BOOL result = ReleaseSemaphore((HANDLE)*pSemaphore, 1, NULL); + if (result == 0) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} +#endif + +typedef struct +{ + ma_thread_entry_proc entryProc; + void* pData; + ma_allocation_callbacks allocationCallbacks; +} ma_thread_proxy_data; + +static ma_thread_result MA_THREADCALL ma_thread_entry_proxy(void* pData) +{ + ma_thread_proxy_data* pProxyData = (ma_thread_proxy_data*)pData; + ma_thread_entry_proc entryProc; + void* pEntryProcData; + ma_thread_result result; + +#if defined(MA_ON_THREAD_ENTRY) + MA_ON_THREAD_ENTRY +#endif + + entryProc = pProxyData->entryProc; + pEntryProcData = pProxyData->pData; + + /* Free the proxy data before getting into the real thread entry proc. 
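+    The proxy object was heap-allocated by ma_thread_create() purely to carry the entry point and
+    its data across the pthread_create()/CreateThread() boundary, so it is owned by this thread.
+    entryProc and pEntryProcData were copied to locals above precisely because the proxy memory is
+    released before entryProc is called.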
*/ + ma_free(pProxyData, &pProxyData->allocationCallbacks); + + result = entryProc(pEntryProcData); + +#if defined(MA_ON_THREAD_EXIT) + MA_ON_THREAD_EXIT +#endif + + return result; +} + +static ma_result ma_thread_create(ma_thread* pThread, ma_thread_priority priority, size_t stackSize, ma_thread_entry_proc entryProc, void* pData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_result result; + ma_thread_proxy_data* pProxyData; + + if (pThread == NULL || entryProc == NULL) { + return MA_INVALID_ARGS; + } + + pProxyData = (ma_thread_proxy_data*)ma_malloc(sizeof(*pProxyData), pAllocationCallbacks); /* Will be freed by the proxy entry proc. */ + if (pProxyData == NULL) { + return MA_OUT_OF_MEMORY; + } + +#if defined(MA_THREAD_DEFAULT_STACK_SIZE) + if (stackSize == 0) { + stackSize = MA_THREAD_DEFAULT_STACK_SIZE; + } +#endif + + pProxyData->entryProc = entryProc; + pProxyData->pData = pData; + ma_allocation_callbacks_init_copy(&pProxyData->allocationCallbacks, pAllocationCallbacks); + +#if defined(MA_POSIX) + result = ma_thread_create__posix(pThread, priority, stackSize, ma_thread_entry_proxy, pProxyData); +#elif defined(MA_WIN32) + result = ma_thread_create__win32(pThread, priority, stackSize, ma_thread_entry_proxy, pProxyData); +#endif + + if (result != MA_SUCCESS) { + ma_free(pProxyData, pAllocationCallbacks); + return result; + } + + return MA_SUCCESS; +} + +static void ma_thread_wait(ma_thread* pThread) +{ + if (pThread == NULL) { + return; + } + +#if defined(MA_POSIX) + ma_thread_wait__posix(pThread); +#elif defined(MA_WIN32) + ma_thread_wait__win32(pThread); +#endif +} + + +MA_API ma_result ma_mutex_init(ma_mutex* pMutex) +{ + if (pMutex == NULL) { + MA_ASSERT(MA_FALSE); /* Fire an assert so the caller is aware of this bug. */ + return MA_INVALID_ARGS; + } + +#if defined(MA_POSIX) + return ma_mutex_init__posix(pMutex); +#elif defined(MA_WIN32) + return ma_mutex_init__win32(pMutex); +#endif +} + +MA_API void ma_mutex_uninit(ma_mutex* pMutex) +{ + if (pMutex == NULL) { + return; + } + +#if defined(MA_POSIX) + ma_mutex_uninit__posix(pMutex); +#elif defined(MA_WIN32) + ma_mutex_uninit__win32(pMutex); +#endif +} + +MA_API void ma_mutex_lock(ma_mutex* pMutex) +{ + if (pMutex == NULL) { + MA_ASSERT(MA_FALSE); /* Fire an assert so the caller is aware of this bug. */ + return; + } + +#if defined(MA_POSIX) + ma_mutex_lock__posix(pMutex); +#elif defined(MA_WIN32) + ma_mutex_lock__win32(pMutex); +#endif +} + +MA_API void ma_mutex_unlock(ma_mutex* pMutex) +{ + if (pMutex == NULL) { + MA_ASSERT(MA_FALSE); /* Fire an assert so the caller is aware of this bug. */ + return; + } + +#if defined(MA_POSIX) + ma_mutex_unlock__posix(pMutex); +#elif defined(MA_WIN32) + ma_mutex_unlock__win32(pMutex); +#endif +} + + +MA_API ma_result ma_event_init(ma_event* pEvent) +{ + if (pEvent == NULL) { + MA_ASSERT(MA_FALSE); /* Fire an assert so the caller is aware of this bug. 
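+        In release builds MA_ASSERT() will typically compile to nothing, so the function still
+        fails gracefully with MA_INVALID_ARGS below rather than dereferencing a null pointer.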
*/ + return MA_INVALID_ARGS; + } + +#if defined(MA_POSIX) + return ma_event_init__posix(pEvent); +#elif defined(MA_WIN32) + return ma_event_init__win32(pEvent); +#endif +} + +#if 0 +static ma_result ma_event_alloc_and_init(ma_event** ppEvent, ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_result result; + ma_event* pEvent; + + if (ppEvent == NULL) { + return MA_INVALID_ARGS; + } + + *ppEvent = NULL; + + pEvent = ma_malloc(sizeof(*pEvent), pAllocationCallbacks); + if (pEvent == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_event_init(pEvent); + if (result != MA_SUCCESS) { + ma_free(pEvent, pAllocationCallbacks); + return result; + } + + *ppEvent = pEvent; + return result; +} +#endif + +MA_API void ma_event_uninit(ma_event* pEvent) +{ + if (pEvent == NULL) { + return; + } + +#if defined(MA_POSIX) + ma_event_uninit__posix(pEvent); +#elif defined(MA_WIN32) + ma_event_uninit__win32(pEvent); +#endif +} + +#if 0 +static void ma_event_uninit_and_free(ma_event* pEvent, ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pEvent == NULL) { + return; + } + + ma_event_uninit(pEvent); + ma_free(pEvent, pAllocationCallbacks); +} +#endif + +MA_API ma_result ma_event_wait(ma_event* pEvent) +{ + if (pEvent == NULL) { + MA_ASSERT(MA_FALSE); /* Fire an assert to the caller is aware of this bug. */ + return MA_INVALID_ARGS; + } + +#if defined(MA_POSIX) + return ma_event_wait__posix(pEvent); +#elif defined(MA_WIN32) + return ma_event_wait__win32(pEvent); +#endif +} + +MA_API ma_result ma_event_signal(ma_event* pEvent) +{ + if (pEvent == NULL) { + MA_ASSERT(MA_FALSE); /* Fire an assert to the caller is aware of this bug. */ + return MA_INVALID_ARGS; + } + +#if defined(MA_POSIX) + return ma_event_signal__posix(pEvent); +#elif defined(MA_WIN32) + return ma_event_signal__win32(pEvent); +#endif +} + + +MA_API ma_result ma_semaphore_init(int initialValue, ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + MA_ASSERT(MA_FALSE); /* Fire an assert so the caller is aware of this bug. */ + return MA_INVALID_ARGS; + } + +#if defined(MA_POSIX) + return ma_semaphore_init__posix(initialValue, pSemaphore); +#elif defined(MA_WIN32) + return ma_semaphore_init__win32(initialValue, pSemaphore); +#endif +} + +MA_API void ma_semaphore_uninit(ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + MA_ASSERT(MA_FALSE); /* Fire an assert so the caller is aware of this bug. */ + return; + } + +#if defined(MA_POSIX) + ma_semaphore_uninit__posix(pSemaphore); +#elif defined(MA_WIN32) + ma_semaphore_uninit__win32(pSemaphore); +#endif +} + +MA_API ma_result ma_semaphore_wait(ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + MA_ASSERT(MA_FALSE); /* Fire an assert so the caller is aware of this bug. */ + return MA_INVALID_ARGS; + } + +#if defined(MA_POSIX) + return ma_semaphore_wait__posix(pSemaphore); +#elif defined(MA_WIN32) + return ma_semaphore_wait__win32(pSemaphore); +#endif +} + +MA_API ma_result ma_semaphore_release(ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + MA_ASSERT(MA_FALSE); /* Fire an assert so the caller is aware of this bug. */ + return MA_INVALID_ARGS; + } + +#if defined(MA_POSIX) + return ma_semaphore_release__posix(pSemaphore); +#elif defined(MA_WIN32) + return ma_semaphore_release__win32(pSemaphore); +#endif +} +#else +/* MA_NO_THREADING is set which means threading is disabled. Threading is required by some API families. If any of these are enabled we need to throw an error. 
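   (Aside: a minimal sketch of the ma_event API above, which is only meaningful when
   threading is enabled. The wait and the signal would normally happen on different
   threads; they are shown on one thread here purely to illustrate the call sequence.

       #include "miniaudio.h"

       int main(void)
       {
           ma_event workReady;

           if (ma_event_init(&workReady) != MA_SUCCESS) {
               return -1;
           }

           ma_event_signal(&workReady);   // producer side: wakes a single waiter
           ma_event_wait(&workReady);     // consumer side: returns because it was just signalled

           ma_event_uninit(&workReady);
           return 0;
       }
   )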
*/ +#ifndef MA_NO_DEVICE_IO +#error "MA_NO_THREADING cannot be used without MA_NO_DEVICE_IO"; +#endif +#endif /* MA_NO_THREADING */ + + + +#define MA_FENCE_COUNTER_MAX 0x7FFFFFFF + +MA_API ma_result ma_fence_init(ma_fence* pFence) +{ + if (pFence == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFence); + pFence->counter = 0; + +#ifndef MA_NO_THREADING + { + ma_result result; + + result = ma_event_init(&pFence->e); + if (result != MA_SUCCESS) { + return result; + } + } +#endif + + return MA_SUCCESS; +} + +MA_API void ma_fence_uninit(ma_fence* pFence) +{ + if (pFence == NULL) { + return; + } + +#ifndef MA_NO_THREADING + { + ma_event_uninit(&pFence->e); + } +#endif + + MA_ZERO_OBJECT(pFence); +} + +MA_API ma_result ma_fence_acquire(ma_fence* pFence) +{ + if (pFence == NULL) { + return MA_INVALID_ARGS; + } + + for (;;) { + ma_uint32 oldCounter = ma_atomic_load_32(&pFence->counter); + ma_uint32 newCounter = oldCounter + 1; + + /* Make sure we're not about to exceed our maximum value. */ + if (newCounter > MA_FENCE_COUNTER_MAX) { + MA_ASSERT(MA_FALSE); + return MA_OUT_OF_RANGE; + } + + if (ma_atomic_compare_exchange_weak_32(&pFence->counter, &oldCounter, newCounter)) { + return MA_SUCCESS; + } else { + if (oldCounter == MA_FENCE_COUNTER_MAX) { + MA_ASSERT(MA_FALSE); + return MA_OUT_OF_RANGE; /* The other thread took the last available slot. Abort. */ + } + } + } + + /* Should never get here. */ + /*return MA_SUCCESS;*/ +} + +MA_API ma_result ma_fence_release(ma_fence* pFence) +{ + if (pFence == NULL) { + return MA_INVALID_ARGS; + } + + for (;;) { + ma_uint32 oldCounter = ma_atomic_load_32(&pFence->counter); + ma_uint32 newCounter = oldCounter - 1; + + if (oldCounter == 0) { + MA_ASSERT(MA_FALSE); + return MA_INVALID_OPERATION; /* Acquire/release mismatch. */ + } + + if (ma_atomic_compare_exchange_weak_32(&pFence->counter, &oldCounter, newCounter)) { +#ifndef MA_NO_THREADING + { + if (newCounter == 0) { + ma_event_signal(&pFence->e); /* <-- ma_fence_wait() will be waiting on this. */ + } + } +#endif + + return MA_SUCCESS; + } else { + if (oldCounter == 0) { + MA_ASSERT(MA_FALSE); + return MA_INVALID_OPERATION; /* Another thread has taken the 0 slot. Acquire/release mismatch. */ + } + } + } + + /* Should never get here. */ + /*return MA_SUCCESS;*/ +} + +MA_API ma_result ma_fence_wait(ma_fence* pFence) +{ + if (pFence == NULL) { + return MA_INVALID_ARGS; + } + + for (;;) { + ma_uint32 counter; + + counter = ma_atomic_load_32(&pFence->counter); + if (counter == 0) { + /* + Counter has hit zero. By the time we get here some other thread may have acquired the + fence again, but that is where the caller needs to take care with how they se the fence. + */ + return MA_SUCCESS; + } + + /* Getting here means the counter is > 0. We'll need to wait for something to happen. */ +#ifndef MA_NO_THREADING + { + ma_result result; + + result = ma_event_wait(&pFence->e); + if (result != MA_SUCCESS) { + return result; + } + } +#endif + } + + /* Should never get here. 
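   (Aside: a minimal sketch of the ma_fence counter above: acquire once per outstanding
   operation, release when each one completes, and wait blocks until the counter returns to
   zero. The releases would normally happen on worker threads; they are done inline here
   only to keep the sketch single-threaded.

       #include "miniaudio.h"

       int main(void)
       {
           ma_fence fence;

           if (ma_fence_init(&fence) != MA_SUCCESS) {
               return -1;
           }

           ma_fence_acquire(&fence);   // two pieces of outstanding work
           ma_fence_acquire(&fence);

           ma_fence_release(&fence);   // each piece reports completion
           ma_fence_release(&fence);

           ma_fence_wait(&fence);      // returns once the counter is back to zero

           ma_fence_uninit(&fence);
           return 0;
       }
   )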
*/ + /*return MA_INVALID_OPERATION;*/ +} + + +MA_API ma_result ma_async_notification_signal(ma_async_notification* pNotification) +{ + ma_async_notification_callbacks* pNotificationCallbacks = (ma_async_notification_callbacks*)pNotification; + + if (pNotification == NULL) { + return MA_INVALID_ARGS; + } + + if (pNotificationCallbacks->onSignal == NULL) { + return MA_NOT_IMPLEMENTED; + } + + pNotificationCallbacks->onSignal(pNotification); + return MA_INVALID_ARGS; +} + + +static void ma_async_notification_poll__on_signal(ma_async_notification* pNotification) +{ + ((ma_async_notification_poll*)pNotification)->signalled = MA_TRUE; +} + +MA_API ma_result ma_async_notification_poll_init(ma_async_notification_poll* pNotificationPoll) +{ + if (pNotificationPoll == NULL) { + return MA_INVALID_ARGS; + } + + pNotificationPoll->cb.onSignal = ma_async_notification_poll__on_signal; + pNotificationPoll->signalled = MA_FALSE; + + return MA_SUCCESS; +} + +MA_API ma_bool32 ma_async_notification_poll_is_signalled(const ma_async_notification_poll* pNotificationPoll) +{ + if (pNotificationPoll == NULL) { + return MA_FALSE; + } + + return pNotificationPoll->signalled; +} + + +static void ma_async_notification_event__on_signal(ma_async_notification* pNotification) +{ + ma_async_notification_event_signal((ma_async_notification_event*)pNotification); +} + +MA_API ma_result ma_async_notification_event_init(ma_async_notification_event* pNotificationEvent) +{ + if (pNotificationEvent == NULL) { + return MA_INVALID_ARGS; + } + + pNotificationEvent->cb.onSignal = ma_async_notification_event__on_signal; + +#ifndef MA_NO_THREADING + { + ma_result result; + + result = ma_event_init(&pNotificationEvent->e); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; + } +#else + { + return MA_NOT_IMPLEMENTED; /* Threading is disabled. */ + } +#endif +} + +MA_API ma_result ma_async_notification_event_uninit(ma_async_notification_event* pNotificationEvent) +{ + if (pNotificationEvent == NULL) { + return MA_INVALID_ARGS; + } + +#ifndef MA_NO_THREADING + { + ma_event_uninit(&pNotificationEvent->e); + return MA_SUCCESS; + } +#else + { + return MA_NOT_IMPLEMENTED; /* Threading is disabled. */ + } +#endif +} + +MA_API ma_result ma_async_notification_event_wait(ma_async_notification_event* pNotificationEvent) +{ + if (pNotificationEvent == NULL) { + return MA_INVALID_ARGS; + } + +#ifndef MA_NO_THREADING + { + return ma_event_wait(&pNotificationEvent->e); + } +#else + { + return MA_NOT_IMPLEMENTED; /* Threading is disabled. */ + } +#endif +} + +MA_API ma_result ma_async_notification_event_signal(ma_async_notification_event* pNotificationEvent) +{ + if (pNotificationEvent == NULL) { + return MA_INVALID_ARGS; + } + +#ifndef MA_NO_THREADING + { + return ma_event_signal(&pNotificationEvent->e); + } +#else + { + return MA_NOT_IMPLEMENTED; /* Threading is disabled. 
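   (Aside: a minimal sketch of the polling-style notification defined above. Normally the
   signal comes from miniaudio when an asynchronous operation completes; it is signalled
   directly here just to show the polling side.

       #include "miniaudio.h"

       int main(void)
       {
           ma_async_notification_poll notification;

           if (ma_async_notification_poll_init(&notification) != MA_SUCCESS) {
               return -1;
           }

           ma_async_notification_signal(&notification);   // normally done by a worker/job thread

           while (ma_async_notification_poll_is_signalled(&notification) == MA_FALSE) {
               // do other work, or sleep briefly
           }

           return 0;
       }
   )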
*/ + } +#endif +} + + + +/************************************************************************************************************************************************************ + +Job Queue + +************************************************************************************************************************************************************/ +MA_API ma_slot_allocator_config ma_slot_allocator_config_init(ma_uint32 capacity) +{ + ma_slot_allocator_config config; + + MA_ZERO_OBJECT(&config); + config.capacity = capacity; + + return config; +} + + +static MA_INLINE ma_uint32 ma_slot_allocator_calculate_group_capacity(ma_uint32 slotCapacity) +{ + ma_uint32 cap = slotCapacity / 32; + if ((slotCapacity % 32) != 0) { + cap += 1; + } + + return cap; +} + +static MA_INLINE ma_uint32 ma_slot_allocator_group_capacity(const ma_slot_allocator* pAllocator) +{ + return ma_slot_allocator_calculate_group_capacity(pAllocator->capacity); +} + + +typedef struct +{ + size_t sizeInBytes; + size_t groupsOffset; + size_t slotsOffset; +} ma_slot_allocator_heap_layout; + +static ma_result ma_slot_allocator_get_heap_layout(const ma_slot_allocator_config* pConfig, ma_slot_allocator_heap_layout* pHeapLayout) +{ + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->capacity == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* Groups. */ + pHeapLayout->groupsOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(ma_slot_allocator_calculate_group_capacity(pConfig->capacity) * sizeof(ma_slot_allocator_group)); + + /* Slots. */ + pHeapLayout->slotsOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(pConfig->capacity * sizeof(ma_uint32)); + + return MA_SUCCESS; +} + +MA_API ma_result ma_slot_allocator_get_heap_size(const ma_slot_allocator_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_slot_allocator_heap_layout layout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_slot_allocator_get_heap_layout(pConfig, &layout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = layout.sizeInBytes; + + return result; +} + +MA_API ma_result ma_slot_allocator_init_preallocated(const ma_slot_allocator_config* pConfig, void* pHeap, ma_slot_allocator* pAllocator) +{ + ma_result result; + ma_slot_allocator_heap_layout heapLayout; + + if (pAllocator == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pAllocator); + + if (pHeap == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_slot_allocator_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pAllocator->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pAllocator->pGroups = (ma_slot_allocator_group*)ma_offset_ptr(pHeap, heapLayout.groupsOffset); + pAllocator->pSlots = (ma_uint32*)ma_offset_ptr(pHeap, heapLayout.slotsOffset); + pAllocator->capacity = pConfig->capacity; + + return MA_SUCCESS; +} + +MA_API ma_result ma_slot_allocator_init(const ma_slot_allocator_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_slot_allocator* pAllocator) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_slot_allocator_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; /* Failed to retrieve the size of the heap allocation. 
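   (Aside: the get-heap-size / init-preallocated pair above follows the caller-provided
   memory pattern used throughout miniaudio. A minimal sketch, with malloc() standing in
   for whatever allocator the application prefers and a purely illustrative capacity:

       #include "miniaudio.h"
       #include <stdlib.h>

       int main(void)
       {
           ma_slot_allocator_config config = ma_slot_allocator_config_init(64);
           ma_slot_allocator allocator;
           size_t heapSizeInBytes;
           void* pHeap;

           if (ma_slot_allocator_get_heap_size(&config, &heapSizeInBytes) != MA_SUCCESS) {
               return -1;
           }

           pHeap = malloc(heapSizeInBytes);
           if (pHeap == NULL || ma_slot_allocator_init_preallocated(&config, pHeap, &allocator) != MA_SUCCESS) {
               free(pHeap);
               return -1;
           }

           // ... ma_slot_allocator_alloc() / ma_slot_allocator_free() ...

           ma_slot_allocator_uninit(&allocator, NULL);  // does not free pHeap; the caller owns it
           free(pHeap);
           return 0;
       }
   )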
*/ + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_slot_allocator_init_preallocated(pConfig, pHeap, pAllocator); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pAllocator->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_slot_allocator_uninit(ma_slot_allocator* pAllocator, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocator == NULL) { + return; + } + + if (pAllocator->_ownsHeap) { + ma_free(pAllocator->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_slot_allocator_alloc(ma_slot_allocator* pAllocator, ma_uint64* pSlot) +{ + ma_uint32 iAttempt; + const ma_uint32 maxAttempts = 2; /* The number of iterations to perform until returning MA_OUT_OF_MEMORY if no slots can be found. */ + + if (pAllocator == NULL || pSlot == NULL) { + return MA_INVALID_ARGS; + } + + for (iAttempt = 0; iAttempt < maxAttempts; iAttempt += 1) { + /* We need to acquire a suitable bitfield first. This is a bitfield that's got an available slot within it. */ + ma_uint32 iGroup; + for (iGroup = 0; iGroup < ma_slot_allocator_group_capacity(pAllocator); iGroup += 1) { + /* CAS */ + for (;;) { + ma_uint32 oldBitfield; + ma_uint32 newBitfield; + ma_uint32 bitOffset; + + oldBitfield = ma_atomic_load_32(&pAllocator->pGroups[iGroup].bitfield); /* <-- This copy must happen. The compiler must not optimize this away. */ + + /* Fast check to see if anything is available. */ + if (oldBitfield == 0xFFFFFFFF) { + break; /* No available bits in this bitfield. */ + } + + bitOffset = ma_ffs_32(~oldBitfield); + MA_ASSERT(bitOffset < 32); + + newBitfield = oldBitfield | (1 << bitOffset); + + if (ma_atomic_compare_and_swap_32(&pAllocator->pGroups[iGroup].bitfield, oldBitfield, newBitfield) == oldBitfield) { + ma_uint32 slotIndex; + + /* Increment the counter as soon as possible to have other threads report out-of-memory sooner than later. */ + ma_atomic_fetch_add_32(&pAllocator->count, 1); + + /* The slot index is required for constructing the output value. */ + slotIndex = (iGroup << 5) + bitOffset; /* iGroup << 5 = iGroup * 32 */ + if (slotIndex >= pAllocator->capacity) { + return MA_OUT_OF_MEMORY; + } + + /* Increment the reference count before constructing the output value. */ + pAllocator->pSlots[slotIndex] += 1; + + /* Construct the output value. */ + *pSlot = (((ma_uint64)pAllocator->pSlots[slotIndex] << 32) | slotIndex); + + return MA_SUCCESS; + } + } + } + + /* We weren't able to find a slot. If it's because we've reached our capacity we need to return MA_OUT_OF_MEMORY. Otherwise we need to do another iteration and try again. */ + if (pAllocator->count < pAllocator->capacity) { + ma_yield(); + } else { + return MA_OUT_OF_MEMORY; + } + } + + /* We couldn't find a slot within the maximum number of attempts. */ + return MA_OUT_OF_MEMORY; +} + +MA_API ma_result ma_slot_allocator_free(ma_slot_allocator* pAllocator, ma_uint64 slot) +{ + ma_uint32 iGroup; + ma_uint32 iBit; + + if (pAllocator == NULL) { + return MA_INVALID_ARGS; + } + + iGroup = (ma_uint32)((slot & 0xFFFFFFFF) >> 5); /* slot / 32 */ + iBit = (ma_uint32)((slot & 0xFFFFFFFF) & 31); /* slot % 32 */ + + if (iGroup >= ma_slot_allocator_group_capacity(pAllocator)) { + return MA_INVALID_ARGS; + } + + MA_ASSERT(iBit < 32); /* This must be true due to the logic we used to actually calculate it. 
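   (Aside: a minimal sketch of allocating and freeing a slot with the functions above. The
   decoding of the returned 64-bit value mirrors what the allocator itself constructs: the
   low 32 bits are the slot index and the high 32 bits are a per-slot counter that changes
   on every allocation. The printing is illustrative only.

       #include "miniaudio.h"
       #include <stdio.h>

       int main(void)
       {
           ma_slot_allocator_config config = ma_slot_allocator_config_init(32);
           ma_slot_allocator allocator;
           ma_uint64 slot;

           if (ma_slot_allocator_init(&config, NULL, &allocator) != MA_SUCCESS) {
               return -1;
           }

           if (ma_slot_allocator_alloc(&allocator, &slot) == MA_SUCCESS) {
               printf("index = %u, counter = %u\n",
                   (unsigned int)(slot & 0xFFFFFFFF), (unsigned int)(slot >> 32));
               ma_slot_allocator_free(&allocator, slot);
           }

           ma_slot_allocator_uninit(&allocator, NULL);
           return 0;
       }
   )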
*/ + + while (ma_atomic_load_32(&pAllocator->count) > 0) { + /* CAS */ + ma_uint32 oldBitfield; + ma_uint32 newBitfield; + + oldBitfield = ma_atomic_load_32(&pAllocator->pGroups[iGroup].bitfield); /* <-- This copy must happen. The compiler must not optimize this away. */ + newBitfield = oldBitfield & ~(1 << iBit); + + /* Debugging for checking for double-frees. */ +#if defined(MA_DEBUG_OUTPUT) + { + if ((oldBitfield & (1 << iBit)) == 0) { + MA_ASSERT(MA_FALSE); /* Double free detected.*/ + } + } +#endif + + if (ma_atomic_compare_and_swap_32(&pAllocator->pGroups[iGroup].bitfield, oldBitfield, newBitfield) == oldBitfield) { + ma_atomic_fetch_sub_32(&pAllocator->count, 1); + return MA_SUCCESS; + } + } + + /* Getting here means there are no allocations available for freeing. */ + return MA_INVALID_OPERATION; +} + + +#define MA_JOB_ID_NONE ~((ma_uint64)0) +#define MA_JOB_SLOT_NONE (ma_uint16)(~0) + +static MA_INLINE ma_uint32 ma_job_extract_refcount(ma_uint64 toc) +{ + return (ma_uint32)(toc >> 32); +} + +static MA_INLINE ma_uint16 ma_job_extract_slot(ma_uint64 toc) +{ + return (ma_uint16)(toc & 0x0000FFFF); +} + +static MA_INLINE ma_uint16 ma_job_extract_code(ma_uint64 toc) +{ + return (ma_uint16)((toc & 0xFFFF0000) >> 16); +} + +static MA_INLINE ma_uint64 ma_job_toc_to_allocation(ma_uint64 toc) +{ + return ((ma_uint64)ma_job_extract_refcount(toc) << 32) | (ma_uint64)ma_job_extract_slot(toc); +} + +static MA_INLINE ma_uint64 ma_job_set_refcount(ma_uint64 toc, ma_uint32 refcount) +{ + /* Clear the reference count first. */ + toc = toc & ~((ma_uint64)0xFFFFFFFF << 32); + toc = toc | ((ma_uint64)refcount << 32); + + return toc; +} + + +MA_API ma_job ma_job_init(ma_uint16 code) +{ + ma_job job; + + MA_ZERO_OBJECT(&job); + job.toc.breakup.code = code; + job.toc.breakup.slot = MA_JOB_SLOT_NONE; /* Temp value. Will be allocated when posted to a queue. */ + job.next = MA_JOB_ID_NONE; + + return job; +} + + +static ma_result ma_job_process__noop(ma_job* pJob); +static ma_result ma_job_process__quit(ma_job* pJob); +static ma_result ma_job_process__custom(ma_job* pJob); +static ma_result ma_job_process__resource_manager__load_data_buffer_node(ma_job* pJob); +static ma_result ma_job_process__resource_manager__free_data_buffer_node(ma_job* pJob); +static ma_result ma_job_process__resource_manager__page_data_buffer_node(ma_job* pJob); +static ma_result ma_job_process__resource_manager__load_data_buffer(ma_job* pJob); +static ma_result ma_job_process__resource_manager__free_data_buffer(ma_job* pJob); +static ma_result ma_job_process__resource_manager__load_data_stream(ma_job* pJob); +static ma_result ma_job_process__resource_manager__free_data_stream(ma_job* pJob); +static ma_result ma_job_process__resource_manager__page_data_stream(ma_job* pJob); +static ma_result ma_job_process__resource_manager__seek_data_stream(ma_job* pJob); + +#if !defined(MA_NO_DEVICE_IO) +static ma_result ma_job_process__device__aaudio_reroute(ma_job* pJob); +#endif + +static ma_job_proc g_jobVTable[MA_JOB_TYPE_COUNT] = +{ + /* Miscellaneous. */ + ma_job_process__quit, /* MA_JOB_TYPE_QUIT */ + ma_job_process__custom, /* MA_JOB_TYPE_CUSTOM */ + + /* Resource Manager. 
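   (Aside: a worked example of the 64-bit job "toc" layout that the helpers above decode:
   bits 0..15 hold the slot, bits 16..31 the job code, and bits 32..63 the reference count.
   The concrete values below are arbitrary and for illustration only.

       #include "miniaudio.h"

       int main(void)
       {
           ma_uint64 toc = ((ma_uint64)7 << 32)                     // refcount = 7
                         | ((ma_uint64)MA_JOB_TYPE_CUSTOM << 16)    // job code
                         | (ma_uint64)3;                            // slot = 3

           ma_uint16 slot = (ma_uint16)(toc & 0x0000FFFF);          // 3, as in ma_job_extract_slot()
           ma_uint16 code = (ma_uint16)((toc & 0xFFFF0000) >> 16);  // MA_JOB_TYPE_CUSTOM
           ma_uint32 refs = (ma_uint32)(toc >> 32);                 // 7, as in ma_job_extract_refcount()

           return (slot == 3 && code == MA_JOB_TYPE_CUSTOM && refs == 7) ? 0 : 1;
       }
   )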
*/ + ma_job_process__resource_manager__load_data_buffer_node, /* MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER_NODE */ + ma_job_process__resource_manager__free_data_buffer_node, /* MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER_NODE */ + ma_job_process__resource_manager__page_data_buffer_node, /* MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_BUFFER_NODE */ + ma_job_process__resource_manager__load_data_buffer, /* MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER */ + ma_job_process__resource_manager__free_data_buffer, /* MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER */ + ma_job_process__resource_manager__load_data_stream, /* MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_STREAM */ + ma_job_process__resource_manager__free_data_stream, /* MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_STREAM */ + ma_job_process__resource_manager__page_data_stream, /* MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_STREAM */ + ma_job_process__resource_manager__seek_data_stream, /* MA_JOB_TYPE_RESOURCE_MANAGER_SEEK_DATA_STREAM */ + + /* Device. */ +#if !defined(MA_NO_DEVICE_IO) + ma_job_process__device__aaudio_reroute /*MA_JOB_TYPE_DEVICE_AAUDIO_REROUTE*/ +#endif +}; + +MA_API ma_result ma_job_process(ma_job* pJob) +{ + if (pJob == NULL) { + return MA_INVALID_ARGS; + } + + if (pJob->toc.breakup.code >= MA_JOB_TYPE_COUNT) { + return MA_INVALID_OPERATION; + } + + return g_jobVTable[pJob->toc.breakup.code](pJob); +} + +static ma_result ma_job_process__noop(ma_job* pJob) +{ + MA_ASSERT(pJob != NULL); + + /* No-op. */ + (void)pJob; + + return MA_SUCCESS; +} + +static ma_result ma_job_process__quit(ma_job* pJob) +{ + return ma_job_process__noop(pJob); +} + +static ma_result ma_job_process__custom(ma_job* pJob) +{ + MA_ASSERT(pJob != NULL); + + /* No-op if there's no callback. */ + if (pJob->data.custom.proc == NULL) { + return MA_SUCCESS; + } + + return pJob->data.custom.proc(pJob); +} + + + +MA_API ma_job_queue_config ma_job_queue_config_init(ma_uint32 flags, ma_uint32 capacity) +{ + ma_job_queue_config config; + + config.flags = flags; + config.capacity = capacity; + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t allocatorOffset; + size_t jobsOffset; +} ma_job_queue_heap_layout; + +static ma_result ma_job_queue_get_heap_layout(const ma_job_queue_config* pConfig, ma_job_queue_heap_layout* pHeapLayout) +{ + ma_result result; + + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->capacity == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* Allocator. */ + { + ma_slot_allocator_config allocatorConfig; + size_t allocatorHeapSizeInBytes; + + allocatorConfig = ma_slot_allocator_config_init(pConfig->capacity); + result = ma_slot_allocator_get_heap_size(&allocatorConfig, &allocatorHeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->allocatorOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += allocatorHeapSizeInBytes; + } + + /* Jobs. 
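   (Aside: a minimal sketch of running a custom job with the helpers above. The callback
   name is illustrative; it just has to match the ma_job_proc signature.

       #include "miniaudio.h"
       #include <stdio.h>

       static ma_result my_custom_job(ma_job* pJob)
       {
           (void)pJob;
           printf("custom job executed\n");
           return MA_SUCCESS;
       }

       int main(void)
       {
           ma_job job = ma_job_init(MA_JOB_TYPE_CUSTOM);
           job.data.custom.proc = my_custom_job;

           return (ma_job_process(&job) == MA_SUCCESS) ? 0 : -1;
       }
   )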
*/ + pHeapLayout->jobsOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(pConfig->capacity * sizeof(ma_job)); + + return MA_SUCCESS; +} + +MA_API ma_result ma_job_queue_get_heap_size(const ma_job_queue_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_job_queue_heap_layout layout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_job_queue_get_heap_layout(pConfig, &layout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = layout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_job_queue_init_preallocated(const ma_job_queue_config* pConfig, void* pHeap, ma_job_queue* pQueue) +{ + ma_result result; + ma_job_queue_heap_layout heapLayout; + ma_slot_allocator_config allocatorConfig; + + if (pQueue == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pQueue); + + result = ma_job_queue_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pQueue->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pQueue->flags = pConfig->flags; + pQueue->capacity = pConfig->capacity; + pQueue->pJobs = (ma_job*)ma_offset_ptr(pHeap, heapLayout.jobsOffset); + + allocatorConfig = ma_slot_allocator_config_init(pConfig->capacity); + result = ma_slot_allocator_init_preallocated(&allocatorConfig, ma_offset_ptr(pHeap, heapLayout.allocatorOffset), &pQueue->allocator); + if (result != MA_SUCCESS) { + return result; + } + + /* We need a semaphore if we're running in non-blocking mode. If threading is disabled we need to return an error. */ + if ((pQueue->flags & MA_JOB_QUEUE_FLAG_NON_BLOCKING) == 0) { +#ifndef MA_NO_THREADING + { + ma_semaphore_init(0, &pQueue->sem); + } +#else + { + /* Threading is disabled and we've requested non-blocking mode. */ + return MA_INVALID_OPERATION; + } +#endif + } + + /* + Our queue needs to be initialized with a free standing node. This should always be slot 0. Required for the lock free algorithm. The first job in the queue is + just a dummy item for giving us the first item in the list which is stored in the "next" member. + */ + ma_slot_allocator_alloc(&pQueue->allocator, &pQueue->head); /* Will never fail. */ + pQueue->pJobs[ma_job_extract_slot(pQueue->head)].next = MA_JOB_ID_NONE; + pQueue->tail = pQueue->head; + + return MA_SUCCESS; +} + +MA_API ma_result ma_job_queue_init(const ma_job_queue_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_job_queue* pQueue) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_job_queue_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_job_queue_init_preallocated(pConfig, pHeap, pQueue); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pQueue->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_job_queue_uninit(ma_job_queue* pQueue, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pQueue == NULL) { + return; + } + + /* All we need to do is uninitialize the semaphore. */ + if ((pQueue->flags & MA_JOB_QUEUE_FLAG_NON_BLOCKING) == 0) { +#ifndef MA_NO_THREADING + { + ma_semaphore_uninit(&pQueue->sem); + } +#else + { + MA_ASSERT(MA_FALSE); /* Should never get here. 
Should have been checked at initialization time. */ + } +#endif + } + + ma_slot_allocator_uninit(&pQueue->allocator, pAllocationCallbacks); + + if (pQueue->_ownsHeap) { + ma_free(pQueue->_pHeap, pAllocationCallbacks); + } +} + +static ma_bool32 ma_job_queue_cas(volatile ma_uint64* dst, ma_uint64 expected, ma_uint64 desired) +{ + /* The new counter is taken from the expected value. */ + return ma_atomic_compare_and_swap_64(dst, expected, ma_job_set_refcount(desired, ma_job_extract_refcount(expected) + 1)) == expected; +} + +MA_API ma_result ma_job_queue_post(ma_job_queue* pQueue, const ma_job* pJob) +{ + /* + Lock free queue implementation based on the paper by Michael and Scott: Nonblocking Algorithms and Preemption-Safe Locking on Multiprogrammed Shared Memory Multiprocessors + */ + ma_result result; + ma_uint64 slot; + ma_uint64 tail; + ma_uint64 next; + + if (pQueue == NULL || pJob == NULL) { + return MA_INVALID_ARGS; + } + + /* We need a new slot. */ + result = ma_slot_allocator_alloc(&pQueue->allocator, &slot); + if (result != MA_SUCCESS) { + return result; /* Probably ran out of slots. If so, MA_OUT_OF_MEMORY will be returned. */ + } + + /* At this point we should have a slot to place the job. */ + MA_ASSERT(ma_job_extract_slot(slot) < pQueue->capacity); + + /* We need to put the job into memory before we do anything. */ + pQueue->pJobs[ma_job_extract_slot(slot)] = *pJob; + pQueue->pJobs[ma_job_extract_slot(slot)].toc.allocation = slot; /* This will overwrite the job code. */ + pQueue->pJobs[ma_job_extract_slot(slot)].toc.breakup.code = pJob->toc.breakup.code; /* The job code needs to be applied again because the line above overwrote it. */ + pQueue->pJobs[ma_job_extract_slot(slot)].next = MA_JOB_ID_NONE; /* Reset for safety. */ + +#ifndef MA_USE_EXPERIMENTAL_LOCK_FREE_JOB_QUEUE + ma_spinlock_lock(&pQueue->lock); +#endif + { + /* The job is stored in memory so now we need to add it to our linked list. We only ever add items to the end of the list. */ + for (;;) { + tail = ma_atomic_load_64(&pQueue->tail); + next = ma_atomic_load_64(&pQueue->pJobs[ma_job_extract_slot(tail)].next); + + if (ma_job_toc_to_allocation(tail) == ma_job_toc_to_allocation(ma_atomic_load_64(&pQueue->tail))) { + if (ma_job_extract_slot(next) == 0xFFFF) { + if (ma_job_queue_cas(&pQueue->pJobs[ma_job_extract_slot(tail)].next, next, slot)) { + break; + } + } else { + ma_job_queue_cas(&pQueue->tail, tail, ma_job_extract_slot(next)); + } + } + } + ma_job_queue_cas(&pQueue->tail, tail, slot); + } +#ifndef MA_USE_EXPERIMENTAL_LOCK_FREE_JOB_QUEUE + ma_spinlock_unlock(&pQueue->lock); +#endif + + + /* Signal the semaphore as the last step if we're using synchronous mode. */ + if ((pQueue->flags & MA_JOB_QUEUE_FLAG_NON_BLOCKING) == 0) { +#ifndef MA_NO_THREADING + { + ma_semaphore_release(&pQueue->sem); + } +#else + { + MA_ASSERT(MA_FALSE); /* Should never get here. Should have been checked at initialization time. */ + } +#endif + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_job_queue_next(ma_job_queue* pQueue, ma_job* pJob) +{ + ma_uint64 head; + ma_uint64 tail; + ma_uint64 next; + + if (pQueue == NULL || pJob == NULL) { + return MA_INVALID_ARGS; + } + + /* If we're running in synchronous mode we'll need to wait on a semaphore. */ + if ((pQueue->flags & MA_JOB_QUEUE_FLAG_NON_BLOCKING) == 0) { +#ifndef MA_NO_THREADING + { + ma_semaphore_wait(&pQueue->sem); + } +#else + { + MA_ASSERT(MA_FALSE); /* Should never get here. Should have been checked at initialization time. 
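   (Aside: a minimal single-threaded round trip through the queue API above, using
   MA_JOB_QUEUE_FLAG_NON_BLOCKING so that draining an empty queue returns
   MA_NO_DATA_AVAILABLE instead of blocking on the semaphore. The capacity of 64 is
   illustrative.

       #include "miniaudio.h"

       int main(void)
       {
           ma_job_queue_config config = ma_job_queue_config_init(MA_JOB_QUEUE_FLAG_NON_BLOCKING, 64);
           ma_job_queue queue;
           ma_job job;

           if (ma_job_queue_init(&config, NULL, &queue) != MA_SUCCESS) {
               return -1;
           }

           job = ma_job_init(MA_JOB_TYPE_CUSTOM);   // no callback set, so processing it is a no-op
           ma_job_queue_post(&queue, &job);

           while (ma_job_queue_next(&queue, &job) == MA_SUCCESS) {
               ma_job_process(&job);
           }

           ma_job_queue_uninit(&queue, NULL);
           return 0;
       }
   )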
*/ + } +#endif + } + +#ifndef MA_USE_EXPERIMENTAL_LOCK_FREE_JOB_QUEUE + ma_spinlock_lock(&pQueue->lock); +#endif + { + /* + BUG: In lock-free mode, multiple threads can be in this section of code. The "head" variable in the loop below + is stored. One thread can fall through to the freeing of this item while another is still using "head" for the + retrieval of the "next" variable. + + The slot allocator might need to make use of some reference counting to ensure it's only truely freed when + there are no more references to the item. This must be fixed before removing these locks. + */ + + /* Now we need to remove the root item from the list. */ + for (;;) { + head = ma_atomic_load_64(&pQueue->head); + tail = ma_atomic_load_64(&pQueue->tail); + next = ma_atomic_load_64(&pQueue->pJobs[ma_job_extract_slot(head)].next); + + if (ma_job_toc_to_allocation(head) == ma_job_toc_to_allocation(ma_atomic_load_64(&pQueue->head))) { + if (ma_job_extract_slot(head) == ma_job_extract_slot(tail)) { + if (ma_job_extract_slot(next) == 0xFFFF) { +#ifndef MA_USE_EXPERIMENTAL_LOCK_FREE_JOB_QUEUE + ma_spinlock_unlock(&pQueue->lock); +#endif + return MA_NO_DATA_AVAILABLE; + } + ma_job_queue_cas(&pQueue->tail, tail, ma_job_extract_slot(next)); + } else { + *pJob = pQueue->pJobs[ma_job_extract_slot(next)]; + if (ma_job_queue_cas(&pQueue->head, head, ma_job_extract_slot(next))) { + break; + } + } + } + } + } +#ifndef MA_USE_EXPERIMENTAL_LOCK_FREE_JOB_QUEUE + ma_spinlock_unlock(&pQueue->lock); +#endif + + ma_slot_allocator_free(&pQueue->allocator, head); + + /* + If it's a quit job make sure it's put back on the queue to ensure other threads have an opportunity to detect it and terminate naturally. We + could instead just leave it on the queue, but that would involve fiddling with the lock-free code above and I want to keep that as simple as + possible. + */ + if (pJob->toc.breakup.code == MA_JOB_TYPE_QUIT) { + ma_job_queue_post(pQueue, pJob); + return MA_CANCELLED; /* Return a cancelled status just in case the thread is checking return codes and not properly checking for a quit job. */ + } + + return MA_SUCCESS; +} + + + +/******************************************************************************* + +Dynamic Linking + +*******************************************************************************/ +#ifdef MA_POSIX +/* No need for dlfcn.h if we're not using runtime linking. */ +#ifndef MA_NO_RUNTIME_LINKING +#include +#endif +#endif + +MA_API ma_handle ma_dlopen(ma_log* pLog, const char* filename) +{ +#ifndef MA_NO_RUNTIME_LINKING + ma_handle handle; + + ma_log_postf(pLog, MA_LOG_LEVEL_DEBUG, "Loading library: %s\n", filename); + +#ifdef MA_WIN32 + /* From MSDN: Desktop applications cannot use LoadPackagedLibrary; if a desktop application calls this function it fails with APPMODEL_ERROR_NO_PACKAGE.*/ +#if !defined(MA_WIN32_UWP) + handle = (ma_handle)LoadLibraryA(filename); +#else + /* *sigh* It appears there is no ANSI version of LoadPackagedLibrary()... */ + WCHAR filenameW[4096]; + if (MultiByteToWideChar(CP_UTF8, 0, filename, -1, filenameW, sizeof(filenameW)) == 0) { + handle = NULL; + } else { + handle = (ma_handle)LoadPackagedLibrary(filenameW, 0); + } +#endif +#else + handle = (ma_handle)dlopen(filename, RTLD_NOW); +#endif + + /* + I'm not considering failure to load a library an error nor a warning because seamlessly falling through to a lower-priority + backend is a deliberate design choice. Instead I'm logging it as an informational message. 
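   (Aside: a minimal sketch of the runtime-linking helpers above. The library and symbol
   names are illustrative, a Linux libm is used here, and passing NULL for the log is
   assumed to simply suppress logging.

       #include "miniaudio.h"
       #include <stdio.h>

       int main(void)
       {
           ma_handle lib = ma_dlopen(NULL, "libm.so.6");
           if (lib != NULL) {
               ma_proc sym = ma_dlsym(NULL, lib, "cos");
               printf("symbol %s\n", (sym != NULL) ? "found" : "not found");
               ma_dlclose(NULL, lib);
           }
           return 0;
       }
   )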
+ */ + if (handle == NULL) { + ma_log_postf(pLog, MA_LOG_LEVEL_INFO, "Failed to load library: %s\n", filename); + } + + return handle; +#else + /* Runtime linking is disabled. */ + (void)pLog; + (void)filename; + return NULL; +#endif +} + +MA_API void ma_dlclose(ma_log* pLog, ma_handle handle) +{ +#ifndef MA_NO_RUNTIME_LINKING +#ifdef MA_WIN32 + FreeLibrary((HMODULE)handle); +#else + dlclose((void*)handle); +#endif + + (void)pLog; +#else + /* Runtime linking is disabled. */ + (void)pLog; + (void)handle; +#endif +} + +MA_API ma_proc ma_dlsym(ma_log* pLog, ma_handle handle, const char* symbol) +{ +#ifndef MA_NO_RUNTIME_LINKING + ma_proc proc; + + ma_log_postf(pLog, MA_LOG_LEVEL_DEBUG, "Loading symbol: %s\n", symbol); + +#ifdef _WIN32 + proc = (ma_proc)GetProcAddress((HMODULE)handle, symbol); +#else +#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpedantic" +#endif + proc = (ma_proc)dlsym((void*)handle, symbol); +#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) +#pragma GCC diagnostic pop +#endif +#endif + + if (proc == NULL) { + ma_log_postf(pLog, MA_LOG_LEVEL_WARNING, "Failed to load symbol: %s\n", symbol); + } + + (void)pLog; /* It's possible for pContext to be unused. */ + return proc; +#else + /* Runtime linking is disabled. */ + (void)pLog; + (void)handle; + (void)symbol; + return NULL; +#endif +} + + + +/************************************************************************************************************************************************************ +************************************************************************************************************************************************************* + +DEVICE I/O +========== + +************************************************************************************************************************************************************* +************************************************************************************************************************************************************/ + +/* Disable run-time linking on certain backends and platforms. */ +#ifndef MA_NO_RUNTIME_LINKING +#if defined(MA_EMSCRIPTEN) || defined(MA_ORBIS) || defined(MA_PROSPERO) +#define MA_NO_RUNTIME_LINKING +#endif +#endif + +#ifndef MA_NO_DEVICE_IO + +#if defined(MA_APPLE) && (__MAC_OS_X_VERSION_MIN_REQUIRED < 101200) +#include /* For mach_absolute_time() */ +#endif + +#ifdef MA_POSIX +#include +#include + +/* No need for dlfcn.h if we're not using runtime linking. */ +#ifndef MA_NO_RUNTIME_LINKING +#include +#endif +#endif + + + +MA_API void ma_device_info_add_native_data_format(ma_device_info* pDeviceInfo, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 flags) +{ + if (pDeviceInfo == NULL) { + return; + } + + if (pDeviceInfo->nativeDataFormatCount < ma_countof(pDeviceInfo->nativeDataFormats)) { + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format = format; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels = channels; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = sampleRate; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags = flags; + pDeviceInfo->nativeDataFormatCount += 1; + } +} + + +typedef struct +{ + ma_backend backend; + const char* pName; +} ma_backend_info; + +static ma_backend_info gBackendInfo[] = /* Indexed by the backend enum. 
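   (Aside: a minimal sketch of how a backend, for instance a custom one, would use
   ma_device_info_add_native_data_format() above to report a format it supports. The
   f32/stereo/48kHz values and the zero flags are illustrative only.

       #include "miniaudio.h"
       #include <string.h>

       int main(void)
       {
           ma_device_info info;
           memset(&info, 0, sizeof(info));

           ma_device_info_add_native_data_format(&info, ma_format_f32, 2, 48000, 0);

           return (int)info.nativeDataFormatCount;   // 1
       }
   )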
Must be in the order backends are declared in the ma_backend enum. */ +{ + {ma_backend_wasapi, "WASAPI"}, + {ma_backend_dsound, "DirectSound"}, + {ma_backend_winmm, "WinMM"}, + {ma_backend_coreaudio, "Core Audio"}, + {ma_backend_sndio, "sndio"}, + {ma_backend_audio4, "audio(4)"}, + {ma_backend_oss, "OSS"}, + {ma_backend_pulseaudio, "PulseAudio"}, + {ma_backend_alsa, "ALSA"}, + {ma_backend_jack, "JACK"}, + {ma_backend_aaudio, "AAudio"}, + {ma_backend_opensl, "OpenSL|ES"}, + {ma_backend_webaudio, "Web Audio"}, + {ma_backend_custom, "Custom"}, + {ma_backend_null, "Null"} +}; + +MA_API const char* ma_get_backend_name(ma_backend backend) +{ + if (backend < 0 || backend >= (int)ma_countof(gBackendInfo)) { + return "Unknown"; + } + + return gBackendInfo[backend].pName; +} + +MA_API ma_result ma_get_backend_from_name(const char* pBackendName, ma_backend* pBackend) +{ + size_t iBackend; + + if (pBackendName == NULL) { + return MA_INVALID_ARGS; + } + + for (iBackend = 0; iBackend < ma_countof(gBackendInfo); iBackend += 1) { + if (ma_strcmp(pBackendName, gBackendInfo[iBackend].pName) == 0) { + if (pBackend != NULL) { + *pBackend = gBackendInfo[iBackend].backend; + } + + return MA_SUCCESS; + } + } + + /* Getting here means the backend name is unknown. */ + return MA_INVALID_ARGS; +} + +MA_API ma_bool32 ma_is_backend_enabled(ma_backend backend) +{ + /* + This looks a little bit gross, but we want all backends to be included in the switch to avoid warnings on some compilers + about some enums not being handled by the switch statement. + */ + switch (backend) + { + case ma_backend_wasapi: +#if defined(MA_HAS_WASAPI) + return MA_TRUE; +#else + return MA_FALSE; +#endif + case ma_backend_dsound: +#if defined(MA_HAS_DSOUND) + return MA_TRUE; +#else + return MA_FALSE; +#endif + case ma_backend_winmm: +#if defined(MA_HAS_WINMM) + return MA_TRUE; +#else + return MA_FALSE; +#endif + case ma_backend_coreaudio: +#if defined(MA_HAS_COREAUDIO) + return MA_TRUE; +#else + return MA_FALSE; +#endif + case ma_backend_sndio: +#if defined(MA_HAS_SNDIO) + return MA_TRUE; +#else + return MA_FALSE; +#endif + case ma_backend_audio4: +#if defined(MA_HAS_AUDIO4) + return MA_TRUE; +#else + return MA_FALSE; +#endif + case ma_backend_oss: +#if defined(MA_HAS_OSS) + return MA_TRUE; +#else + return MA_FALSE; +#endif + case ma_backend_pulseaudio: +#if defined(MA_HAS_PULSEAUDIO) + return MA_TRUE; +#else + return MA_FALSE; +#endif + case ma_backend_alsa: +#if defined(MA_HAS_ALSA) + return MA_TRUE; +#else + return MA_FALSE; +#endif + case ma_backend_jack: +#if defined(MA_HAS_JACK) + return MA_TRUE; +#else + return MA_FALSE; +#endif + case ma_backend_aaudio: +#if defined(MA_HAS_AAUDIO) +#if defined(MA_ANDROID) + { + return ma_android_sdk_version() >= 26; + } +#else + return MA_FALSE; +#endif +#else + return MA_FALSE; +#endif + case ma_backend_opensl: +#if defined(MA_HAS_OPENSL) +#if defined(MA_ANDROID) + { + return ma_android_sdk_version() >= 9; + } +#else + return MA_TRUE; +#endif +#else + return MA_FALSE; +#endif + case ma_backend_webaudio: +#if defined(MA_HAS_WEBAUDIO) + return MA_TRUE; +#else + return MA_FALSE; +#endif + case ma_backend_custom: +#if defined(MA_HAS_CUSTOM) + return MA_TRUE; +#else + return MA_FALSE; +#endif + case ma_backend_null: +#if defined(MA_HAS_NULL) + return MA_TRUE; +#else + return MA_FALSE; +#endif + + default: return MA_FALSE; + } +} + +MA_API ma_result ma_get_enabled_backends(ma_backend* pBackends, size_t backendCap, size_t* pBackendCount) +{ + size_t backendCount; + size_t iBackend; + ma_result result = 
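/*
Aside: a minimal sketch of enumerating the compiled-in backends with the helpers above.
(ma_backend_null + 1) is used as the backend count, which follows from ma_backend_null
being the last value iterated below; the printing is illustrative only.

    #include "miniaudio.h"
    #include <stdio.h>

    int main(void)
    {
        ma_backend backends[ma_backend_null + 1];
        size_t count;
        size_t i;

        if (ma_get_enabled_backends(backends, ma_backend_null + 1, &count) != MA_SUCCESS) {
            return -1;
        }

        for (i = 0; i < count; i += 1) {
            printf("%s\n", ma_get_backend_name(backends[i]));
        }

        return 0;
    }
*/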
MA_SUCCESS; + + if (pBackendCount == NULL) { + return MA_INVALID_ARGS; + } + + backendCount = 0; + + for (iBackend = 0; iBackend <= ma_backend_null; iBackend += 1) { + ma_backend backend = (ma_backend)iBackend; + + if (ma_is_backend_enabled(backend)) { + /* The backend is enabled. Try adding it to the list. If there's no room, MA_NO_SPACE needs to be returned. */ + if (backendCount == backendCap) { + result = MA_NO_SPACE; + break; + } else { + pBackends[backendCount] = backend; + backendCount += 1; + } + } + } + + if (pBackendCount != NULL) { + *pBackendCount = backendCount; + } + + return result; +} + +MA_API ma_bool32 ma_is_loopback_supported(ma_backend backend) +{ + switch (backend) + { + case ma_backend_wasapi: return MA_TRUE; + case ma_backend_dsound: return MA_FALSE; + case ma_backend_winmm: return MA_FALSE; + case ma_backend_coreaudio: return MA_FALSE; + case ma_backend_sndio: return MA_FALSE; + case ma_backend_audio4: return MA_FALSE; + case ma_backend_oss: return MA_FALSE; + case ma_backend_pulseaudio: return MA_FALSE; + case ma_backend_alsa: return MA_FALSE; + case ma_backend_jack: return MA_FALSE; + case ma_backend_aaudio: return MA_FALSE; + case ma_backend_opensl: return MA_FALSE; + case ma_backend_webaudio: return MA_FALSE; + case ma_backend_custom: return MA_FALSE; /* <-- Will depend on the implementation of the backend. */ + case ma_backend_null: return MA_FALSE; + default: return MA_FALSE; + } +} + + + +#if defined(MA_WIN32) +/* WASAPI error codes. */ +#define MA_AUDCLNT_E_NOT_INITIALIZED ((HRESULT)0x88890001) +#define MA_AUDCLNT_E_ALREADY_INITIALIZED ((HRESULT)0x88890002) +#define MA_AUDCLNT_E_WRONG_ENDPOINT_TYPE ((HRESULT)0x88890003) +#define MA_AUDCLNT_E_DEVICE_INVALIDATED ((HRESULT)0x88890004) +#define MA_AUDCLNT_E_NOT_STOPPED ((HRESULT)0x88890005) +#define MA_AUDCLNT_E_BUFFER_TOO_LARGE ((HRESULT)0x88890006) +#define MA_AUDCLNT_E_OUT_OF_ORDER ((HRESULT)0x88890007) +#define MA_AUDCLNT_E_UNSUPPORTED_FORMAT ((HRESULT)0x88890008) +#define MA_AUDCLNT_E_INVALID_SIZE ((HRESULT)0x88890009) +#define MA_AUDCLNT_E_DEVICE_IN_USE ((HRESULT)0x8889000A) +#define MA_AUDCLNT_E_BUFFER_OPERATION_PENDING ((HRESULT)0x8889000B) +#define MA_AUDCLNT_E_THREAD_NOT_REGISTERED ((HRESULT)0x8889000C) +#define MA_AUDCLNT_E_NO_SINGLE_PROCESS ((HRESULT)0x8889000D) +#define MA_AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED ((HRESULT)0x8889000E) +#define MA_AUDCLNT_E_ENDPOINT_CREATE_FAILED ((HRESULT)0x8889000F) +#define MA_AUDCLNT_E_SERVICE_NOT_RUNNING ((HRESULT)0x88890010) +#define MA_AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED ((HRESULT)0x88890011) +#define MA_AUDCLNT_E_EXCLUSIVE_MODE_ONLY ((HRESULT)0x88890012) +#define MA_AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL ((HRESULT)0x88890013) +#define MA_AUDCLNT_E_EVENTHANDLE_NOT_SET ((HRESULT)0x88890014) +#define MA_AUDCLNT_E_INCORRECT_BUFFER_SIZE ((HRESULT)0x88890015) +#define MA_AUDCLNT_E_BUFFER_SIZE_ERROR ((HRESULT)0x88890016) +#define MA_AUDCLNT_E_CPUUSAGE_EXCEEDED ((HRESULT)0x88890017) +#define MA_AUDCLNT_E_BUFFER_ERROR ((HRESULT)0x88890018) +#define MA_AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED ((HRESULT)0x88890019) +#define MA_AUDCLNT_E_INVALID_DEVICE_PERIOD ((HRESULT)0x88890020) +#define MA_AUDCLNT_E_INVALID_STREAM_FLAG ((HRESULT)0x88890021) +#define MA_AUDCLNT_E_ENDPOINT_OFFLOAD_NOT_CAPABLE ((HRESULT)0x88890022) +#define MA_AUDCLNT_E_OUT_OF_OFFLOAD_RESOURCES ((HRESULT)0x88890023) +#define MA_AUDCLNT_E_OFFLOAD_MODE_ONLY ((HRESULT)0x88890024) +#define MA_AUDCLNT_E_NONOFFLOAD_MODE_ONLY ((HRESULT)0x88890025) +#define MA_AUDCLNT_E_RESOURCES_INVALIDATED ((HRESULT)0x88890026) +#define 
MA_AUDCLNT_E_RAW_MODE_UNSUPPORTED ((HRESULT)0x88890027) +#define MA_AUDCLNT_E_ENGINE_PERIODICITY_LOCKED ((HRESULT)0x88890028) +#define MA_AUDCLNT_E_ENGINE_FORMAT_LOCKED ((HRESULT)0x88890029) +#define MA_AUDCLNT_E_HEADTRACKING_ENABLED ((HRESULT)0x88890030) +#define MA_AUDCLNT_E_HEADTRACKING_UNSUPPORTED ((HRESULT)0x88890040) +#define MA_AUDCLNT_S_BUFFER_EMPTY ((HRESULT)0x08890001) +#define MA_AUDCLNT_S_THREAD_ALREADY_REGISTERED ((HRESULT)0x08890002) +#define MA_AUDCLNT_S_POSITION_STALLED ((HRESULT)0x08890003) + +#define MA_DS_OK ((HRESULT)0) +#define MA_DS_NO_VIRTUALIZATION ((HRESULT)0x0878000A) +#define MA_DSERR_ALLOCATED ((HRESULT)0x8878000A) +#define MA_DSERR_CONTROLUNAVAIL ((HRESULT)0x8878001E) +#define MA_DSERR_INVALIDPARAM ((HRESULT)0x80070057) /*E_INVALIDARG*/ +#define MA_DSERR_INVALIDCALL ((HRESULT)0x88780032) +#define MA_DSERR_GENERIC ((HRESULT)0x80004005) /*E_FAIL*/ +#define MA_DSERR_PRIOLEVELNEEDED ((HRESULT)0x88780046) +#define MA_DSERR_OUTOFMEMORY ((HRESULT)0x8007000E) /*E_OUTOFMEMORY*/ +#define MA_DSERR_BADFORMAT ((HRESULT)0x88780064) +#define MA_DSERR_UNSUPPORTED ((HRESULT)0x80004001) /*E_NOTIMPL*/ +#define MA_DSERR_NODRIVER ((HRESULT)0x88780078) +#define MA_DSERR_ALREADYINITIALIZED ((HRESULT)0x88780082) +#define MA_DSERR_NOAGGREGATION ((HRESULT)0x80040110) /*CLASS_E_NOAGGREGATION*/ +#define MA_DSERR_BUFFERLOST ((HRESULT)0x88780096) +#define MA_DSERR_OTHERAPPHASPRIO ((HRESULT)0x887800A0) +#define MA_DSERR_UNINITIALIZED ((HRESULT)0x887800AA) +#define MA_DSERR_NOINTERFACE ((HRESULT)0x80004002) /*E_NOINTERFACE*/ +#define MA_DSERR_ACCESSDENIED ((HRESULT)0x80070005) /*E_ACCESSDENIED*/ +#define MA_DSERR_BUFFERTOOSMALL ((HRESULT)0x887800B4) +#define MA_DSERR_DS8_REQUIRED ((HRESULT)0x887800BE) +#define MA_DSERR_SENDLOOP ((HRESULT)0x887800C8) +#define MA_DSERR_BADSENDBUFFERGUID ((HRESULT)0x887800D2) +#define MA_DSERR_OBJECTNOTFOUND ((HRESULT)0x88781161) +#define MA_DSERR_FXUNAVAILABLE ((HRESULT)0x887800DC) + +static ma_result ma_result_from_HRESULT(HRESULT hr) +{ + switch (hr) + { + case NOERROR: return MA_SUCCESS; + /*case S_OK: return MA_SUCCESS;*/ + + case E_POINTER: return MA_INVALID_ARGS; + case E_UNEXPECTED: return MA_ERROR; + case E_NOTIMPL: return MA_NOT_IMPLEMENTED; + case E_OUTOFMEMORY: return MA_OUT_OF_MEMORY; + case E_INVALIDARG: return MA_INVALID_ARGS; + case E_NOINTERFACE: return MA_API_NOT_FOUND; + case E_HANDLE: return MA_INVALID_ARGS; + case E_ABORT: return MA_ERROR; + case E_FAIL: return MA_ERROR; + case E_ACCESSDENIED: return MA_ACCESS_DENIED; + + /* WASAPI */ + case MA_AUDCLNT_E_NOT_INITIALIZED: return MA_DEVICE_NOT_INITIALIZED; + case MA_AUDCLNT_E_ALREADY_INITIALIZED: return MA_DEVICE_ALREADY_INITIALIZED; + case MA_AUDCLNT_E_WRONG_ENDPOINT_TYPE: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_DEVICE_INVALIDATED: return MA_UNAVAILABLE; + case MA_AUDCLNT_E_NOT_STOPPED: return MA_DEVICE_NOT_STOPPED; + case MA_AUDCLNT_E_BUFFER_TOO_LARGE: return MA_TOO_BIG; + case MA_AUDCLNT_E_OUT_OF_ORDER: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_UNSUPPORTED_FORMAT: return MA_FORMAT_NOT_SUPPORTED; + case MA_AUDCLNT_E_INVALID_SIZE: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_DEVICE_IN_USE: return MA_BUSY; + case MA_AUDCLNT_E_BUFFER_OPERATION_PENDING: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_THREAD_NOT_REGISTERED: return MA_DOES_NOT_EXIST; + case MA_AUDCLNT_E_NO_SINGLE_PROCESS: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED: return MA_SHARE_MODE_NOT_SUPPORTED; + case MA_AUDCLNT_E_ENDPOINT_CREATE_FAILED: return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + 
case MA_AUDCLNT_E_SERVICE_NOT_RUNNING: return MA_NOT_CONNECTED; + case MA_AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_EXCLUSIVE_MODE_ONLY: return MA_SHARE_MODE_NOT_SUPPORTED; + case MA_AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_EVENTHANDLE_NOT_SET: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_INCORRECT_BUFFER_SIZE: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_BUFFER_SIZE_ERROR: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_CPUUSAGE_EXCEEDED: return MA_ERROR; + case MA_AUDCLNT_E_BUFFER_ERROR: return MA_ERROR; + case MA_AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_INVALID_DEVICE_PERIOD: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_INVALID_STREAM_FLAG: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_ENDPOINT_OFFLOAD_NOT_CAPABLE: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_OUT_OF_OFFLOAD_RESOURCES: return MA_OUT_OF_MEMORY; + case MA_AUDCLNT_E_OFFLOAD_MODE_ONLY: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_NONOFFLOAD_MODE_ONLY: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_RESOURCES_INVALIDATED: return MA_INVALID_DATA; + case MA_AUDCLNT_E_RAW_MODE_UNSUPPORTED: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_ENGINE_PERIODICITY_LOCKED: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_ENGINE_FORMAT_LOCKED: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_HEADTRACKING_ENABLED: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_HEADTRACKING_UNSUPPORTED: return MA_INVALID_OPERATION; + case MA_AUDCLNT_S_BUFFER_EMPTY: return MA_NO_SPACE; + case MA_AUDCLNT_S_THREAD_ALREADY_REGISTERED: return MA_ALREADY_EXISTS; + case MA_AUDCLNT_S_POSITION_STALLED: return MA_ERROR; + + /* DirectSound */ + /*case MA_DS_OK: return MA_SUCCESS;*/ /* S_OK */ + case MA_DS_NO_VIRTUALIZATION: return MA_SUCCESS; + case MA_DSERR_ALLOCATED: return MA_ALREADY_IN_USE; + case MA_DSERR_CONTROLUNAVAIL: return MA_INVALID_OPERATION; + /*case MA_DSERR_INVALIDPARAM: return MA_INVALID_ARGS;*/ /* E_INVALIDARG */ + case MA_DSERR_INVALIDCALL: return MA_INVALID_OPERATION; + /*case MA_DSERR_GENERIC: return MA_ERROR;*/ /* E_FAIL */ + case MA_DSERR_PRIOLEVELNEEDED: return MA_INVALID_OPERATION; + /*case MA_DSERR_OUTOFMEMORY: return MA_OUT_OF_MEMORY;*/ /* E_OUTOFMEMORY */ + case MA_DSERR_BADFORMAT: return MA_FORMAT_NOT_SUPPORTED; + /*case MA_DSERR_UNSUPPORTED: return MA_NOT_IMPLEMENTED;*/ /* E_NOTIMPL */ + case MA_DSERR_NODRIVER: return MA_FAILED_TO_INIT_BACKEND; + case MA_DSERR_ALREADYINITIALIZED: return MA_DEVICE_ALREADY_INITIALIZED; + case MA_DSERR_NOAGGREGATION: return MA_ERROR; + case MA_DSERR_BUFFERLOST: return MA_UNAVAILABLE; + case MA_DSERR_OTHERAPPHASPRIO: return MA_ACCESS_DENIED; + case MA_DSERR_UNINITIALIZED: return MA_DEVICE_NOT_INITIALIZED; + /*case MA_DSERR_NOINTERFACE: return MA_API_NOT_FOUND;*/ /* E_NOINTERFACE */ + /*case MA_DSERR_ACCESSDENIED: return MA_ACCESS_DENIED;*/ /* E_ACCESSDENIED */ + case MA_DSERR_BUFFERTOOSMALL: return MA_NO_SPACE; + case MA_DSERR_DS8_REQUIRED: return MA_INVALID_OPERATION; + case MA_DSERR_SENDLOOP: return MA_DEADLOCK; + case MA_DSERR_BADSENDBUFFERGUID: return MA_INVALID_ARGS; + case MA_DSERR_OBJECTNOTFOUND: return MA_NO_DEVICE; + case MA_DSERR_FXUNAVAILABLE: return MA_UNAVAILABLE; + + default: return MA_ERROR; + } +} + +/* PROPVARIANT */ +#define MA_VT_LPWSTR 31 +#define MA_VT_BLOB 65 + +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(push) +#pragma warning(disable:4201) /* nonstandard extension used: nameless struct/union */ +#elif defined(__clang__) || (defined(__GNUC__) && 
(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpedantic" /* For ISO C99 doesn't support unnamed structs/unions [-Wpedantic] */ +#if defined(__clang__) +#pragma GCC diagnostic ignored "-Wc11-extensions" /* anonymous unions are a C11 extension */ +#endif +#endif +typedef struct +{ + WORD vt; + WORD wReserved1; + WORD wReserved2; + WORD wReserved3; + union + { + struct + { + ULONG cbSize; + BYTE* pBlobData; + } blob; + WCHAR* pwszVal; + char pad[16]; /* Just to ensure the size of the struct matches the official version. */ + }; +} MA_PROPVARIANT; +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(pop) +#elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) +#pragma GCC diagnostic pop +#endif + +typedef HRESULT (WINAPI * MA_PFN_CoInitialize)(void* pvReserved); +typedef HRESULT (WINAPI * MA_PFN_CoInitializeEx)(void* pvReserved, DWORD dwCoInit); +typedef void (WINAPI * MA_PFN_CoUninitialize)(void); +typedef HRESULT (WINAPI * MA_PFN_CoCreateInstance)(const IID* rclsid, void* pUnkOuter, DWORD dwClsContext, const IID* riid, void* ppv); +typedef void (WINAPI * MA_PFN_CoTaskMemFree)(void* pv); +typedef HRESULT (WINAPI * MA_PFN_PropVariantClear)(MA_PROPVARIANT *pvar); +typedef int (WINAPI * MA_PFN_StringFromGUID2)(const GUID* const rguid, WCHAR* lpsz, int cchMax); + +typedef HWND (WINAPI * MA_PFN_GetForegroundWindow)(void); +typedef HWND (WINAPI * MA_PFN_GetDesktopWindow)(void); + +#if defined(MA_WIN32_DESKTOP) +/* Microsoft documents these APIs as returning LSTATUS, but the Win32 API shipping with some compilers do not define it. It's just a LONG. */ +typedef LONG (WINAPI * MA_PFN_RegOpenKeyExA)(HKEY hKey, const char* lpSubKey, DWORD ulOptions, DWORD samDesired, HKEY* phkResult); +typedef LONG (WINAPI * MA_PFN_RegCloseKey)(HKEY hKey); +typedef LONG (WINAPI * MA_PFN_RegQueryValueExA)(HKEY hKey, const char* lpValueName, DWORD* lpReserved, DWORD* lpType, BYTE* lpData, DWORD* lpcbData); +#endif /* MA_WIN32_DESKTOP */ + + +MA_API size_t ma_strlen_WCHAR(const WCHAR* str) +{ + size_t len = 0; + while (str[len] != '\0') { + len += 1; + } + + return len; +} + +MA_API int ma_strcmp_WCHAR(const WCHAR *s1, const WCHAR *s2) +{ + while (*s1 != '\0' && *s1 == *s2) { + s1 += 1; + s2 += 1; + } + + return *s1 - *s2; +} + +MA_API int ma_strcpy_s_WCHAR(WCHAR* dst, size_t dstCap, const WCHAR* src) +{ + size_t i; + + if (dst == 0) { + return 22; + } + if (dstCap == 0) { + return 34; + } + if (src == 0) { + dst[0] = '\0'; + return 22; + } + + for (i = 0; i < dstCap && src[i] != '\0'; ++i) { + dst[i] = src[i]; + } + + if (i < dstCap) { + dst[i] = '\0'; + return 0; + } + + dst[0] = '\0'; + return 34; +} +#endif /* MA_WIN32 */ + + +#define MA_DEFAULT_PLAYBACK_DEVICE_NAME "Default Playback Device" +#define MA_DEFAULT_CAPTURE_DEVICE_NAME "Default Capture Device" + + + + +/******************************************************************************* + +Timing + +*******************************************************************************/ +#if defined(MA_WIN32) && !defined(MA_POSIX) +static LARGE_INTEGER g_ma_TimerFrequency; /* <-- Initialized to zero since it's static. 
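   (Aside on the WCHAR string helpers defined above under MA_WIN32: they follow an
   errno-style convention, returning 0 on success, 22 for invalid arguments and 34 when
   the destination is too small. A tiny sketch, Windows builds only, with illustrative
   strings:

       #include "miniaudio.h"

       int main(void)
       {
           WCHAR dst[8];

           if (ma_strcpy_s_WCHAR(dst, 8, L"abc") == 0 && ma_strcmp_WCHAR(dst, L"abc") == 0) {
               return 0;
           }
           return 1;
       }
   )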
*/ +void ma_timer_init(ma_timer* pTimer) +{ + LARGE_INTEGER counter; + + if (g_ma_TimerFrequency.QuadPart == 0) { + QueryPerformanceFrequency(&g_ma_TimerFrequency); + } + + QueryPerformanceCounter(&counter); + pTimer->counter = counter.QuadPart; +} + +double ma_timer_get_time_in_seconds(ma_timer* pTimer) +{ + LARGE_INTEGER counter; + if (!QueryPerformanceCounter(&counter)) { + return 0; + } + + return (double)(counter.QuadPart - pTimer->counter) / g_ma_TimerFrequency.QuadPart; +} +#elif defined(MA_APPLE) && (__MAC_OS_X_VERSION_MIN_REQUIRED < 101200) +static ma_uint64 g_ma_TimerFrequency = 0; +static void ma_timer_init(ma_timer* pTimer) +{ + mach_timebase_info_data_t baseTime; + mach_timebase_info(&baseTime); + g_ma_TimerFrequency = (baseTime.denom * 1e9) / baseTime.numer; + + pTimer->counter = mach_absolute_time(); +} + +static double ma_timer_get_time_in_seconds(ma_timer* pTimer) +{ + ma_uint64 newTimeCounter = mach_absolute_time(); + ma_uint64 oldTimeCounter = pTimer->counter; + + return (newTimeCounter - oldTimeCounter) / g_ma_TimerFrequency; +} +#elif defined(MA_EMSCRIPTEN) +static MA_INLINE void ma_timer_init(ma_timer* pTimer) +{ + pTimer->counterD = emscripten_get_now(); +} + +static MA_INLINE double ma_timer_get_time_in_seconds(ma_timer* pTimer) +{ + return (emscripten_get_now() - pTimer->counterD) / 1000; /* Emscripten is in milliseconds. */ +} +#else +#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 199309L +#if defined(CLOCK_MONOTONIC) +#define MA_CLOCK_ID CLOCK_MONOTONIC +#else +#define MA_CLOCK_ID CLOCK_REALTIME +#endif + +static void ma_timer_init(ma_timer* pTimer) +{ + struct timespec newTime; + clock_gettime(MA_CLOCK_ID, &newTime); + + pTimer->counter = (newTime.tv_sec * 1000000000) + newTime.tv_nsec; +} + +static double ma_timer_get_time_in_seconds(ma_timer* pTimer) +{ + ma_uint64 newTimeCounter; + ma_uint64 oldTimeCounter; + + struct timespec newTime; + clock_gettime(MA_CLOCK_ID, &newTime); + + newTimeCounter = (newTime.tv_sec * 1000000000) + newTime.tv_nsec; + oldTimeCounter = pTimer->counter; + + return (newTimeCounter - oldTimeCounter) / 1000000000.0; +} +#else +static void ma_timer_init(ma_timer* pTimer) +{ + struct timeval newTime; + gettimeofday(&newTime, NULL); + + pTimer->counter = (newTime.tv_sec * 1000000) + newTime.tv_usec; +} + +static double ma_timer_get_time_in_seconds(ma_timer* pTimer) +{ + ma_uint64 newTimeCounter; + ma_uint64 oldTimeCounter; + + struct timeval newTime; + gettimeofday(&newTime, NULL); + + newTimeCounter = (newTime.tv_sec * 1000000) + newTime.tv_usec; + oldTimeCounter = pTimer->counter; + + return (newTimeCounter - oldTimeCounter) / 1000000.0; +} +#endif +#endif + + + +#if 0 +static ma_uint32 ma_get_closest_standard_sample_rate(ma_uint32 sampleRateIn) +{ + ma_uint32 closestRate = 0; + ma_uint32 closestDiff = 0xFFFFFFFF; + size_t iStandardRate; + + for (iStandardRate = 0; iStandardRate < ma_countof(g_maStandardSampleRatePriorities); ++iStandardRate) { + ma_uint32 standardRate = g_maStandardSampleRatePriorities[iStandardRate]; + ma_uint32 diff; + + if (sampleRateIn > standardRate) { + diff = sampleRateIn - standardRate; + } else { + diff = standardRate - sampleRateIn; + } + + if (diff == 0) { + return standardRate; /* The input sample rate is a standard rate. 
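   (Aside: the POSIX timer paths above reduce to the usual monotonic-clock delta pattern.
   A standalone sketch of that pattern, assuming _POSIX_C_SOURCE >= 199309L and
   CLOCK_MONOTONIC are available; the work in the middle is elided.

       #include <stdio.h>
       #include <time.h>

       int main(void)
       {
           struct timespec start;
           struct timespec now;
           double elapsedSeconds;

           clock_gettime(CLOCK_MONOTONIC, &start);

           // ... do some work ...

           clock_gettime(CLOCK_MONOTONIC, &now);
           elapsedSeconds = (double)(now.tv_sec - start.tv_sec) + (now.tv_nsec - start.tv_nsec) / 1000000000.0;

           printf("elapsed: %f seconds\n", elapsedSeconds);
           return 0;
       }
   )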
*/ + } + + if (closestDiff > diff) { + closestDiff = diff; + closestRate = standardRate; + } + } + + return closestRate; +} +#endif + + +static MA_INLINE unsigned int ma_device_disable_denormals(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (!pDevice->noDisableDenormals) { + return ma_disable_denormals(); + } else { + return 0; + } +} + +static MA_INLINE void ma_device_restore_denormals(ma_device* pDevice, unsigned int prevState) +{ + MA_ASSERT(pDevice != NULL); + + if (!pDevice->noDisableDenormals) { + ma_restore_denormals(prevState); + } else { + /* Do nothing. */ + (void)prevState; + } +} + +static ma_device_notification ma_device_notification_init(ma_device* pDevice, ma_device_notification_type type) +{ + ma_device_notification notification; + + MA_ZERO_OBJECT(¬ification); + notification.pDevice = pDevice; + notification.type = type; + + return notification; +} + +static void ma_device__on_notification(ma_device_notification notification) +{ + MA_ASSERT(notification.pDevice != NULL); + + if (notification.pDevice->onNotification != NULL) { + notification.pDevice->onNotification(¬ification); + } + + /* TEMP FOR COMPATIBILITY: If it's a stopped notification, fire the onStop callback as well. This is only for backwards compatibility and will be removed. */ + if (notification.pDevice->onStop != NULL && notification.type == ma_device_notification_type_stopped) { + notification.pDevice->onStop(notification.pDevice); + } +} + +void ma_device__on_notification_started(ma_device* pDevice) +{ + ma_device__on_notification(ma_device_notification_init(pDevice, ma_device_notification_type_started)); +} + +void ma_device__on_notification_stopped(ma_device* pDevice) +{ + ma_device__on_notification(ma_device_notification_init(pDevice, ma_device_notification_type_stopped)); +} + +void ma_device__on_notification_rerouted(ma_device* pDevice) +{ + ma_device__on_notification(ma_device_notification_init(pDevice, ma_device_notification_type_rerouted)); +} + +void ma_device__on_notification_interruption_began(ma_device* pDevice) +{ + ma_device__on_notification(ma_device_notification_init(pDevice, ma_device_notification_type_interruption_began)); +} + +void ma_device__on_notification_interruption_ended(ma_device* pDevice) +{ + ma_device__on_notification(ma_device_notification_init(pDevice, ma_device_notification_type_interruption_ended)); +} + + +static void ma_device__on_data_inner(ma_device* pDevice, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount) +{ + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pDevice->onData != NULL); + + if (!pDevice->noPreSilencedOutputBuffer && pFramesOut != NULL) { + ma_silence_pcm_frames(pFramesOut, frameCount, pDevice->playback.format, pDevice->playback.channels); + } + + pDevice->onData(pDevice, pFramesOut, pFramesIn, frameCount); +} + +static void ma_device__on_data(ma_device* pDevice, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount) +{ + MA_ASSERT(pDevice != NULL); + + /* Don't read more data from the client if we're in the process of stopping. */ + if (ma_device_get_state(pDevice) == ma_device_state_stopping) { + return; + } + + if (pDevice->noFixedSizedCallback) { + /* Fast path. Not using a fixed sized callback. Process directly from the specified buffers. */ + ma_device__on_data_inner(pDevice, pFramesOut, pFramesIn, frameCount); + } else { + /* Slow path. Using a fixed sized callback. Need to use the intermediary buffer. 
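   The fixed sized callback path exists so the client always receives exactly
   periodSizeInFrames frames per call, regardless of how many frames the backend delivers.
   A sketch of how a client might opt in or out at config time (field names as used
   elsewhere in this file; treat as illustrative, my_data_callback is a placeholder):

       ma_device_config config = ma_device_config_init(ma_device_type_playback);
       config.periodSizeInFrames   = 256;      // fixed callback size
       config.noFixedSizedCallback = MA_FALSE; // default: miniaudio buffers for us
       config.dataCallback         = my_data_callback;

   Setting noFixedSizedCallback to MA_TRUE selects the fast path above and the callback then
   receives whatever frame count the backend happens to deliver.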
*/ + ma_uint32 totalFramesProcessed = 0; + + while (totalFramesProcessed < frameCount) { + ma_uint32 totalFramesRemaining = frameCount - totalFramesProcessed; + ma_uint32 framesToProcessThisIteration = 0; + + if (pFramesIn != NULL) { + /* Capturing. Write to the intermediary buffer. If there's no room, fire the callback to empty it. */ + if (pDevice->capture.intermediaryBufferLen < pDevice->capture.intermediaryBufferCap) { + /* There's some room left in the intermediary buffer. Write to it without firing the callback. */ + framesToProcessThisIteration = totalFramesRemaining; + if (framesToProcessThisIteration > pDevice->capture.intermediaryBufferCap - pDevice->capture.intermediaryBufferLen) { + framesToProcessThisIteration = pDevice->capture.intermediaryBufferCap - pDevice->capture.intermediaryBufferLen; + } + + ma_copy_pcm_frames( + ma_offset_pcm_frames_ptr(pDevice->capture.pIntermediaryBuffer, pDevice->capture.intermediaryBufferLen, pDevice->capture.format, pDevice->capture.channels), + ma_offset_pcm_frames_const_ptr(pFramesIn, totalFramesProcessed, pDevice->capture.format, pDevice->capture.channels), + framesToProcessThisIteration, + pDevice->capture.format, pDevice->capture.channels); + + pDevice->capture.intermediaryBufferLen += framesToProcessThisIteration; + } + + if (pDevice->capture.intermediaryBufferLen == pDevice->capture.intermediaryBufferCap) { + /* No room left in the intermediary buffer. Fire the data callback. */ + if (pDevice->type == ma_device_type_duplex) { + /* We'll do the duplex data callback later after we've processed the playback data. */ + } else { + ma_device__on_data_inner(pDevice, NULL, pDevice->capture.pIntermediaryBuffer, pDevice->capture.intermediaryBufferCap); + + /* The intermediary buffer has just been drained. */ + pDevice->capture.intermediaryBufferLen = 0; + } + } + } + + if (pFramesOut != NULL) { + /* Playing back. Read from the intermediary buffer. If there's nothing in it, fire the callback to fill it. */ + if (pDevice->playback.intermediaryBufferLen > 0) { + /* There's some content in the intermediary buffer. Read from that without firing the callback. */ + if (pDevice->type == ma_device_type_duplex) { + /* The frames processed this iteration for a duplex device will always be based on the capture side. Leave it unmodified. */ + } else { + framesToProcessThisIteration = totalFramesRemaining; + if (framesToProcessThisIteration > pDevice->playback.intermediaryBufferLen) { + framesToProcessThisIteration = pDevice->playback.intermediaryBufferLen; + } + } + + ma_copy_pcm_frames( + ma_offset_pcm_frames_ptr(pFramesOut, totalFramesProcessed, pDevice->playback.format, pDevice->playback.channels), + ma_offset_pcm_frames_ptr(pDevice->playback.pIntermediaryBuffer, pDevice->playback.intermediaryBufferCap - pDevice->playback.intermediaryBufferLen, pDevice->playback.format, pDevice->playback.channels), + framesToProcessThisIteration, + pDevice->playback.format, pDevice->playback.channels); + + pDevice->playback.intermediaryBufferLen -= framesToProcessThisIteration; + } + + if (pDevice->playback.intermediaryBufferLen == 0) { + /* There's nothing in the intermediary buffer. Fire the data callback to fill it. */ + if (pDevice->type == ma_device_type_duplex) { + /* In duplex mode, the data callback will be fired later. Nothing to do here. */ + } else { + ma_device__on_data_inner(pDevice, pDevice->playback.pIntermediaryBuffer, NULL, pDevice->playback.intermediaryBufferCap); + + /* The intermediary buffer has just been filled. 
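   Invariant for the playback side of this loop: intermediaryBufferLen counts frames not yet
   sent to the device, and the read position within the buffer is
   (intermediaryBufferCap - intermediaryBufferLen). For example, with a capacity of 256
   frames and 100 frames still to play, the next read starts at frame offset 156; resetting
   the length to the capacity below restarts reads from offset 0.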
*/ + pDevice->playback.intermediaryBufferLen = pDevice->playback.intermediaryBufferCap; + } + } + } + + /* If we're in duplex mode we might need to do a refill of the data. */ + if (pDevice->type == ma_device_type_duplex) { + if (pDevice->capture.intermediaryBufferLen == pDevice->capture.intermediaryBufferCap) { + ma_device__on_data_inner(pDevice, pDevice->playback.pIntermediaryBuffer, pDevice->capture.pIntermediaryBuffer, pDevice->capture.intermediaryBufferCap); + + pDevice->playback.intermediaryBufferLen = pDevice->playback.intermediaryBufferCap; /* The playback buffer will have just been filled. */ + pDevice->capture.intermediaryBufferLen = 0; /* The intermediary buffer has just been drained. */ + } + } + + /* Make sure this is only incremented once in the duplex case. */ + totalFramesProcessed += framesToProcessThisIteration; + } + } +} + +static void ma_device__handle_data_callback(ma_device* pDevice, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount) +{ + float masterVolumeFactor; + + ma_device_get_master_volume(pDevice, &masterVolumeFactor); /* Use ma_device_get_master_volume() to ensure the volume is loaded atomically. */ + + if (pDevice->onData) { + unsigned int prevDenormalState = ma_device_disable_denormals(pDevice); + { + /* Volume control of input makes things a bit awkward because the input buffer is read-only. We'll need to use a temp buffer and loop in this case. */ + if (pFramesIn != NULL && masterVolumeFactor < 1) { + ma_uint8 tempFramesIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 bpfCapture = ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 bpfPlayback = ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint32 totalFramesProcessed = 0; + while (totalFramesProcessed < frameCount) { + ma_uint32 framesToProcessThisIteration = frameCount - totalFramesProcessed; + if (framesToProcessThisIteration > sizeof(tempFramesIn)/bpfCapture) { + framesToProcessThisIteration = sizeof(tempFramesIn)/bpfCapture; + } + + ma_copy_and_apply_volume_factor_pcm_frames(tempFramesIn, ma_offset_ptr(pFramesIn, totalFramesProcessed*bpfCapture), framesToProcessThisIteration, pDevice->capture.format, pDevice->capture.channels, masterVolumeFactor); + + ma_device__on_data(pDevice, ma_offset_ptr(pFramesOut, totalFramesProcessed*bpfPlayback), tempFramesIn, framesToProcessThisIteration); + + totalFramesProcessed += framesToProcessThisIteration; + } + } else { + ma_device__on_data(pDevice, pFramesOut, pFramesIn, frameCount); + } + + /* Volume control and clipping for playback devices. */ + if (pFramesOut != NULL) { + if (masterVolumeFactor < 1) { + if (pFramesIn == NULL) { /* <-- In full-duplex situations, the volume will have been applied to the input samples before the data callback. Applying it again post-callback will incorrectly compound it. */ + ma_apply_volume_factor_pcm_frames(pFramesOut, frameCount, pDevice->playback.format, pDevice->playback.channels, masterVolumeFactor); + } + } + + if (!pDevice->noClip && pDevice->playback.format == ma_format_f32) { + ma_clip_samples_f32((float*)pFramesOut, (const float*)pFramesOut, frameCount * pDevice->playback.channels); /* Intentionally specifying the same pointer for both input and output for in-place processing. */ + } + } + } + ma_device_restore_denormals(pDevice, prevDenormalState); + } +} + + + +/* A helper function for reading sample data from the client. 
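   "Reading from the client" here means firing the data callback to pull frames in client
   format and then converting them to the device's internal format. The conversion step is
   the usual pattern of calling ma_data_converter_process_pcm_frames() in a loop, which in
   isolation looks roughly like this (sketch only; placeholder names, error handling
   omitted):

       ma_uint64 framesIn  = clientFramesAvailable;   // in/out: consumed on return
       ma_uint64 framesOut = deviceFramesWanted;      // in/out: produced on return
       ma_data_converter_process_pcm_frames(&converter, pClientFrames, &framesIn, pDeviceFrames, &framesOut);

   Both counts are updated in place, which is why the loops below track how much input was
   consumed and how much output was produced on each iteration.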
*/ +static void ma_device__read_frames_from_client(ma_device* pDevice, ma_uint32 frameCount, void* pFramesOut) +{ + MA_ASSERT(pDevice != NULL); + MA_ASSERT(frameCount > 0); + MA_ASSERT(pFramesOut != NULL); + + if (pDevice->playback.converter.isPassthrough) { + ma_device__handle_data_callback(pDevice, pFramesOut, NULL, frameCount); + } else { + ma_result result; + ma_uint64 totalFramesReadOut; + void* pRunningFramesOut; + + totalFramesReadOut = 0; + pRunningFramesOut = pFramesOut; + + /* + We run slightly different logic depending on whether or not we're using a heap-allocated + buffer for caching input data. This will be the case if the data converter does not have + the ability to retrieve the required input frame count for a given output frame count. + */ + if (pDevice->playback.pInputCache != NULL) { + while (totalFramesReadOut < frameCount) { + ma_uint64 framesToReadThisIterationIn; + ma_uint64 framesToReadThisIterationOut; + + /* If there's any data available in the cache, that needs to get processed first. */ + if (pDevice->playback.inputCacheRemaining > 0) { + framesToReadThisIterationOut = (frameCount - totalFramesReadOut); + framesToReadThisIterationIn = framesToReadThisIterationOut; + if (framesToReadThisIterationIn > pDevice->playback.inputCacheRemaining) { + framesToReadThisIterationIn = pDevice->playback.inputCacheRemaining; + } + + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, ma_offset_pcm_frames_ptr(pDevice->playback.pInputCache, pDevice->playback.inputCacheConsumed, pDevice->playback.format, pDevice->playback.channels), &framesToReadThisIterationIn, pRunningFramesOut, &framesToReadThisIterationOut); + if (result != MA_SUCCESS) { + break; + } + + pDevice->playback.inputCacheConsumed += framesToReadThisIterationIn; + pDevice->playback.inputCacheRemaining -= framesToReadThisIterationIn; + + totalFramesReadOut += framesToReadThisIterationOut; + pRunningFramesOut = ma_offset_ptr(pRunningFramesOut, framesToReadThisIterationOut * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + + if (framesToReadThisIterationIn == 0 && framesToReadThisIterationOut == 0) { + break; /* We're done. */ + } + } + + /* Getting here means there's no data in the cache and we need to fill it up with data from the client. */ + if (pDevice->playback.inputCacheRemaining == 0) { + ma_device__handle_data_callback(pDevice, pDevice->playback.pInputCache, NULL, (ma_uint32)pDevice->playback.inputCacheCap); + + pDevice->playback.inputCacheConsumed = 0; + pDevice->playback.inputCacheRemaining = pDevice->playback.inputCacheCap; + } + } + } else { + while (totalFramesReadOut < frameCount) { + ma_uint8 pIntermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In client format. 
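   The capacity calculation on the next line is just bytes divided by bytes per frame. As a
   hypothetical example, if MA_DATA_CONVERTER_STACK_BUFFER_SIZE were 4096 and the client
   format were f32 stereo (8 bytes per frame), the intermediary buffer would hold 512 frames
   per iteration of this loop.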
*/ + ma_uint64 intermediaryBufferCap = sizeof(pIntermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint64 framesToReadThisIterationIn; + ma_uint64 framesReadThisIterationIn; + ma_uint64 framesToReadThisIterationOut; + ma_uint64 framesReadThisIterationOut; + ma_uint64 requiredInputFrameCount; + + framesToReadThisIterationOut = (frameCount - totalFramesReadOut); + framesToReadThisIterationIn = framesToReadThisIterationOut; + if (framesToReadThisIterationIn > intermediaryBufferCap) { + framesToReadThisIterationIn = intermediaryBufferCap; + } + + ma_data_converter_get_required_input_frame_count(&pDevice->playback.converter, framesToReadThisIterationOut, &requiredInputFrameCount); + if (framesToReadThisIterationIn > requiredInputFrameCount) { + framesToReadThisIterationIn = requiredInputFrameCount; + } + + if (framesToReadThisIterationIn > 0) { + ma_device__handle_data_callback(pDevice, pIntermediaryBuffer, NULL, (ma_uint32)framesToReadThisIterationIn); + } + + /* + At this point we have our decoded data in input format and now we need to convert to output format. Note that even if we didn't read any + input frames, we still want to try processing frames because there may some output frames generated from cached input data. + */ + framesReadThisIterationIn = framesToReadThisIterationIn; + framesReadThisIterationOut = framesToReadThisIterationOut; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, pIntermediaryBuffer, &framesReadThisIterationIn, pRunningFramesOut, &framesReadThisIterationOut); + if (result != MA_SUCCESS) { + break; + } + + totalFramesReadOut += framesReadThisIterationOut; + pRunningFramesOut = ma_offset_ptr(pRunningFramesOut, framesReadThisIterationOut * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + + if (framesReadThisIterationIn == 0 && framesReadThisIterationOut == 0) { + break; /* We're done. */ + } + } + } + } +} + +/* A helper for sending sample data to the client. */ +static void ma_device__send_frames_to_client(ma_device* pDevice, ma_uint32 frameCountInDeviceFormat, const void* pFramesInDeviceFormat) +{ + MA_ASSERT(pDevice != NULL); + MA_ASSERT(frameCountInDeviceFormat > 0); + MA_ASSERT(pFramesInDeviceFormat != NULL); + + if (pDevice->capture.converter.isPassthrough) { + ma_device__handle_data_callback(pDevice, NULL, pFramesInDeviceFormat, frameCountInDeviceFormat); + } else { + ma_result result; + ma_uint8 pFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint64 framesInClientFormatCap = sizeof(pFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint64 totalDeviceFramesProcessed = 0; + ma_uint64 totalClientFramesProcessed = 0; + const void* pRunningFramesInDeviceFormat = pFramesInDeviceFormat; + + /* We just keep going until we've exhaused all of our input frames and cannot generate any more output frames. 
*/ + for (;;) { + ma_uint64 deviceFramesProcessedThisIteration; + ma_uint64 clientFramesProcessedThisIteration; + + deviceFramesProcessedThisIteration = (frameCountInDeviceFormat - totalDeviceFramesProcessed); + clientFramesProcessedThisIteration = framesInClientFormatCap; + + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningFramesInDeviceFormat, &deviceFramesProcessedThisIteration, pFramesInClientFormat, &clientFramesProcessedThisIteration); + if (result != MA_SUCCESS) { + break; + } + + if (clientFramesProcessedThisIteration > 0) { + ma_device__handle_data_callback(pDevice, NULL, pFramesInClientFormat, (ma_uint32)clientFramesProcessedThisIteration); /* Safe cast. */ + } + + pRunningFramesInDeviceFormat = ma_offset_ptr(pRunningFramesInDeviceFormat, deviceFramesProcessedThisIteration * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + totalDeviceFramesProcessed += deviceFramesProcessedThisIteration; + totalClientFramesProcessed += clientFramesProcessedThisIteration; + + /* This is just to silence a warning. I might want to use this variable later so leaving in place for now. */ + (void)totalClientFramesProcessed; + + if (deviceFramesProcessedThisIteration == 0 && clientFramesProcessedThisIteration == 0) { + break; /* We're done. */ + } + } + } +} + +static ma_result ma_device__handle_duplex_callback_capture(ma_device* pDevice, ma_uint32 frameCountInDeviceFormat, const void* pFramesInDeviceFormat, ma_pcm_rb* pRB) +{ + ma_result result; + ma_uint32 totalDeviceFramesProcessed = 0; + const void* pRunningFramesInDeviceFormat = pFramesInDeviceFormat; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(frameCountInDeviceFormat > 0); + MA_ASSERT(pFramesInDeviceFormat != NULL); + MA_ASSERT(pRB != NULL); + + /* Write to the ring buffer. The ring buffer is in the client format which means we need to convert. */ + for (;;) { + ma_uint32 framesToProcessInDeviceFormat = (frameCountInDeviceFormat - totalDeviceFramesProcessed); + ma_uint32 framesToProcessInClientFormat = MA_DATA_CONVERTER_STACK_BUFFER_SIZE / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint64 framesProcessedInDeviceFormat; + ma_uint64 framesProcessedInClientFormat; + void* pFramesInClientFormat; + + result = ma_pcm_rb_acquire_write(pRB, &framesToProcessInClientFormat, &pFramesInClientFormat); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "Failed to acquire capture PCM frames from ring buffer."); + break; + } + + if (framesToProcessInClientFormat == 0) { + if (ma_pcm_rb_pointer_distance(pRB) == (ma_int32)ma_pcm_rb_get_subbuffer_size(pRB)) { + break; /* Overrun. Not enough room in the ring buffer for input frame. Excess frames are dropped. */ + } + } + + /* Convert. */ + framesProcessedInDeviceFormat = framesToProcessInDeviceFormat; + framesProcessedInClientFormat = framesToProcessInClientFormat; + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningFramesInDeviceFormat, &framesProcessedInDeviceFormat, pFramesInClientFormat, &framesProcessedInClientFormat); + if (result != MA_SUCCESS) { + break; + } + + result = ma_pcm_rb_commit_write(pRB, (ma_uint32)framesProcessedInClientFormat); /* Safe cast. 
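   This function follows the usual ma_pcm_rb producer pattern: acquire a writable region,
   fill it, then commit exactly what was written. Reduced to its essentials (illustrative
   only, placeholder names):

       ma_uint32 frames = framesIWantToWrite;
       void* pDst;
       if (ma_pcm_rb_acquire_write(pRB, &frames, &pDst) == MA_SUCCESS) {
           fill_frames(pDst, frames);            // write up to `frames` frames
           ma_pcm_rb_commit_write(pRB, frames);  // commit what was actually written
       }

   The capture side is the producer here and the playback-side duplex handler below is the
   consumer, using the matching acquire_read and commit_read calls.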
*/ + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "Failed to commit capture PCM frames to ring buffer."); + break; + } + + pRunningFramesInDeviceFormat = ma_offset_ptr(pRunningFramesInDeviceFormat, framesProcessedInDeviceFormat * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + totalDeviceFramesProcessed += (ma_uint32)framesProcessedInDeviceFormat; /* Safe cast. */ + + /* We're done when we're unable to process any client nor device frames. */ + if (framesProcessedInClientFormat == 0 && framesProcessedInDeviceFormat == 0) { + break; /* Done. */ + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device__handle_duplex_callback_playback(ma_device* pDevice, ma_uint32 frameCount, void* pFramesInInternalFormat, ma_pcm_rb* pRB) +{ + ma_result result; + ma_uint8 silentInputFrames[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 totalFramesReadOut = 0; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(frameCount > 0); + MA_ASSERT(pFramesInInternalFormat != NULL); + MA_ASSERT(pRB != NULL); + MA_ASSERT(pDevice->playback.pInputCache != NULL); + + /* + Sitting in the ring buffer should be captured data from the capture callback in external format. If there's not enough data in there for + the whole frameCount frames we just use silence instead for the input data. + */ + MA_ZERO_MEMORY(silentInputFrames, sizeof(silentInputFrames)); + + while (totalFramesReadOut < frameCount && ma_device_is_started(pDevice)) { + /* + We should have a buffer allocated on the heap. Any playback frames still sitting in there + need to be sent to the internal device before we process any more data from the client. + */ + if (pDevice->playback.inputCacheRemaining > 0) { + ma_uint64 framesConvertedIn = pDevice->playback.inputCacheRemaining; + ma_uint64 framesConvertedOut = (frameCount - totalFramesReadOut); + ma_data_converter_process_pcm_frames(&pDevice->playback.converter, ma_offset_pcm_frames_ptr(pDevice->playback.pInputCache, pDevice->playback.inputCacheConsumed, pDevice->playback.format, pDevice->playback.channels), &framesConvertedIn, pFramesInInternalFormat, &framesConvertedOut); + + pDevice->playback.inputCacheConsumed += framesConvertedIn; + pDevice->playback.inputCacheRemaining -= framesConvertedIn; + + totalFramesReadOut += (ma_uint32)framesConvertedOut; /* Safe cast. */ + pFramesInInternalFormat = ma_offset_ptr(pFramesInInternalFormat, framesConvertedOut * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + } + + /* If there's no more data in the cache we'll need to fill it with some. */ + if (totalFramesReadOut < frameCount && pDevice->playback.inputCacheRemaining == 0) { + ma_uint32 inputFrameCount; + void* pInputFrames; + + inputFrameCount = (ma_uint32)pDevice->playback.inputCacheCap; + result = ma_pcm_rb_acquire_read(pRB, &inputFrameCount, &pInputFrames); + if (result == MA_SUCCESS) { + if (inputFrameCount > 0) { + ma_device__handle_data_callback(pDevice, pDevice->playback.pInputCache, pInputFrames, inputFrameCount); + } else { + if (ma_pcm_rb_pointer_distance(pRB) == 0) { + break; /* Underrun. */ + } + } + } else { + /* No capture data available. Feed in silence. 
*/ + inputFrameCount = (ma_uint32)ma_min(pDevice->playback.inputCacheCap, sizeof(silentInputFrames) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)); + ma_device__handle_data_callback(pDevice, pDevice->playback.pInputCache, silentInputFrames, inputFrameCount); + } + + pDevice->playback.inputCacheConsumed = 0; + pDevice->playback.inputCacheRemaining = inputFrameCount; + + result = ma_pcm_rb_commit_read(pRB, inputFrameCount); + if (result != MA_SUCCESS) { + return result; /* Should never happen. */ + } + } + } + + return MA_SUCCESS; +} + +/* A helper for changing the state of the device. */ +static MA_INLINE void ma_device__set_state(ma_device* pDevice, ma_device_state newState) +{ + ma_atomic_device_state_set(&pDevice->state, newState); +} + + +#if defined(MA_WIN32) +GUID MA_GUID_KSDATAFORMAT_SUBTYPE_PCM = {0x00000001, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}}; +GUID MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT = {0x00000003, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}}; +/*GUID MA_GUID_KSDATAFORMAT_SUBTYPE_ALAW = {0x00000006, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};*/ +/*GUID MA_GUID_KSDATAFORMAT_SUBTYPE_MULAW = {0x00000007, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};*/ +#endif + + + +MA_API ma_uint32 ma_get_format_priority_index(ma_format format) /* Lower = better. */ +{ + ma_uint32 i; + for (i = 0; i < ma_countof(g_maFormatPriorities); ++i) { + if (g_maFormatPriorities[i] == format) { + return i; + } + } + + /* Getting here means the format could not be found or is equal to ma_format_unknown. */ + return (ma_uint32)-1; +} + +static ma_result ma_device__post_init_setup(ma_device* pDevice, ma_device_type deviceType); + +static ma_bool32 ma_device_descriptor_is_valid(const ma_device_descriptor* pDeviceDescriptor) +{ + if (pDeviceDescriptor == NULL) { + return MA_FALSE; + } + + if (pDeviceDescriptor->format == ma_format_unknown) { + return MA_FALSE; + } + + if (pDeviceDescriptor->channels == 0 || pDeviceDescriptor->channels > MA_MAX_CHANNELS) { + return MA_FALSE; + } + + if (pDeviceDescriptor->sampleRate == 0) { + return MA_FALSE; + } + + return MA_TRUE; +} + + +static ma_result ma_device_audio_thread__default_read_write(ma_device* pDevice) +{ + ma_result result = MA_SUCCESS; + ma_bool32 exitLoop = MA_FALSE; + ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedDeviceDataCapInFrames = 0; + ma_uint32 playbackDeviceDataCapInFrames = 0; + + MA_ASSERT(pDevice != NULL); + + /* Just some quick validation on the device type and the available callbacks. */ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) { + if (pDevice->pContext->callbacks.onDeviceRead == NULL) { + return MA_NOT_IMPLEMENTED; + } + + capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + if (pDevice->pContext->callbacks.onDeviceWrite == NULL) { + return MA_NOT_IMPLEMENTED; + } + + playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + } + + /* NOTE: The device was started outside of this function, in the worker thread. 
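   The loop below is the generic driver for backends that expose a blocking read/write API
   rather than their own device loop. The overall shape of one duplex iteration, sketched
   informally:

       onDeviceRead(device frames)                          // pull capture data from the backend
       convert device format -> client format (capture converter)
       data callback (client capture in, client playback out)
       convert client format -> device format (playback converter)
       onDeviceWrite(device frames)                         // push playback data to the backend

   Capture-only and playback-only devices simply run the first or second half of that
   pipeline respectively.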
*/ + + while (ma_device_get_state(pDevice) == ma_device_state_started && !exitLoop) { + switch (pDevice->type) { + case ma_device_type_duplex: + { + /* The process is: onDeviceRead() -> convert -> callback -> convert -> onDeviceWrite() */ + ma_uint32 totalCapturedDeviceFramesProcessed = 0; + ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalPeriodSizeInFrames, pDevice->playback.internalPeriodSizeInFrames); + + while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) { + ma_uint32 capturedDeviceFramesRemaining; + ma_uint32 capturedDeviceFramesProcessed; + ma_uint32 capturedDeviceFramesToProcess; + ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed; + if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) { + capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames; + } + + result = pDevice->pContext->callbacks.onDeviceRead(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + capturedDeviceFramesRemaining = capturedDeviceFramesToProcess; + capturedDeviceFramesProcessed = 0; + + /* At this point we have our captured data in device format and we now need to convert it to client format. */ + for (;;) { + ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames); + ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining; + ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + + /* Convert capture data from device format to client format. */ + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + break; + } + + /* + If we weren't able to generate any output frames it must mean we've exhaused all of our input. The only time this would not be the case is if capturedClientData was too small + which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE. + */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + + ma_device__handle_data_callback(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast .*/ + + capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + + /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. 
*/ + for (;;) { + ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration; + ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount); + if (result != MA_SUCCESS) { + break; + } + + result = pDevice->pContext->callbacks.onDeviceWrite(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + } + + /* In case an error happened from ma_device_write__null()... */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + } + + /* Make sure we don't get stuck in the inner loop. */ + if (capturedDeviceFramesProcessed == 0) { + break; + } + + totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed; + } + } break; + + case ma_device_type_capture: + case ma_device_type_loopback: + { + ma_uint32 periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames; + ma_uint32 framesReadThisPeriod = 0; + while (framesReadThisPeriod < periodSizeInFrames) { + ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesReadThisPeriod; + ma_uint32 framesProcessed; + ma_uint32 framesToReadThisIteration = framesRemainingInPeriod; + if (framesToReadThisIteration > capturedDeviceDataCapInFrames) { + framesToReadThisIteration = capturedDeviceDataCapInFrames; + } + + result = pDevice->pContext->callbacks.onDeviceRead(pDevice, capturedDeviceData, framesToReadThisIteration, &framesProcessed); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + /* Make sure we don't get stuck in the inner loop. */ + if (framesProcessed == 0) { + break; + } + + ma_device__send_frames_to_client(pDevice, framesProcessed, capturedDeviceData); + + framesReadThisPeriod += framesProcessed; + } + } break; + + case ma_device_type_playback: + { + /* We write in chunks of the period size, but use a stack allocated buffer for the intermediary. */ + ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames; + ma_uint32 framesWrittenThisPeriod = 0; + while (framesWrittenThisPeriod < periodSizeInFrames) { + ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesWrittenThisPeriod; + ma_uint32 framesProcessed; + ma_uint32 framesToWriteThisIteration = framesRemainingInPeriod; + if (framesToWriteThisIteration > playbackDeviceDataCapInFrames) { + framesToWriteThisIteration = playbackDeviceDataCapInFrames; + } + + ma_device__read_frames_from_client(pDevice, framesToWriteThisIteration, playbackDeviceData); + + result = pDevice->pContext->callbacks.onDeviceWrite(pDevice, playbackDeviceData, framesToWriteThisIteration, &framesProcessed); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + /* Make sure we don't get stuck in the inner loop. */ + if (framesProcessed == 0) { + break; + } + + framesWrittenThisPeriod += framesProcessed; + } + } break; + + /* Should never get here. 
*/ + default: break; + } + } + + return result; +} + + + +/******************************************************************************* + +Null Backend + +*******************************************************************************/ +#ifdef MA_HAS_NULL + +#define MA_DEVICE_OP_NONE__NULL 0 +#define MA_DEVICE_OP_START__NULL 1 +#define MA_DEVICE_OP_SUSPEND__NULL 2 +#define MA_DEVICE_OP_KILL__NULL 3 + +static ma_thread_result MA_THREADCALL ma_device_thread__null(void* pData) +{ + ma_device* pDevice = (ma_device*)pData; + MA_ASSERT(pDevice != NULL); + + for (;;) { /* Keep the thread alive until the device is uninitialized. */ + ma_uint32 operation; + + /* Wait for an operation to be requested. */ + ma_event_wait(&pDevice->null_device.operationEvent); + + /* At this point an event should have been triggered. */ + operation = pDevice->null_device.operation; + + /* Starting the device needs to put the thread into a loop. */ + if (operation == MA_DEVICE_OP_START__NULL) { + /* Reset the timer just in case. */ + ma_timer_init(&pDevice->null_device.timer); + + /* Getting here means a suspend or kill operation has been requested. */ + pDevice->null_device.operationResult = MA_SUCCESS; + ma_event_signal(&pDevice->null_device.operationCompletionEvent); + ma_semaphore_release(&pDevice->null_device.operationSemaphore); + continue; + } + + /* Suspending the device means we need to stop the timer and just continue the loop. */ + if (operation == MA_DEVICE_OP_SUSPEND__NULL) { + /* We need to add the current run time to the prior run time, then reset the timer. */ + pDevice->null_device.priorRunTime += ma_timer_get_time_in_seconds(&pDevice->null_device.timer); + ma_timer_init(&pDevice->null_device.timer); + + /* We're done. */ + pDevice->null_device.operationResult = MA_SUCCESS; + ma_event_signal(&pDevice->null_device.operationCompletionEvent); + ma_semaphore_release(&pDevice->null_device.operationSemaphore); + continue; + } + + /* Killing the device means we need to get out of this loop so that this thread can terminate. */ + if (operation == MA_DEVICE_OP_KILL__NULL) { + pDevice->null_device.operationResult = MA_SUCCESS; + ma_event_signal(&pDevice->null_device.operationCompletionEvent); + ma_semaphore_release(&pDevice->null_device.operationSemaphore); + break; + } + + /* Getting a signal on a "none" operation probably means an error. Return invalid operation. */ + if (operation == MA_DEVICE_OP_NONE__NULL) { + MA_ASSERT(MA_FALSE); /* <-- Trigger this in debug mode to ensure developers are aware they're doing something wrong (or there's a bug in a miniaudio). */ + pDevice->null_device.operationResult = MA_INVALID_OPERATION; + ma_event_signal(&pDevice->null_device.operationCompletionEvent); + ma_semaphore_release(&pDevice->null_device.operationSemaphore); + continue; /* Continue the loop. Don't terminate. */ + } + } + + return (ma_thread_result)0; +} + +static ma_result ma_device_do_operation__null(ma_device* pDevice, ma_uint32 operation) +{ + ma_result result; + + /* + TODO: Need to review this and consider just using mutual exclusion. I think the original motivation + for this was to just post the event to a queue and return immediately, but that has since changed + and now this function is synchronous. I think this can be simplified to just use a mutex. + */ + + /* + The first thing to do is wait for an operation slot to become available. We only have a single slot for this, but we could extend this later + to support queing of operations. 
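   For clarity, the full handshake between this function and ma_device_thread__null() is:

       caller:  ma_semaphore_wait(operationSemaphore)       // claim the single operation slot
       caller:  operation = op; ma_event_signal(operationEvent)
       worker:  wakes up, performs op, sets operationResult
       worker:  ma_event_signal(operationCompletionEvent); ma_semaphore_release(operationSemaphore)
       caller:  ma_event_wait(operationCompletionEvent); return operationResult

   Because the semaphore is initialized with a count of 1, only one operation can be in
   flight at a time, which is what makes the whole thing behave synchronously.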
+ */ + result = ma_semaphore_wait(&pDevice->null_device.operationSemaphore); + if (result != MA_SUCCESS) { + return result; /* Failed to wait for the event. */ + } + + /* + When we get here it means the background thread is not referencing the operation code and it can be changed. After changing this we need to + signal an event to the worker thread to let it know that it can start work. + */ + pDevice->null_device.operation = operation; + + /* Once the operation code has been set, the worker thread can start work. */ + if (ma_event_signal(&pDevice->null_device.operationEvent) != MA_SUCCESS) { + return MA_ERROR; + } + + /* We want everything to be synchronous so we're going to wait for the worker thread to complete it's operation. */ + if (ma_event_wait(&pDevice->null_device.operationCompletionEvent) != MA_SUCCESS) { + return MA_ERROR; + } + + return pDevice->null_device.operationResult; +} + +static ma_uint64 ma_device_get_total_run_time_in_frames__null(ma_device* pDevice) +{ + ma_uint32 internalSampleRate; + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + internalSampleRate = pDevice->capture.internalSampleRate; + } else { + internalSampleRate = pDevice->playback.internalSampleRate; + } + + return (ma_uint64)((pDevice->null_device.priorRunTime + ma_timer_get_time_in_seconds(&pDevice->null_device.timer)) * internalSampleRate); +} + +static ma_result ma_context_enumerate_devices__null(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 cbResult = MA_TRUE; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), "NULL Playback Device", (size_t)-1); + deviceInfo.isDefault = MA_TRUE; /* Only one playback and capture device for the null backend, so might as well mark as default. */ + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + /* Capture. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), "NULL Capture Device", (size_t)-1); + deviceInfo.isDefault = MA_TRUE; /* Only one playback and capture device for the null backend, so might as well mark as default. */ + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + + (void)cbResult; /* Silence a static analysis warning. */ + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__null(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + MA_ASSERT(pContext != NULL); + + if (pDeviceID != NULL && pDeviceID->nullbackend != 0) { + return MA_NO_DEVICE; /* Don't know the device. */ + } + + /* Name / Description */ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), "NULL Playback Device", (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), "NULL Capture Device", (size_t)-1); + } + + pDeviceInfo->isDefault = MA_TRUE; /* Only one playback and capture device for the null backend, so might as well mark as default. */ + + /* Support everything on the null backend. 
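   A format of ma_format_unknown together with 0 channels and a 0 sample rate is the device
   info convention for "anything goes"; the entry below is a wildcard rather than a real
   format. A caller enumerating native formats would typically treat such an entry as a cue
   to fall back to its own preferred format, channel count and rate.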
*/ + pDeviceInfo->nativeDataFormats[0].format = ma_format_unknown; + pDeviceInfo->nativeDataFormats[0].channels = 0; + pDeviceInfo->nativeDataFormats[0].sampleRate = 0; + pDeviceInfo->nativeDataFormats[0].flags = 0; + pDeviceInfo->nativeDataFormatCount = 1; + + (void)pContext; + return MA_SUCCESS; +} + + +static ma_result ma_device_uninit__null(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + /* Keep it clean and wait for the device thread to finish before returning. */ + ma_device_do_operation__null(pDevice, MA_DEVICE_OP_KILL__NULL); + + /* Wait for the thread to finish before continuing. */ + ma_thread_wait(&pDevice->null_device.deviceThread); + + /* At this point the loop in the device thread is as good as terminated so we can uninitialize our events. */ + ma_semaphore_uninit(&pDevice->null_device.operationSemaphore); + ma_event_uninit(&pDevice->null_device.operationCompletionEvent); + ma_event_uninit(&pDevice->null_device.operationEvent); + + return MA_SUCCESS; +} + +static ma_result ma_device_init__null(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->null_device); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* The null backend supports everything exactly as we specify it. */ + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + pDescriptorCapture->format = (pDescriptorCapture->format != ma_format_unknown) ? pDescriptorCapture->format : MA_DEFAULT_FORMAT; + pDescriptorCapture->channels = (pDescriptorCapture->channels != 0) ? pDescriptorCapture->channels : MA_DEFAULT_CHANNELS; + pDescriptorCapture->sampleRate = (pDescriptorCapture->sampleRate != 0) ? pDescriptorCapture->sampleRate : MA_DEFAULT_SAMPLE_RATE; + + if (pDescriptorCapture->channelMap[0] == MA_CHANNEL_NONE) { + ma_channel_map_init_standard(ma_standard_channel_map_default, pDescriptorCapture->channelMap, ma_countof(pDescriptorCapture->channelMap), pDescriptorCapture->channels); + } + + pDescriptorCapture->periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptorCapture, pDescriptorCapture->sampleRate, pConfig->performanceProfile); + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + pDescriptorPlayback->format = (pDescriptorPlayback->format != ma_format_unknown) ? pDescriptorPlayback->format : MA_DEFAULT_FORMAT; + pDescriptorPlayback->channels = (pDescriptorPlayback->channels != 0) ? pDescriptorPlayback->channels : MA_DEFAULT_CHANNELS; + pDescriptorPlayback->sampleRate = (pDescriptorPlayback->sampleRate != 0) ? pDescriptorPlayback->sampleRate : MA_DEFAULT_SAMPLE_RATE; + + if (pDescriptorPlayback->channelMap[0] == MA_CHANNEL_NONE) { + ma_channel_map_init_standard(ma_standard_channel_map_default, pDescriptorPlayback->channelMap, ma_countof(pDescriptorCapture->channelMap), pDescriptorPlayback->channels); + } + + pDescriptorPlayback->periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptorPlayback, pDescriptorPlayback->sampleRate, pConfig->performanceProfile); + } + + /* + In order to get timing right, we need to create a thread that does nothing but keeps track of the timer. This timer is started when the + first period is "written" to it, and then stopped in ma_device_stop__null(). 
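   As an aside, the null backend is handy for testing because it needs no audio hardware at
   all. A minimal, illustrative way to force it from user code (public API; error handling
   omitted):

       ma_backend backends[] = { ma_backend_null };
       ma_context context;
       ma_context_init(backends, 1, NULL, &context);   // context restricted to the null backend
       // ...initialize devices against this context as usual...
       ma_context_uninit(&context);

   Everything below then runs against the simulated clock driven by this worker thread.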
+ */ + result = ma_event_init(&pDevice->null_device.operationEvent); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_event_init(&pDevice->null_device.operationCompletionEvent); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_semaphore_init(1, &pDevice->null_device.operationSemaphore); /* <-- It's important that the initial value is set to 1. */ + if (result != MA_SUCCESS) { + return result; + } + + result = ma_thread_create(&pDevice->null_device.deviceThread, pDevice->pContext->threadPriority, 0, ma_device_thread__null, pDevice, &pDevice->pContext->allocationCallbacks); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_start__null(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + ma_device_do_operation__null(pDevice, MA_DEVICE_OP_START__NULL); + + ma_atomic_bool32_set(&pDevice->null_device.isStarted, MA_TRUE); + return MA_SUCCESS; +} + +static ma_result ma_device_stop__null(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + ma_device_do_operation__null(pDevice, MA_DEVICE_OP_SUSPEND__NULL); + + ma_atomic_bool32_set(&pDevice->null_device.isStarted, MA_FALSE); + return MA_SUCCESS; +} + +static ma_bool32 ma_device_is_started__null(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + return ma_atomic_bool32_get(&pDevice->null_device.isStarted); +} + +static ma_result ma_device_write__null(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +{ + ma_result result = MA_SUCCESS; + ma_uint32 totalPCMFramesProcessed; + ma_bool32 wasStartedOnEntry; + + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + wasStartedOnEntry = ma_device_is_started__null(pDevice); + + /* Keep going until everything has been read. */ + totalPCMFramesProcessed = 0; + while (totalPCMFramesProcessed < frameCount) { + ma_uint64 targetFrame; + + /* If there are any frames remaining in the current period, consume those first. */ + if (pDevice->null_device.currentPeriodFramesRemainingPlayback > 0) { + ma_uint32 framesRemaining = (frameCount - totalPCMFramesProcessed); + ma_uint32 framesToProcess = pDevice->null_device.currentPeriodFramesRemainingPlayback; + if (framesToProcess > framesRemaining) { + framesToProcess = framesRemaining; + } + + /* We don't actually do anything with pPCMFrames, so just mark it as unused to prevent a warning. */ + (void)pPCMFrames; + + pDevice->null_device.currentPeriodFramesRemainingPlayback -= framesToProcess; + totalPCMFramesProcessed += framesToProcess; + } + + /* If we've consumed the current period we'll need to mark it as such an ensure the device is started if it's not already. */ + if (pDevice->null_device.currentPeriodFramesRemainingPlayback == 0) { + pDevice->null_device.currentPeriodFramesRemainingPlayback = 0; + + if (!ma_device_is_started__null(pDevice) && !wasStartedOnEntry) { + result = ma_device_start__null(pDevice); + if (result != MA_SUCCESS) { + break; + } + } + } + + /* If we've consumed the whole buffer we can return now. */ + MA_ASSERT(totalPCMFramesProcessed <= frameCount); + if (totalPCMFramesProcessed == frameCount) { + break; + } + + /* Getting here means we've still got more frames to consume, we but need to wait for it to become available. */ + targetFrame = pDevice->null_device.lastProcessedFramePlayback; + for (;;) { + ma_uint64 currentFrame; + + /* Stop waiting if the device has been stopped. 
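   The waiting itself is driven entirely by the simulated clock: the total run time in
   frames is the elapsed time in seconds multiplied by the internal sample rate, so at
   48000 Hz each 10 millisecond sleep in this loop advances the virtual position by roughly
   480 frames until the target frame for the current period is reached.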
*/ + if (!ma_device_is_started__null(pDevice)) { + break; + } + + currentFrame = ma_device_get_total_run_time_in_frames__null(pDevice); + if (currentFrame >= targetFrame) { + break; + } + + /* Getting here means we haven't yet reached the target sample, so continue waiting. */ + ma_sleep(10); + } + + pDevice->null_device.lastProcessedFramePlayback += pDevice->playback.internalPeriodSizeInFrames; + pDevice->null_device.currentPeriodFramesRemainingPlayback = pDevice->playback.internalPeriodSizeInFrames; + } + + if (pFramesWritten != NULL) { + *pFramesWritten = totalPCMFramesProcessed; + } + + return result; +} + +static ma_result ma_device_read__null(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + ma_result result = MA_SUCCESS; + ma_uint32 totalPCMFramesProcessed; + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + /* Keep going until everything has been read. */ + totalPCMFramesProcessed = 0; + while (totalPCMFramesProcessed < frameCount) { + ma_uint64 targetFrame; + + /* If there are any frames remaining in the current period, consume those first. */ + if (pDevice->null_device.currentPeriodFramesRemainingCapture > 0) { + ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 framesRemaining = (frameCount - totalPCMFramesProcessed); + ma_uint32 framesToProcess = pDevice->null_device.currentPeriodFramesRemainingCapture; + if (framesToProcess > framesRemaining) { + framesToProcess = framesRemaining; + } + + /* We need to ensure the output buffer is zeroed. */ + MA_ZERO_MEMORY(ma_offset_ptr(pPCMFrames, totalPCMFramesProcessed*bpf), framesToProcess*bpf); + + pDevice->null_device.currentPeriodFramesRemainingCapture -= framesToProcess; + totalPCMFramesProcessed += framesToProcess; + } + + /* If we've consumed the current period we'll need to mark it as such an ensure the device is started if it's not already. */ + if (pDevice->null_device.currentPeriodFramesRemainingCapture == 0) { + pDevice->null_device.currentPeriodFramesRemainingCapture = 0; + } + + /* If we've consumed the whole buffer we can return now. */ + MA_ASSERT(totalPCMFramesProcessed <= frameCount); + if (totalPCMFramesProcessed == frameCount) { + break; + } + + /* Getting here means we've still got more frames to consume, we but need to wait for it to become available. */ + targetFrame = pDevice->null_device.lastProcessedFrameCapture + pDevice->capture.internalPeriodSizeInFrames; + for (;;) { + ma_uint64 currentFrame; + + /* Stop waiting if the device has been stopped. */ + if (!ma_device_is_started__null(pDevice)) { + break; + } + + currentFrame = ma_device_get_total_run_time_in_frames__null(pDevice); + if (currentFrame >= targetFrame) { + break; + } + + /* Getting here means we haven't yet reached the target sample, so continue waiting. 
*/ + ma_sleep(10); + } + + pDevice->null_device.lastProcessedFrameCapture += pDevice->capture.internalPeriodSizeInFrames; + pDevice->null_device.currentPeriodFramesRemainingCapture = pDevice->capture.internalPeriodSizeInFrames; + } + + if (pFramesRead != NULL) { + *pFramesRead = totalPCMFramesProcessed; + } + + return result; +} + +static ma_result ma_context_uninit__null(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_null); + + (void)pContext; + return MA_SUCCESS; +} + +static ma_result ma_context_init__null(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + MA_ASSERT(pContext != NULL); + + (void)pConfig; + (void)pContext; + + pCallbacks->onContextInit = ma_context_init__null; + pCallbacks->onContextUninit = ma_context_uninit__null; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__null; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__null; + pCallbacks->onDeviceInit = ma_device_init__null; + pCallbacks->onDeviceUninit = ma_device_uninit__null; + pCallbacks->onDeviceStart = ma_device_start__null; + pCallbacks->onDeviceStop = ma_device_stop__null; + pCallbacks->onDeviceRead = ma_device_read__null; + pCallbacks->onDeviceWrite = ma_device_write__null; + pCallbacks->onDeviceDataLoop = NULL; /* Our backend is asynchronous with a blocking read-write API which means we can get miniaudio to deal with the audio thread. */ + + /* The null backend always works. */ + return MA_SUCCESS; +} +#endif + + + +/******************************************************************************* + +WIN32 COMMON + +*******************************************************************************/ +#if defined(MA_WIN32) +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) +#define ma_CoInitializeEx(pContext, pvReserved, dwCoInit) ((pContext->win32.CoInitializeEx) ? 
((MA_PFN_CoInitializeEx)pContext->win32.CoInitializeEx)(pvReserved, dwCoInit) : ((MA_PFN_CoInitialize)pContext->win32.CoInitialize)(pvReserved)) +#define ma_CoUninitialize(pContext) ((MA_PFN_CoUninitialize)pContext->win32.CoUninitialize)() +#define ma_CoCreateInstance(pContext, rclsid, pUnkOuter, dwClsContext, riid, ppv) ((MA_PFN_CoCreateInstance)pContext->win32.CoCreateInstance)(rclsid, pUnkOuter, dwClsContext, riid, ppv) +#define ma_CoTaskMemFree(pContext, pv) ((MA_PFN_CoTaskMemFree)pContext->win32.CoTaskMemFree)(pv) +#define ma_PropVariantClear(pContext, pvar) ((MA_PFN_PropVariantClear)pContext->win32.PropVariantClear)(pvar) +#else +#define ma_CoInitializeEx(pContext, pvReserved, dwCoInit) CoInitializeEx(pvReserved, dwCoInit) +#define ma_CoUninitialize(pContext) CoUninitialize() +#define ma_CoCreateInstance(pContext, rclsid, pUnkOuter, dwClsContext, riid, ppv) CoCreateInstance(rclsid, pUnkOuter, dwClsContext, riid, ppv) +#define ma_CoTaskMemFree(pContext, pv) CoTaskMemFree(pv) +#define ma_PropVariantClear(pContext, pvar) PropVariantClear(pvar) +#endif + +#if !defined(MAXULONG_PTR) && !defined(__WATCOMC__) +typedef size_t DWORD_PTR; +#endif + +#if !defined(WAVE_FORMAT_1M08) +#define WAVE_FORMAT_1M08 0x00000001 +#define WAVE_FORMAT_1S08 0x00000002 +#define WAVE_FORMAT_1M16 0x00000004 +#define WAVE_FORMAT_1S16 0x00000008 +#define WAVE_FORMAT_2M08 0x00000010 +#define WAVE_FORMAT_2S08 0x00000020 +#define WAVE_FORMAT_2M16 0x00000040 +#define WAVE_FORMAT_2S16 0x00000080 +#define WAVE_FORMAT_4M08 0x00000100 +#define WAVE_FORMAT_4S08 0x00000200 +#define WAVE_FORMAT_4M16 0x00000400 +#define WAVE_FORMAT_4S16 0x00000800 +#endif + +#if !defined(WAVE_FORMAT_44M08) +#define WAVE_FORMAT_44M08 0x00000100 +#define WAVE_FORMAT_44S08 0x00000200 +#define WAVE_FORMAT_44M16 0x00000400 +#define WAVE_FORMAT_44S16 0x00000800 +#define WAVE_FORMAT_48M08 0x00001000 +#define WAVE_FORMAT_48S08 0x00002000 +#define WAVE_FORMAT_48M16 0x00004000 +#define WAVE_FORMAT_48S16 0x00008000 +#define WAVE_FORMAT_96M08 0x00010000 +#define WAVE_FORMAT_96S08 0x00020000 +#define WAVE_FORMAT_96M16 0x00040000 +#define WAVE_FORMAT_96S16 0x00080000 +#endif + +#ifndef SPEAKER_FRONT_LEFT +#define SPEAKER_FRONT_LEFT 0x1 +#define SPEAKER_FRONT_RIGHT 0x2 +#define SPEAKER_FRONT_CENTER 0x4 +#define SPEAKER_LOW_FREQUENCY 0x8 +#define SPEAKER_BACK_LEFT 0x10 +#define SPEAKER_BACK_RIGHT 0x20 +#define SPEAKER_FRONT_LEFT_OF_CENTER 0x40 +#define SPEAKER_FRONT_RIGHT_OF_CENTER 0x80 +#define SPEAKER_BACK_CENTER 0x100 +#define SPEAKER_SIDE_LEFT 0x200 +#define SPEAKER_SIDE_RIGHT 0x400 +#define SPEAKER_TOP_CENTER 0x800 +#define SPEAKER_TOP_FRONT_LEFT 0x1000 +#define SPEAKER_TOP_FRONT_CENTER 0x2000 +#define SPEAKER_TOP_FRONT_RIGHT 0x4000 +#define SPEAKER_TOP_BACK_LEFT 0x8000 +#define SPEAKER_TOP_BACK_CENTER 0x10000 +#define SPEAKER_TOP_BACK_RIGHT 0x20000 +#endif + +/* +Implement our own version of MA_WAVEFORMATEXTENSIBLE so we can avoid a header. Be careful with this +because MA_WAVEFORMATEX has an extra two bytes over standard WAVEFORMATEX due to padding. The +standard version uses tight packing, but for compiler compatibility we're not doing that with ours. 
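   To spell the size difference out, the field layout under tight packing is:

       offset 0:  WORD  wFormatTag
       offset 2:  WORD  nChannels
       offset 4:  DWORD nSamplesPerSec
       offset 8:  DWORD nAvgBytesPerSec
       offset 12: WORD  nBlockAlign
       offset 14: WORD  wBitsPerSample
       offset 16: WORD  cbSize
       -> 18 bytes packed; with natural alignment the DWORD members force 4-byte struct
          alignment, so the same layout rounds up to 20 bytes on typical ABIs.

   That mismatch is the pitfall the warning above is pointing at: the in-memory size of our
   struct no longer matches the 18-byte size Windows APIs assume for WAVEFORMATEX.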
+*/ +typedef struct +{ + WORD wFormatTag; + WORD nChannels; + DWORD nSamplesPerSec; + DWORD nAvgBytesPerSec; + WORD nBlockAlign; + WORD wBitsPerSample; + WORD cbSize; +} MA_WAVEFORMATEX; + +typedef struct +{ + WORD wFormatTag; + WORD nChannels; + DWORD nSamplesPerSec; + DWORD nAvgBytesPerSec; + WORD nBlockAlign; + WORD wBitsPerSample; + WORD cbSize; + union + { + WORD wValidBitsPerSample; + WORD wSamplesPerBlock; + WORD wReserved; + } Samples; + DWORD dwChannelMask; + GUID SubFormat; +} MA_WAVEFORMATEXTENSIBLE; + + + +#ifndef WAVE_FORMAT_EXTENSIBLE +#define WAVE_FORMAT_EXTENSIBLE 0xFFFE +#endif + +#ifndef WAVE_FORMAT_PCM +#define WAVE_FORMAT_PCM 1 +#endif + +#ifndef WAVE_FORMAT_IEEE_FLOAT +#define WAVE_FORMAT_IEEE_FLOAT 0x0003 +#endif + +/* Converts an individual Win32-style channel identifier (SPEAKER_FRONT_LEFT, etc.) to miniaudio. */ +static ma_uint8 ma_channel_id_to_ma__win32(DWORD id) +{ + switch (id) + { + case SPEAKER_FRONT_LEFT: return MA_CHANNEL_FRONT_LEFT; + case SPEAKER_FRONT_RIGHT: return MA_CHANNEL_FRONT_RIGHT; + case SPEAKER_FRONT_CENTER: return MA_CHANNEL_FRONT_CENTER; + case SPEAKER_LOW_FREQUENCY: return MA_CHANNEL_LFE; + case SPEAKER_BACK_LEFT: return MA_CHANNEL_BACK_LEFT; + case SPEAKER_BACK_RIGHT: return MA_CHANNEL_BACK_RIGHT; + case SPEAKER_FRONT_LEFT_OF_CENTER: return MA_CHANNEL_FRONT_LEFT_CENTER; + case SPEAKER_FRONT_RIGHT_OF_CENTER: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case SPEAKER_BACK_CENTER: return MA_CHANNEL_BACK_CENTER; + case SPEAKER_SIDE_LEFT: return MA_CHANNEL_SIDE_LEFT; + case SPEAKER_SIDE_RIGHT: return MA_CHANNEL_SIDE_RIGHT; + case SPEAKER_TOP_CENTER: return MA_CHANNEL_TOP_CENTER; + case SPEAKER_TOP_FRONT_LEFT: return MA_CHANNEL_TOP_FRONT_LEFT; + case SPEAKER_TOP_FRONT_CENTER: return MA_CHANNEL_TOP_FRONT_CENTER; + case SPEAKER_TOP_FRONT_RIGHT: return MA_CHANNEL_TOP_FRONT_RIGHT; + case SPEAKER_TOP_BACK_LEFT: return MA_CHANNEL_TOP_BACK_LEFT; + case SPEAKER_TOP_BACK_CENTER: return MA_CHANNEL_TOP_BACK_CENTER; + case SPEAKER_TOP_BACK_RIGHT: return MA_CHANNEL_TOP_BACK_RIGHT; + default: return 0; + } +} + +/* Converts an individual miniaudio channel identifier (MA_CHANNEL_FRONT_LEFT, etc.) to Win32-style. */ +static DWORD ma_channel_id_to_win32(DWORD id) +{ + switch (id) + { + case MA_CHANNEL_MONO: return SPEAKER_FRONT_CENTER; + case MA_CHANNEL_FRONT_LEFT: return SPEAKER_FRONT_LEFT; + case MA_CHANNEL_FRONT_RIGHT: return SPEAKER_FRONT_RIGHT; + case MA_CHANNEL_FRONT_CENTER: return SPEAKER_FRONT_CENTER; + case MA_CHANNEL_LFE: return SPEAKER_LOW_FREQUENCY; + case MA_CHANNEL_BACK_LEFT: return SPEAKER_BACK_LEFT; + case MA_CHANNEL_BACK_RIGHT: return SPEAKER_BACK_RIGHT; + case MA_CHANNEL_FRONT_LEFT_CENTER: return SPEAKER_FRONT_LEFT_OF_CENTER; + case MA_CHANNEL_FRONT_RIGHT_CENTER: return SPEAKER_FRONT_RIGHT_OF_CENTER; + case MA_CHANNEL_BACK_CENTER: return SPEAKER_BACK_CENTER; + case MA_CHANNEL_SIDE_LEFT: return SPEAKER_SIDE_LEFT; + case MA_CHANNEL_SIDE_RIGHT: return SPEAKER_SIDE_RIGHT; + case MA_CHANNEL_TOP_CENTER: return SPEAKER_TOP_CENTER; + case MA_CHANNEL_TOP_FRONT_LEFT: return SPEAKER_TOP_FRONT_LEFT; + case MA_CHANNEL_TOP_FRONT_CENTER: return SPEAKER_TOP_FRONT_CENTER; + case MA_CHANNEL_TOP_FRONT_RIGHT: return SPEAKER_TOP_FRONT_RIGHT; + case MA_CHANNEL_TOP_BACK_LEFT: return SPEAKER_TOP_BACK_LEFT; + case MA_CHANNEL_TOP_BACK_CENTER: return SPEAKER_TOP_BACK_CENTER; + case MA_CHANNEL_TOP_BACK_RIGHT: return SPEAKER_TOP_BACK_RIGHT; + default: return 0; + } +} + +/* Converts a channel mapping to a Win32-style channel mask. 
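   Worked example: a plain stereo map of {MA_CHANNEL_FRONT_LEFT, MA_CHANNEL_FRONT_RIGHT}
   produces SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT = 0x1 | 0x2 = 0x3, and a 5.1 map
   typically adds SPEAKER_FRONT_CENTER, SPEAKER_LOW_FREQUENCY and the two back (or side)
   speakers on top of that. Channels with no Win32 equivalent map to 0 and simply don't
   contribute a bit to the mask.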
*/ +static DWORD ma_channel_map_to_channel_mask__win32(const ma_channel* pChannelMap, ma_uint32 channels) +{ + DWORD dwChannelMask = 0; + ma_uint32 iChannel; + + for (iChannel = 0; iChannel < channels; ++iChannel) { + dwChannelMask |= ma_channel_id_to_win32(pChannelMap[iChannel]); + } + + return dwChannelMask; +} + +/* Converts a Win32-style channel mask to a miniaudio channel map. */ +static void ma_channel_mask_to_channel_map__win32(DWORD dwChannelMask, ma_uint32 channels, ma_channel* pChannelMap) +{ + /* If the channel mask is set to 0, just assume a default Win32 channel map. */ + if (dwChannelMask == 0) { + ma_channel_map_init_standard(ma_standard_channel_map_microsoft, pChannelMap, channels, channels); + } else { + if (channels == 1 && (dwChannelMask & SPEAKER_FRONT_CENTER) != 0) { + pChannelMap[0] = MA_CHANNEL_MONO; + } else { + /* Just iterate over each bit. */ + ma_uint32 iChannel = 0; + ma_uint32 iBit; + + for (iBit = 0; iBit < 32 && iChannel < channels; ++iBit) { + DWORD bitValue = (dwChannelMask & (1UL << iBit)); + if (bitValue != 0) { + /* The bit is set. */ + pChannelMap[iChannel] = ma_channel_id_to_ma__win32(bitValue); + iChannel += 1; + } + } + } + } +} + +#ifdef __cplusplus +static ma_bool32 ma_is_guid_equal(const void* a, const void* b) +{ + return IsEqualGUID(*(const GUID*)a, *(const GUID*)b); +} +#else +#define ma_is_guid_equal(a, b) IsEqualGUID((const GUID*)a, (const GUID*)b) +#endif + +static MA_INLINE ma_bool32 ma_is_guid_null(const void* guid) +{ + static GUID nullguid = {0x00000000, 0x0000, 0x0000, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}; + return ma_is_guid_equal(guid, &nullguid); +} + +static ma_format ma_format_from_WAVEFORMATEX(const MA_WAVEFORMATEX* pWF) +{ + MA_ASSERT(pWF != NULL); + + if (pWF->wFormatTag == WAVE_FORMAT_EXTENSIBLE) { + const MA_WAVEFORMATEXTENSIBLE* pWFEX = (const MA_WAVEFORMATEXTENSIBLE*)pWF; + if (ma_is_guid_equal(&pWFEX->SubFormat, &MA_GUID_KSDATAFORMAT_SUBTYPE_PCM)) { + if (pWFEX->Samples.wValidBitsPerSample == 32) { + return ma_format_s32; + } + if (pWFEX->Samples.wValidBitsPerSample == 24) { + if (pWFEX->wBitsPerSample == 32) { + return ma_format_s32; + } + if (pWFEX->wBitsPerSample == 24) { + return ma_format_s24; + } + } + if (pWFEX->Samples.wValidBitsPerSample == 16) { + return ma_format_s16; + } + if (pWFEX->Samples.wValidBitsPerSample == 8) { + return ma_format_u8; + } + } + if (ma_is_guid_equal(&pWFEX->SubFormat, &MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)) { + if (pWFEX->Samples.wValidBitsPerSample == 32) { + return ma_format_f32; + } + /* + if (pWFEX->Samples.wValidBitsPerSample == 64) { + return ma_format_f64; + } + */ + } + } else { + if (pWF->wFormatTag == WAVE_FORMAT_PCM) { + if (pWF->wBitsPerSample == 32) { + return ma_format_s32; + } + if (pWF->wBitsPerSample == 24) { + return ma_format_s24; + } + if (pWF->wBitsPerSample == 16) { + return ma_format_s16; + } + if (pWF->wBitsPerSample == 8) { + return ma_format_u8; + } + } + if (pWF->wFormatTag == WAVE_FORMAT_IEEE_FLOAT) { + if (pWF->wBitsPerSample == 32) { + return ma_format_f32; + } + if (pWF->wBitsPerSample == 64) { + /*return ma_format_f64;*/ + } + } + } + + return ma_format_unknown; +} +#endif + + +/******************************************************************************* + +WASAPI Backend + +*******************************************************************************/ +#ifdef MA_HAS_WASAPI +#if 0 +#if defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable:4091) /* 'typedef ': ignored on left of '' when no variable is declared */ +#endif 
+#include +#include +#if defined(_MSC_VER) +#pragma warning(pop) +#endif +#endif /* 0 */ + +static ma_result ma_device_reroute__wasapi(ma_device* pDevice, ma_device_type deviceType); + +/* Some compilers don't define VerifyVersionInfoW. Need to write this ourselves. */ +#define MA_WIN32_WINNT_VISTA 0x0600 +#define MA_VER_MINORVERSION 0x01 +#define MA_VER_MAJORVERSION 0x02 +#define MA_VER_SERVICEPACKMAJOR 0x20 +#define MA_VER_GREATER_EQUAL 0x03 + +typedef struct { + DWORD dwOSVersionInfoSize; + DWORD dwMajorVersion; + DWORD dwMinorVersion; + DWORD dwBuildNumber; + DWORD dwPlatformId; + WCHAR szCSDVersion[128]; + WORD wServicePackMajor; + WORD wServicePackMinor; + WORD wSuiteMask; + BYTE wProductType; + BYTE wReserved; +} ma_OSVERSIONINFOEXW; + +typedef BOOL (WINAPI * ma_PFNVerifyVersionInfoW) (ma_OSVERSIONINFOEXW* lpVersionInfo, DWORD dwTypeMask, DWORDLONG dwlConditionMask); +typedef ULONGLONG (WINAPI * ma_PFNVerSetConditionMask)(ULONGLONG dwlConditionMask, DWORD dwTypeBitMask, BYTE dwConditionMask); + + +#ifndef PROPERTYKEY_DEFINED +#define PROPERTYKEY_DEFINED +#ifndef __WATCOMC__ +typedef struct +{ + GUID fmtid; + DWORD pid; +} PROPERTYKEY; +#endif +#endif + +/* Some compilers don't define PropVariantInit(). We just do this ourselves since it's just a memset(). */ +static MA_INLINE void ma_PropVariantInit(MA_PROPVARIANT* pProp) +{ + MA_ZERO_OBJECT(pProp); +} + + +static const PROPERTYKEY MA_PKEY_Device_FriendlyName = {{0xA45C254E, 0xDF1C, 0x4EFD, {0x80, 0x20, 0x67, 0xD1, 0x46, 0xA8, 0x50, 0xE0}}, 14}; +static const PROPERTYKEY MA_PKEY_AudioEngine_DeviceFormat = {{0xF19F064D, 0x82C, 0x4E27, {0xBC, 0x73, 0x68, 0x82, 0xA1, 0xBB, 0x8E, 0x4C}}, 0}; + +static const IID MA_IID_IUnknown = {0x00000000, 0x0000, 0x0000, {0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}}; /* 00000000-0000-0000-C000-000000000046 */ +#if !defined(MA_WIN32_DESKTOP) && !defined(MA_WIN32_GDK) +static const IID MA_IID_IAgileObject = {0x94EA2B94, 0xE9CC, 0x49E0, {0xC0, 0xFF, 0xEE, 0x64, 0xCA, 0x8F, 0x5B, 0x90}}; /* 94EA2B94-E9CC-49E0-C0FF-EE64CA8F5B90 */ +#endif + +static const IID MA_IID_IAudioClient = {0x1CB9AD4C, 0xDBFA, 0x4C32, {0xB1, 0x78, 0xC2, 0xF5, 0x68, 0xA7, 0x03, 0xB2}}; /* 1CB9AD4C-DBFA-4C32-B178-C2F568A703B2 = __uuidof(IAudioClient) */ +static const IID MA_IID_IAudioClient2 = {0x726778CD, 0xF60A, 0x4EDA, {0x82, 0xDE, 0xE4, 0x76, 0x10, 0xCD, 0x78, 0xAA}}; /* 726778CD-F60A-4EDA-82DE-E47610CD78AA = __uuidof(IAudioClient2) */ +static const IID MA_IID_IAudioClient3 = {0x7ED4EE07, 0x8E67, 0x4CD4, {0x8C, 0x1A, 0x2B, 0x7A, 0x59, 0x87, 0xAD, 0x42}}; /* 7ED4EE07-8E67-4CD4-8C1A-2B7A5987AD42 = __uuidof(IAudioClient3) */ +static const IID MA_IID_IAudioRenderClient = {0xF294ACFC, 0x3146, 0x4483, {0xA7, 0xBF, 0xAD, 0xDC, 0xA7, 0xC2, 0x60, 0xE2}}; /* F294ACFC-3146-4483-A7BF-ADDCA7C260E2 = __uuidof(IAudioRenderClient) */ +static const IID MA_IID_IAudioCaptureClient = {0xC8ADBD64, 0xE71E, 0x48A0, {0xA4, 0xDE, 0x18, 0x5C, 0x39, 0x5C, 0xD3, 0x17}}; /* C8ADBD64-E71E-48A0-A4DE-185C395CD317 = __uuidof(IAudioCaptureClient) */ +static const IID MA_IID_IMMNotificationClient = {0x7991EEC9, 0x7E89, 0x4D85, {0x83, 0x90, 0x6C, 0x70, 0x3C, 0xEC, 0x60, 0xC0}}; /* 7991EEC9-7E89-4D85-8390-6C703CEC60C0 = __uuidof(IMMNotificationClient) */ +#if !defined(MA_WIN32_DESKTOP) && !defined(MA_WIN32_GDK) +static const IID MA_IID_DEVINTERFACE_AUDIO_RENDER = {0xE6327CAD, 0xDCEC, 0x4949, {0xAE, 0x8A, 0x99, 0x1E, 0x97, 0x6A, 0x79, 0xD2}}; /* E6327CAD-DCEC-4949-AE8A-991E976A79D2 */ +static const IID MA_IID_DEVINTERFACE_AUDIO_CAPTURE = {0x2EEF81BE, 0x33FA, 
0x4800, {0x96, 0x70, 0x1C, 0xD4, 0x74, 0x97, 0x2C, 0x3F}}; /* 2EEF81BE-33FA-4800-9670-1CD474972C3F */ +static const IID MA_IID_IActivateAudioInterfaceCompletionHandler = {0x41D949AB, 0x9862, 0x444A, {0x80, 0xF6, 0xC2, 0x61, 0x33, 0x4D, 0xA5, 0xEB}}; /* 41D949AB-9862-444A-80F6-C261334DA5EB */ +#endif + +static const IID MA_CLSID_MMDeviceEnumerator = {0xBCDE0395, 0xE52F, 0x467C, {0x8E, 0x3D, 0xC4, 0x57, 0x92, 0x91, 0x69, 0x2E}}; /* BCDE0395-E52F-467C-8E3D-C4579291692E = __uuidof(MMDeviceEnumerator) */ +static const IID MA_IID_IMMDeviceEnumerator = {0xA95664D2, 0x9614, 0x4F35, {0xA7, 0x46, 0xDE, 0x8D, 0xB6, 0x36, 0x17, 0xE6}}; /* A95664D2-9614-4F35-A746-DE8DB63617E6 = __uuidof(IMMDeviceEnumerator) */ + +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) +#define MA_MM_DEVICE_STATE_ACTIVE 1 +#define MA_MM_DEVICE_STATE_DISABLED 2 +#define MA_MM_DEVICE_STATE_NOTPRESENT 4 +#define MA_MM_DEVICE_STATE_UNPLUGGED 8 + +typedef struct ma_IMMDeviceEnumerator ma_IMMDeviceEnumerator; +typedef struct ma_IMMDeviceCollection ma_IMMDeviceCollection; +typedef struct ma_IMMDevice ma_IMMDevice; +#else +typedef struct ma_IActivateAudioInterfaceCompletionHandler ma_IActivateAudioInterfaceCompletionHandler; +typedef struct ma_IActivateAudioInterfaceAsyncOperation ma_IActivateAudioInterfaceAsyncOperation; +#endif +typedef struct ma_IPropertyStore ma_IPropertyStore; +typedef struct ma_IAudioClient ma_IAudioClient; +typedef struct ma_IAudioClient2 ma_IAudioClient2; +typedef struct ma_IAudioClient3 ma_IAudioClient3; +typedef struct ma_IAudioRenderClient ma_IAudioRenderClient; +typedef struct ma_IAudioCaptureClient ma_IAudioCaptureClient; + +typedef ma_int64 MA_REFERENCE_TIME; + +#define MA_AUDCLNT_STREAMFLAGS_CROSSPROCESS 0x00010000 +#define MA_AUDCLNT_STREAMFLAGS_LOOPBACK 0x00020000 +#define MA_AUDCLNT_STREAMFLAGS_EVENTCALLBACK 0x00040000 +#define MA_AUDCLNT_STREAMFLAGS_NOPERSIST 0x00080000 +#define MA_AUDCLNT_STREAMFLAGS_RATEADJUST 0x00100000 +#define MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY 0x08000000 +#define MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM 0x80000000 +#define MA_AUDCLNT_SESSIONFLAGS_EXPIREWHENUNOWNED 0x10000000 +#define MA_AUDCLNT_SESSIONFLAGS_DISPLAY_HIDE 0x20000000 +#define MA_AUDCLNT_SESSIONFLAGS_DISPLAY_HIDEWHENEXPIRED 0x40000000 + +/* Buffer flags. */ +#define MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY 1 +#define MA_AUDCLNT_BUFFERFLAGS_SILENT 2 +#define MA_AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR 4 + +typedef enum +{ + ma_eRender = 0, + ma_eCapture = 1, + ma_eAll = 2 +} ma_EDataFlow; + +typedef enum +{ + ma_eConsole = 0, + ma_eMultimedia = 1, + ma_eCommunications = 2 +} ma_ERole; + +typedef enum +{ + MA_AUDCLNT_SHAREMODE_SHARED, + MA_AUDCLNT_SHAREMODE_EXCLUSIVE +} MA_AUDCLNT_SHAREMODE; + +typedef enum +{ + MA_AudioCategory_Other = 0 /* <-- miniaudio is only caring about Other. 
*/ +} MA_AUDIO_STREAM_CATEGORY; + +typedef struct +{ + ma_uint32 cbSize; + BOOL bIsOffload; + MA_AUDIO_STREAM_CATEGORY eCategory; +} ma_AudioClientProperties; + +/* IUnknown */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IUnknown* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IUnknown* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IUnknown* pThis); +} ma_IUnknownVtbl; +struct ma_IUnknown +{ + ma_IUnknownVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IUnknown_QueryInterface(ma_IUnknown* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IUnknown_AddRef(ma_IUnknown* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IUnknown_Release(ma_IUnknown* pThis) { return pThis->lpVtbl->Release(pThis); } + +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) +/* IMMNotificationClient */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMNotificationClient* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMNotificationClient* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IMMNotificationClient* pThis); + + /* IMMNotificationClient */ + HRESULT (STDMETHODCALLTYPE * OnDeviceStateChanged) (ma_IMMNotificationClient* pThis, const WCHAR* pDeviceID, DWORD dwNewState); + HRESULT (STDMETHODCALLTYPE * OnDeviceAdded) (ma_IMMNotificationClient* pThis, const WCHAR* pDeviceID); + HRESULT (STDMETHODCALLTYPE * OnDeviceRemoved) (ma_IMMNotificationClient* pThis, const WCHAR* pDeviceID); + HRESULT (STDMETHODCALLTYPE * OnDefaultDeviceChanged)(ma_IMMNotificationClient* pThis, ma_EDataFlow dataFlow, ma_ERole role, const WCHAR* pDefaultDeviceID); + HRESULT (STDMETHODCALLTYPE * OnPropertyValueChanged)(ma_IMMNotificationClient* pThis, const WCHAR* pDeviceID, const PROPERTYKEY key); +} ma_IMMNotificationClientVtbl; + +/* IMMDeviceEnumerator */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDeviceEnumerator* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMDeviceEnumerator* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IMMDeviceEnumerator* pThis); + + /* IMMDeviceEnumerator */ + HRESULT (STDMETHODCALLTYPE * EnumAudioEndpoints) (ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, DWORD dwStateMask, ma_IMMDeviceCollection** ppDevices); + HRESULT (STDMETHODCALLTYPE * GetDefaultAudioEndpoint) (ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, ma_ERole role, ma_IMMDevice** ppEndpoint); + HRESULT (STDMETHODCALLTYPE * GetDevice) (ma_IMMDeviceEnumerator* pThis, const WCHAR* pID, ma_IMMDevice** ppDevice); + HRESULT (STDMETHODCALLTYPE * RegisterEndpointNotificationCallback) (ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient); + HRESULT (STDMETHODCALLTYPE * UnregisterEndpointNotificationCallback)(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient); +} ma_IMMDeviceEnumeratorVtbl; +struct ma_IMMDeviceEnumerator +{ + ma_IMMDeviceEnumeratorVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IMMDeviceEnumerator_QueryInterface(ma_IMMDeviceEnumerator* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IMMDeviceEnumerator_AddRef(ma_IMMDeviceEnumerator* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG 
ma_IMMDeviceEnumerator_Release(ma_IMMDeviceEnumerator* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IMMDeviceEnumerator_EnumAudioEndpoints(ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, DWORD dwStateMask, ma_IMMDeviceCollection** ppDevices) { return pThis->lpVtbl->EnumAudioEndpoints(pThis, dataFlow, dwStateMask, ppDevices); } +static MA_INLINE HRESULT ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, ma_ERole role, ma_IMMDevice** ppEndpoint) { return pThis->lpVtbl->GetDefaultAudioEndpoint(pThis, dataFlow, role, ppEndpoint); } +static MA_INLINE HRESULT ma_IMMDeviceEnumerator_GetDevice(ma_IMMDeviceEnumerator* pThis, const WCHAR* pID, ma_IMMDevice** ppDevice) { return pThis->lpVtbl->GetDevice(pThis, pID, ppDevice); } +static MA_INLINE HRESULT ma_IMMDeviceEnumerator_RegisterEndpointNotificationCallback(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient) { return pThis->lpVtbl->RegisterEndpointNotificationCallback(pThis, pClient); } +static MA_INLINE HRESULT ma_IMMDeviceEnumerator_UnregisterEndpointNotificationCallback(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient) { return pThis->lpVtbl->UnregisterEndpointNotificationCallback(pThis, pClient); } + + +/* IMMDeviceCollection */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDeviceCollection* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMDeviceCollection* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IMMDeviceCollection* pThis); + + /* IMMDeviceCollection */ + HRESULT (STDMETHODCALLTYPE * GetCount)(ma_IMMDeviceCollection* pThis, UINT* pDevices); + HRESULT (STDMETHODCALLTYPE * Item) (ma_IMMDeviceCollection* pThis, UINT nDevice, ma_IMMDevice** ppDevice); +} ma_IMMDeviceCollectionVtbl; +struct ma_IMMDeviceCollection +{ + ma_IMMDeviceCollectionVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IMMDeviceCollection_QueryInterface(ma_IMMDeviceCollection* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IMMDeviceCollection_AddRef(ma_IMMDeviceCollection* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IMMDeviceCollection_Release(ma_IMMDeviceCollection* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IMMDeviceCollection_GetCount(ma_IMMDeviceCollection* pThis, UINT* pDevices) { return pThis->lpVtbl->GetCount(pThis, pDevices); } +static MA_INLINE HRESULT ma_IMMDeviceCollection_Item(ma_IMMDeviceCollection* pThis, UINT nDevice, ma_IMMDevice** ppDevice) { return pThis->lpVtbl->Item(pThis, nDevice, ppDevice); } + + +/* IMMDevice */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDevice* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMDevice* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IMMDevice* pThis); + + /* IMMDevice */ + HRESULT (STDMETHODCALLTYPE * Activate) (ma_IMMDevice* pThis, const IID* const iid, DWORD dwClsCtx, MA_PROPVARIANT* pActivationParams, void** ppInterface); + HRESULT (STDMETHODCALLTYPE * OpenPropertyStore)(ma_IMMDevice* pThis, DWORD stgmAccess, ma_IPropertyStore** ppProperties); + HRESULT (STDMETHODCALLTYPE * GetId) (ma_IMMDevice* pThis, WCHAR** pID); + HRESULT (STDMETHODCALLTYPE * GetState) (ma_IMMDevice* pThis, DWORD *pState); +} ma_IMMDeviceVtbl; +struct ma_IMMDevice +{ + ma_IMMDeviceVtbl* lpVtbl; 
+}; +static MA_INLINE HRESULT ma_IMMDevice_QueryInterface(ma_IMMDevice* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IMMDevice_AddRef(ma_IMMDevice* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IMMDevice_Release(ma_IMMDevice* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IMMDevice_Activate(ma_IMMDevice* pThis, const IID* const iid, DWORD dwClsCtx, MA_PROPVARIANT* pActivationParams, void** ppInterface) { return pThis->lpVtbl->Activate(pThis, iid, dwClsCtx, pActivationParams, ppInterface); } +static MA_INLINE HRESULT ma_IMMDevice_OpenPropertyStore(ma_IMMDevice* pThis, DWORD stgmAccess, ma_IPropertyStore** ppProperties) { return pThis->lpVtbl->OpenPropertyStore(pThis, stgmAccess, ppProperties); } +static MA_INLINE HRESULT ma_IMMDevice_GetId(ma_IMMDevice* pThis, WCHAR** pID) { return pThis->lpVtbl->GetId(pThis, pID); } +static MA_INLINE HRESULT ma_IMMDevice_GetState(ma_IMMDevice* pThis, DWORD *pState) { return pThis->lpVtbl->GetState(pThis, pState); } +#else +/* IActivateAudioInterfaceAsyncOperation */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IActivateAudioInterfaceAsyncOperation* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IActivateAudioInterfaceAsyncOperation* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IActivateAudioInterfaceAsyncOperation* pThis); + + /* IActivateAudioInterfaceAsyncOperation */ + HRESULT (STDMETHODCALLTYPE * GetActivateResult)(ma_IActivateAudioInterfaceAsyncOperation* pThis, HRESULT *pActivateResult, ma_IUnknown** ppActivatedInterface); +} ma_IActivateAudioInterfaceAsyncOperationVtbl; +struct ma_IActivateAudioInterfaceAsyncOperation +{ + ma_IActivateAudioInterfaceAsyncOperationVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IActivateAudioInterfaceAsyncOperation_QueryInterface(ma_IActivateAudioInterfaceAsyncOperation* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IActivateAudioInterfaceAsyncOperation_AddRef(ma_IActivateAudioInterfaceAsyncOperation* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IActivateAudioInterfaceAsyncOperation_Release(ma_IActivateAudioInterfaceAsyncOperation* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IActivateAudioInterfaceAsyncOperation_GetActivateResult(ma_IActivateAudioInterfaceAsyncOperation* pThis, HRESULT *pActivateResult, ma_IUnknown** ppActivatedInterface) { return pThis->lpVtbl->GetActivateResult(pThis, pActivateResult, ppActivatedInterface); } +#endif + +/* IPropertyStore */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IPropertyStore* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IPropertyStore* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IPropertyStore* pThis); + + /* IPropertyStore */ + HRESULT (STDMETHODCALLTYPE * GetCount)(ma_IPropertyStore* pThis, DWORD* pPropCount); + HRESULT (STDMETHODCALLTYPE * GetAt) (ma_IPropertyStore* pThis, DWORD propIndex, PROPERTYKEY* pPropKey); + HRESULT (STDMETHODCALLTYPE * GetValue)(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, MA_PROPVARIANT* pPropVar); + HRESULT (STDMETHODCALLTYPE * SetValue)(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, const MA_PROPVARIANT* const pPropVar); + HRESULT 
(STDMETHODCALLTYPE * Commit) (ma_IPropertyStore* pThis); +} ma_IPropertyStoreVtbl; +struct ma_IPropertyStore +{ + ma_IPropertyStoreVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IPropertyStore_QueryInterface(ma_IPropertyStore* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IPropertyStore_AddRef(ma_IPropertyStore* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IPropertyStore_Release(ma_IPropertyStore* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IPropertyStore_GetCount(ma_IPropertyStore* pThis, DWORD* pPropCount) { return pThis->lpVtbl->GetCount(pThis, pPropCount); } +static MA_INLINE HRESULT ma_IPropertyStore_GetAt(ma_IPropertyStore* pThis, DWORD propIndex, PROPERTYKEY* pPropKey) { return pThis->lpVtbl->GetAt(pThis, propIndex, pPropKey); } +static MA_INLINE HRESULT ma_IPropertyStore_GetValue(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, MA_PROPVARIANT* pPropVar) { return pThis->lpVtbl->GetValue(pThis, pKey, pPropVar); } +static MA_INLINE HRESULT ma_IPropertyStore_SetValue(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, const MA_PROPVARIANT* const pPropVar) { return pThis->lpVtbl->SetValue(pThis, pKey, pPropVar); } +static MA_INLINE HRESULT ma_IPropertyStore_Commit(ma_IPropertyStore* pThis) { return pThis->lpVtbl->Commit(pThis); } + + +/* IAudioClient */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioClient* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioClient* pThis); + + /* IAudioClient */ + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const MA_WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid); + HRESULT (STDMETHODCALLTYPE * GetBufferSize) (ma_IAudioClient* pThis, ma_uint32* pNumBufferFrames); + HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient* pThis, MA_REFERENCE_TIME* pLatency); + HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient* pThis, ma_uint32* pNumPaddingFrames); + HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, const MA_WAVEFORMATEX* pFormat, MA_WAVEFORMATEX** ppClosestMatch); + HRESULT (STDMETHODCALLTYPE * GetMixFormat) (ma_IAudioClient* pThis, MA_WAVEFORMATEX** ppDeviceFormat); + HRESULT (STDMETHODCALLTYPE * GetDevicePeriod) (ma_IAudioClient* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod); + HRESULT (STDMETHODCALLTYPE * Start) (ma_IAudioClient* pThis); + HRESULT (STDMETHODCALLTYPE * Stop) (ma_IAudioClient* pThis); + HRESULT (STDMETHODCALLTYPE * Reset) (ma_IAudioClient* pThis); + HRESULT (STDMETHODCALLTYPE * SetEventHandle) (ma_IAudioClient* pThis, HANDLE eventHandle); + HRESULT (STDMETHODCALLTYPE * GetService) (ma_IAudioClient* pThis, const IID* const riid, void** pp); +} ma_IAudioClientVtbl; +struct ma_IAudioClient +{ + ma_IAudioClientVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IAudioClient_QueryInterface(ma_IAudioClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IAudioClient_AddRef(ma_IAudioClient* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG 
ma_IAudioClient_Release(ma_IAudioClient* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IAudioClient_Initialize(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const MA_WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); } +static MA_INLINE HRESULT ma_IAudioClient_GetBufferSize(ma_IAudioClient* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); } +static MA_INLINE HRESULT ma_IAudioClient_GetStreamLatency(ma_IAudioClient* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); } +static MA_INLINE HRESULT ma_IAudioClient_GetCurrentPadding(ma_IAudioClient* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); } +static MA_INLINE HRESULT ma_IAudioClient_IsFormatSupported(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, const MA_WAVEFORMATEX* pFormat, MA_WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); } +static MA_INLINE HRESULT ma_IAudioClient_GetMixFormat(ma_IAudioClient* pThis, MA_WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); } +static MA_INLINE HRESULT ma_IAudioClient_GetDevicePeriod(ma_IAudioClient* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); } +static MA_INLINE HRESULT ma_IAudioClient_Start(ma_IAudioClient* pThis) { return pThis->lpVtbl->Start(pThis); } +static MA_INLINE HRESULT ma_IAudioClient_Stop(ma_IAudioClient* pThis) { return pThis->lpVtbl->Stop(pThis); } +static MA_INLINE HRESULT ma_IAudioClient_Reset(ma_IAudioClient* pThis) { return pThis->lpVtbl->Reset(pThis); } +static MA_INLINE HRESULT ma_IAudioClient_SetEventHandle(ma_IAudioClient* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); } +static MA_INLINE HRESULT ma_IAudioClient_GetService(ma_IAudioClient* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); } + +/* IAudioClient2 */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient2* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioClient2* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioClient2* pThis); + + /* IAudioClient */ + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const MA_WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid); + HRESULT (STDMETHODCALLTYPE * GetBufferSize) (ma_IAudioClient2* pThis, ma_uint32* pNumBufferFrames); + HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pLatency); + HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient2* pThis, ma_uint32* pNumPaddingFrames); + HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, const MA_WAVEFORMATEX* pFormat, MA_WAVEFORMATEX** ppClosestMatch); + HRESULT (STDMETHODCALLTYPE * GetMixFormat) (ma_IAudioClient2* pThis, MA_WAVEFORMATEX** ppDeviceFormat); + HRESULT 
(STDMETHODCALLTYPE * GetDevicePeriod) (ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod); + HRESULT (STDMETHODCALLTYPE * Start) (ma_IAudioClient2* pThis); + HRESULT (STDMETHODCALLTYPE * Stop) (ma_IAudioClient2* pThis); + HRESULT (STDMETHODCALLTYPE * Reset) (ma_IAudioClient2* pThis); + HRESULT (STDMETHODCALLTYPE * SetEventHandle) (ma_IAudioClient2* pThis, HANDLE eventHandle); + HRESULT (STDMETHODCALLTYPE * GetService) (ma_IAudioClient2* pThis, const IID* const riid, void** pp); + + /* IAudioClient2 */ + HRESULT (STDMETHODCALLTYPE * IsOffloadCapable) (ma_IAudioClient2* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable); + HRESULT (STDMETHODCALLTYPE * SetClientProperties)(ma_IAudioClient2* pThis, const ma_AudioClientProperties* pProperties); + HRESULT (STDMETHODCALLTYPE * GetBufferSizeLimits)(ma_IAudioClient2* pThis, const MA_WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration); +} ma_IAudioClient2Vtbl; +struct ma_IAudioClient2 +{ + ma_IAudioClient2Vtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IAudioClient2_QueryInterface(ma_IAudioClient2* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IAudioClient2_AddRef(ma_IAudioClient2* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IAudioClient2_Release(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IAudioClient2_Initialize(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const MA_WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); } +static MA_INLINE HRESULT ma_IAudioClient2_GetBufferSize(ma_IAudioClient2* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); } +static MA_INLINE HRESULT ma_IAudioClient2_GetStreamLatency(ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); } +static MA_INLINE HRESULT ma_IAudioClient2_GetCurrentPadding(ma_IAudioClient2* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); } +static MA_INLINE HRESULT ma_IAudioClient2_IsFormatSupported(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, const MA_WAVEFORMATEX* pFormat, MA_WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); } +static MA_INLINE HRESULT ma_IAudioClient2_GetMixFormat(ma_IAudioClient2* pThis, MA_WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); } +static MA_INLINE HRESULT ma_IAudioClient2_GetDevicePeriod(ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); } +static MA_INLINE HRESULT ma_IAudioClient2_Start(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Start(pThis); } +static MA_INLINE HRESULT ma_IAudioClient2_Stop(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Stop(pThis); } +static MA_INLINE HRESULT ma_IAudioClient2_Reset(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Reset(pThis); } +static MA_INLINE HRESULT 
ma_IAudioClient2_SetEventHandle(ma_IAudioClient2* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); } +static MA_INLINE HRESULT ma_IAudioClient2_GetService(ma_IAudioClient2* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); } +static MA_INLINE HRESULT ma_IAudioClient2_IsOffloadCapable(ma_IAudioClient2* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable) { return pThis->lpVtbl->IsOffloadCapable(pThis, category, pOffloadCapable); } +static MA_INLINE HRESULT ma_IAudioClient2_SetClientProperties(ma_IAudioClient2* pThis, const ma_AudioClientProperties* pProperties) { return pThis->lpVtbl->SetClientProperties(pThis, pProperties); } +static MA_INLINE HRESULT ma_IAudioClient2_GetBufferSizeLimits(ma_IAudioClient2* pThis, const MA_WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration) { return pThis->lpVtbl->GetBufferSizeLimits(pThis, pFormat, eventDriven, pMinBufferDuration, pMaxBufferDuration); } + + +/* IAudioClient3 */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient3* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioClient3* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioClient3* pThis); + + /* IAudioClient */ + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const MA_WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid); + HRESULT (STDMETHODCALLTYPE * GetBufferSize) (ma_IAudioClient3* pThis, ma_uint32* pNumBufferFrames); + HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pLatency); + HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient3* pThis, ma_uint32* pNumPaddingFrames); + HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, const MA_WAVEFORMATEX* pFormat, MA_WAVEFORMATEX** ppClosestMatch); + HRESULT (STDMETHODCALLTYPE * GetMixFormat) (ma_IAudioClient3* pThis, MA_WAVEFORMATEX** ppDeviceFormat); + HRESULT (STDMETHODCALLTYPE * GetDevicePeriod) (ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod); + HRESULT (STDMETHODCALLTYPE * Start) (ma_IAudioClient3* pThis); + HRESULT (STDMETHODCALLTYPE * Stop) (ma_IAudioClient3* pThis); + HRESULT (STDMETHODCALLTYPE * Reset) (ma_IAudioClient3* pThis); + HRESULT (STDMETHODCALLTYPE * SetEventHandle) (ma_IAudioClient3* pThis, HANDLE eventHandle); + HRESULT (STDMETHODCALLTYPE * GetService) (ma_IAudioClient3* pThis, const IID* const riid, void** pp); + + /* IAudioClient2 */ + HRESULT (STDMETHODCALLTYPE * IsOffloadCapable) (ma_IAudioClient3* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable); + HRESULT (STDMETHODCALLTYPE * SetClientProperties)(ma_IAudioClient3* pThis, const ma_AudioClientProperties* pProperties); + HRESULT (STDMETHODCALLTYPE * GetBufferSizeLimits)(ma_IAudioClient3* pThis, const MA_WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration); + + /* IAudioClient3 */ + HRESULT (STDMETHODCALLTYPE * GetSharedModeEnginePeriod) (ma_IAudioClient3* pThis, const MA_WAVEFORMATEX* pFormat, ma_uint32* pDefaultPeriodInFrames, ma_uint32* pFundamentalPeriodInFrames, ma_uint32* pMinPeriodInFrames, ma_uint32* pMaxPeriodInFrames); + HRESULT 
(STDMETHODCALLTYPE * GetCurrentSharedModeEnginePeriod)(ma_IAudioClient3* pThis, MA_WAVEFORMATEX** ppFormat, ma_uint32* pCurrentPeriodInFrames); + HRESULT (STDMETHODCALLTYPE * InitializeSharedAudioStream) (ma_IAudioClient3* pThis, DWORD streamFlags, ma_uint32 periodInFrames, const MA_WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid); +} ma_IAudioClient3Vtbl; +struct ma_IAudioClient3 +{ + ma_IAudioClient3Vtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IAudioClient3_QueryInterface(ma_IAudioClient3* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IAudioClient3_AddRef(ma_IAudioClient3* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IAudioClient3_Release(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IAudioClient3_Initialize(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const MA_WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); } +static MA_INLINE HRESULT ma_IAudioClient3_GetBufferSize(ma_IAudioClient3* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); } +static MA_INLINE HRESULT ma_IAudioClient3_GetStreamLatency(ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); } +static MA_INLINE HRESULT ma_IAudioClient3_GetCurrentPadding(ma_IAudioClient3* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); } +static MA_INLINE HRESULT ma_IAudioClient3_IsFormatSupported(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, const MA_WAVEFORMATEX* pFormat, MA_WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); } +static MA_INLINE HRESULT ma_IAudioClient3_GetMixFormat(ma_IAudioClient3* pThis, MA_WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); } +static MA_INLINE HRESULT ma_IAudioClient3_GetDevicePeriod(ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); } +static MA_INLINE HRESULT ma_IAudioClient3_Start(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Start(pThis); } +static MA_INLINE HRESULT ma_IAudioClient3_Stop(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Stop(pThis); } +static MA_INLINE HRESULT ma_IAudioClient3_Reset(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Reset(pThis); } +static MA_INLINE HRESULT ma_IAudioClient3_SetEventHandle(ma_IAudioClient3* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); } +static MA_INLINE HRESULT ma_IAudioClient3_GetService(ma_IAudioClient3* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); } +static MA_INLINE HRESULT ma_IAudioClient3_IsOffloadCapable(ma_IAudioClient3* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable) { return pThis->lpVtbl->IsOffloadCapable(pThis, category, pOffloadCapable); } +static MA_INLINE HRESULT ma_IAudioClient3_SetClientProperties(ma_IAudioClient3* pThis, const ma_AudioClientProperties* pProperties) { return 
pThis->lpVtbl->SetClientProperties(pThis, pProperties); } +static MA_INLINE HRESULT ma_IAudioClient3_GetBufferSizeLimits(ma_IAudioClient3* pThis, const MA_WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration) { return pThis->lpVtbl->GetBufferSizeLimits(pThis, pFormat, eventDriven, pMinBufferDuration, pMaxBufferDuration); } +static MA_INLINE HRESULT ma_IAudioClient3_GetSharedModeEnginePeriod(ma_IAudioClient3* pThis, const MA_WAVEFORMATEX* pFormat, ma_uint32* pDefaultPeriodInFrames, ma_uint32* pFundamentalPeriodInFrames, ma_uint32* pMinPeriodInFrames, ma_uint32* pMaxPeriodInFrames) { return pThis->lpVtbl->GetSharedModeEnginePeriod(pThis, pFormat, pDefaultPeriodInFrames, pFundamentalPeriodInFrames, pMinPeriodInFrames, pMaxPeriodInFrames); } +static MA_INLINE HRESULT ma_IAudioClient3_GetCurrentSharedModeEnginePeriod(ma_IAudioClient3* pThis, MA_WAVEFORMATEX** ppFormat, ma_uint32* pCurrentPeriodInFrames) { return pThis->lpVtbl->GetCurrentSharedModeEnginePeriod(pThis, ppFormat, pCurrentPeriodInFrames); } +static MA_INLINE HRESULT ma_IAudioClient3_InitializeSharedAudioStream(ma_IAudioClient3* pThis, DWORD streamFlags, ma_uint32 periodInFrames, const MA_WAVEFORMATEX* pFormat, const GUID* pAudioSessionGUID) { return pThis->lpVtbl->InitializeSharedAudioStream(pThis, streamFlags, periodInFrames, pFormat, pAudioSessionGUID); } + + +/* IAudioRenderClient */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioRenderClient* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioRenderClient* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioRenderClient* pThis); + + /* IAudioRenderClient */ + HRESULT (STDMETHODCALLTYPE * GetBuffer) (ma_IAudioRenderClient* pThis, ma_uint32 numFramesRequested, BYTE** ppData); + HRESULT (STDMETHODCALLTYPE * ReleaseBuffer)(ma_IAudioRenderClient* pThis, ma_uint32 numFramesWritten, DWORD dwFlags); +} ma_IAudioRenderClientVtbl; +struct ma_IAudioRenderClient +{ + ma_IAudioRenderClientVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IAudioRenderClient_QueryInterface(ma_IAudioRenderClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IAudioRenderClient_AddRef(ma_IAudioRenderClient* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IAudioRenderClient_Release(ma_IAudioRenderClient* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IAudioRenderClient_GetBuffer(ma_IAudioRenderClient* pThis, ma_uint32 numFramesRequested, BYTE** ppData) { return pThis->lpVtbl->GetBuffer(pThis, numFramesRequested, ppData); } +static MA_INLINE HRESULT ma_IAudioRenderClient_ReleaseBuffer(ma_IAudioRenderClient* pThis, ma_uint32 numFramesWritten, DWORD dwFlags) { return pThis->lpVtbl->ReleaseBuffer(pThis, numFramesWritten, dwFlags); } + + +/* IAudioCaptureClient */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioCaptureClient* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioCaptureClient* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioCaptureClient* pThis); + + /* IAudioRenderClient */ + HRESULT (STDMETHODCALLTYPE * GetBuffer) (ma_IAudioCaptureClient* pThis, BYTE** ppData, ma_uint32* pNumFramesToRead, DWORD* pFlags, ma_uint64* pDevicePosition, ma_uint64* pQPCPosition); + HRESULT (STDMETHODCALLTYPE * ReleaseBuffer) 
(ma_IAudioCaptureClient* pThis, ma_uint32 numFramesRead); + HRESULT (STDMETHODCALLTYPE * GetNextPacketSize)(ma_IAudioCaptureClient* pThis, ma_uint32* pNumFramesInNextPacket); +} ma_IAudioCaptureClientVtbl; +struct ma_IAudioCaptureClient +{ + ma_IAudioCaptureClientVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IAudioCaptureClient_QueryInterface(ma_IAudioCaptureClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IAudioCaptureClient_AddRef(ma_IAudioCaptureClient* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IAudioCaptureClient_Release(ma_IAudioCaptureClient* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IAudioCaptureClient_GetBuffer(ma_IAudioCaptureClient* pThis, BYTE** ppData, ma_uint32* pNumFramesToRead, DWORD* pFlags, ma_uint64* pDevicePosition, ma_uint64* pQPCPosition) { return pThis->lpVtbl->GetBuffer(pThis, ppData, pNumFramesToRead, pFlags, pDevicePosition, pQPCPosition); } +static MA_INLINE HRESULT ma_IAudioCaptureClient_ReleaseBuffer(ma_IAudioCaptureClient* pThis, ma_uint32 numFramesRead) { return pThis->lpVtbl->ReleaseBuffer(pThis, numFramesRead); } +static MA_INLINE HRESULT ma_IAudioCaptureClient_GetNextPacketSize(ma_IAudioCaptureClient* pThis, ma_uint32* pNumFramesInNextPacket) { return pThis->lpVtbl->GetNextPacketSize(pThis, pNumFramesInNextPacket); } + +#if defined(MA_WIN32_UWP) +/* mmdevapi Functions */ +typedef HRESULT (WINAPI * MA_PFN_ActivateAudioInterfaceAsync)(const wchar_t* deviceInterfacePath, const IID* riid, MA_PROPVARIANT* activationParams, ma_IActivateAudioInterfaceCompletionHandler* completionHandler, ma_IActivateAudioInterfaceAsyncOperation** activationOperation); +#endif + +/* Avrt Functions */ +typedef HANDLE (WINAPI * MA_PFN_AvSetMmThreadCharacteristicsA)(const char* TaskName, DWORD* TaskIndex); +typedef BOOL (WINAPI * MA_PFN_AvRevertMmThreadCharacteristics)(HANDLE AvrtHandle); + +#if !defined(MA_WIN32_DESKTOP) && !defined(MA_WIN32_GDK) +typedef struct ma_completion_handler_uwp ma_completion_handler_uwp; + +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_completion_handler_uwp* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_completion_handler_uwp* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_completion_handler_uwp* pThis); + + /* IActivateAudioInterfaceCompletionHandler */ + HRESULT (STDMETHODCALLTYPE * ActivateCompleted)(ma_completion_handler_uwp* pThis, ma_IActivateAudioInterfaceAsyncOperation* pActivateOperation); +} ma_completion_handler_uwp_vtbl; +struct ma_completion_handler_uwp +{ + ma_completion_handler_uwp_vtbl* lpVtbl; + MA_ATOMIC(4, ma_uint32) counter; + HANDLE hEvent; +}; + +static HRESULT STDMETHODCALLTYPE ma_completion_handler_uwp_QueryInterface(ma_completion_handler_uwp* pThis, const IID* const riid, void** ppObject) +{ + /* + We need to "implement" IAgileObject which is just an indicator that's used internally by WASAPI for some multithreading management. To + "implement" this, we just make sure we return pThis when the IAgileObject is requested. + */ + if (!ma_is_guid_equal(riid, &MA_IID_IUnknown) && !ma_is_guid_equal(riid, &MA_IID_IActivateAudioInterfaceCompletionHandler) && !ma_is_guid_equal(riid, &MA_IID_IAgileObject)) { + *ppObject = NULL; + return E_NOINTERFACE; + } + + /* Getting here means the IID is IUnknown or IMMNotificationClient. 
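+    (Strictly, for this completion handler the check above accepts IUnknown,
+    IActivateAudioInterfaceCompletionHandler and IAgileObject.)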
*/ + *ppObject = (void*)pThis; + ((ma_completion_handler_uwp_vtbl*)pThis->lpVtbl)->AddRef(pThis); + return S_OK; +} + +static ULONG STDMETHODCALLTYPE ma_completion_handler_uwp_AddRef(ma_completion_handler_uwp* pThis) +{ + return (ULONG)ma_atomic_fetch_add_32(&pThis->counter, 1) + 1; +} + +static ULONG STDMETHODCALLTYPE ma_completion_handler_uwp_Release(ma_completion_handler_uwp* pThis) +{ + ma_uint32 newRefCount = ma_atomic_fetch_sub_32(&pThis->counter, 1) - 1; + if (newRefCount == 0) { + return 0; /* We don't free anything here because we never allocate the object on the heap. */ + } + + return (ULONG)newRefCount; +} + +static HRESULT STDMETHODCALLTYPE ma_completion_handler_uwp_ActivateCompleted(ma_completion_handler_uwp* pThis, ma_IActivateAudioInterfaceAsyncOperation* pActivateOperation) +{ + (void)pActivateOperation; + SetEvent(pThis->hEvent); + return S_OK; +} + + +static ma_completion_handler_uwp_vtbl g_maCompletionHandlerVtblInstance = { + ma_completion_handler_uwp_QueryInterface, + ma_completion_handler_uwp_AddRef, + ma_completion_handler_uwp_Release, + ma_completion_handler_uwp_ActivateCompleted +}; + +static ma_result ma_completion_handler_uwp_init(ma_completion_handler_uwp* pHandler) +{ + MA_ASSERT(pHandler != NULL); + MA_ZERO_OBJECT(pHandler); + + pHandler->lpVtbl = &g_maCompletionHandlerVtblInstance; + pHandler->counter = 1; + pHandler->hEvent = CreateEventA(NULL, FALSE, FALSE, NULL); + if (pHandler->hEvent == NULL) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + +static void ma_completion_handler_uwp_uninit(ma_completion_handler_uwp* pHandler) +{ + if (pHandler->hEvent != NULL) { + CloseHandle(pHandler->hEvent); + } +} + +static void ma_completion_handler_uwp_wait(ma_completion_handler_uwp* pHandler) +{ + WaitForSingleObject((HANDLE)pHandler->hEvent, INFINITE); +} +#endif /* !MA_WIN32_DESKTOP */ + +/* We need a virtual table for our notification client object that's used for detecting changes to the default device. */ +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_QueryInterface(ma_IMMNotificationClient* pThis, const IID* const riid, void** ppObject) +{ + /* + We care about two interfaces - IUnknown and IMMNotificationClient. If the requested IID is something else + we just return E_NOINTERFACE. Otherwise we need to increment the reference counter and return S_OK. + */ + if (!ma_is_guid_equal(riid, &MA_IID_IUnknown) && !ma_is_guid_equal(riid, &MA_IID_IMMNotificationClient)) { + *ppObject = NULL; + return E_NOINTERFACE; + } + + /* Getting here means the IID is IUnknown or IMMNotificationClient. */ + *ppObject = (void*)pThis; + ((ma_IMMNotificationClientVtbl*)pThis->lpVtbl)->AddRef(pThis); + return S_OK; +} + +static ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_AddRef(ma_IMMNotificationClient* pThis) +{ + return (ULONG)ma_atomic_fetch_add_32(&pThis->counter, 1) + 1; +} + +static ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_Release(ma_IMMNotificationClient* pThis) +{ + ma_uint32 newRefCount = ma_atomic_fetch_sub_32(&pThis->counter, 1) - 1; + if (newRefCount == 0) { + return 0; /* We don't free anything here because we never allocate the object on the heap. 
*/ + } + + return (ULONG)newRefCount; +} + +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceStateChanged(ma_IMMNotificationClient* pThis, const WCHAR* pDeviceID, DWORD dwNewState) +{ + ma_bool32 isThisDevice = MA_FALSE; + ma_bool32 isCapture = MA_FALSE; + ma_bool32 isPlayback = MA_FALSE; + +#ifdef MA_DEBUG_OUTPUT + /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnDeviceStateChanged(pDeviceID=%S, dwNewState=%u)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)", (unsigned int)dwNewState);*/ +#endif + + /* + There have been reports of a hang when a playback device is disconnected. The idea with this code is to explicitly stop the device if we detect + that the device is disabled or has been unplugged. + */ + if (pThis->pDevice->wasapi.allowCaptureAutoStreamRouting && (pThis->pDevice->type == ma_device_type_capture || pThis->pDevice->type == ma_device_type_duplex || pThis->pDevice->type == ma_device_type_loopback)) { + isCapture = MA_TRUE; + if (ma_strcmp_WCHAR(pThis->pDevice->capture.id.wasapi, pDeviceID) == 0) { + isThisDevice = MA_TRUE; + } + } + + if (pThis->pDevice->wasapi.allowPlaybackAutoStreamRouting && (pThis->pDevice->type == ma_device_type_playback || pThis->pDevice->type == ma_device_type_duplex)) { + isPlayback = MA_TRUE; + if (ma_strcmp_WCHAR(pThis->pDevice->playback.id.wasapi, pDeviceID) == 0) { + isThisDevice = MA_TRUE; + } + } + + + /* + If the device ID matches our device we need to mark our device as detached and stop it. When a + device is added in OnDeviceAdded(), we'll restart it. We only mark it as detached if the device + was started at the time of being removed. + */ + if (isThisDevice) { + if ((dwNewState & MA_MM_DEVICE_STATE_ACTIVE) == 0) { + /* + Unplugged or otherwise unavailable. Mark as detached if we were in a playing state. We'll + use this to determine whether or not we need to automatically start the device when it's + plugged back in again. + */ + if (ma_device_get_state(pThis->pDevice) == ma_device_state_started) { + if (isPlayback) { + pThis->pDevice->wasapi.isDetachedPlayback = MA_TRUE; + } + if (isCapture) { + pThis->pDevice->wasapi.isDetachedCapture = MA_TRUE; + } + + ma_device_stop(pThis->pDevice); + } + } + + if ((dwNewState & MA_MM_DEVICE_STATE_ACTIVE) != 0) { + /* The device was activated. If we were detached, we need to start it again. */ + ma_bool8 tryRestartingDevice = MA_FALSE; + + if (isPlayback) { + if (pThis->pDevice->wasapi.isDetachedPlayback) { + pThis->pDevice->wasapi.isDetachedPlayback = MA_FALSE; + ma_device_reroute__wasapi(pThis->pDevice, ma_device_type_playback); + tryRestartingDevice = MA_TRUE; + } + } + + if (isCapture) { + if (pThis->pDevice->wasapi.isDetachedCapture) { + pThis->pDevice->wasapi.isDetachedCapture = MA_FALSE; + ma_device_reroute__wasapi(pThis->pDevice, (pThis->pDevice->type == ma_device_type_loopback) ? ma_device_type_loopback : ma_device_type_capture); + tryRestartingDevice = MA_TRUE; + } + } + + if (tryRestartingDevice) { + if (pThis->pDevice->wasapi.isDetachedPlayback == MA_FALSE && pThis->pDevice->wasapi.isDetachedCapture == MA_FALSE) { + ma_device_start(pThis->pDevice); + } + } + } + } + + return S_OK; +} + +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceAdded(ma_IMMNotificationClient* pThis, const WCHAR* pDeviceID) +{ +#ifdef MA_DEBUG_OUTPUT + /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnDeviceAdded(pDeviceID=%S)\n", (pDeviceID != NULL) ? 
pDeviceID : L"(NULL)");*/ +#endif + + /* We don't need to worry about this event for our purposes. */ + (void)pThis; + (void)pDeviceID; + return S_OK; +} + +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceRemoved(ma_IMMNotificationClient* pThis, const WCHAR* pDeviceID) +{ +#ifdef MA_DEBUG_OUTPUT + /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnDeviceRemoved(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)");*/ +#endif + + /* We don't need to worry about this event for our purposes. */ + (void)pThis; + (void)pDeviceID; + return S_OK; +} + +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDefaultDeviceChanged(ma_IMMNotificationClient* pThis, ma_EDataFlow dataFlow, ma_ERole role, const WCHAR* pDefaultDeviceID) +{ +#ifdef MA_DEBUG_OUTPUT + /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnDefaultDeviceChanged(dataFlow=%d, role=%d, pDefaultDeviceID=%S)\n", dataFlow, role, (pDefaultDeviceID != NULL) ? pDefaultDeviceID : L"(NULL)");*/ +#endif + + (void)role; + + /* We only care about devices with the same data flow as the current device. */ + if ((pThis->pDevice->type == ma_device_type_playback && dataFlow != ma_eRender) || + (pThis->pDevice->type == ma_device_type_capture && dataFlow != ma_eCapture) || + (pThis->pDevice->type == ma_device_type_loopback && dataFlow != ma_eRender)) { + ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Stream rerouting abandoned because dataFlow does match device type.\n"); + return S_OK; + } + + /* We need to consider dataFlow as ma_eCapture if device is ma_device_type_loopback */ + if (pThis->pDevice->type == ma_device_type_loopback) { + dataFlow = ma_eCapture; + } + + /* Don't do automatic stream routing if we're not allowed. */ + if ((dataFlow == ma_eRender && pThis->pDevice->wasapi.allowPlaybackAutoStreamRouting == MA_FALSE) || + (dataFlow == ma_eCapture && pThis->pDevice->wasapi.allowCaptureAutoStreamRouting == MA_FALSE)) { + ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Stream rerouting abandoned because automatic stream routing has been disabled by the device config.\n"); + return S_OK; + } + + /* + Not currently supporting automatic stream routing in exclusive mode. This is not working correctly on my machine due to + AUDCLNT_E_DEVICE_IN_USE errors when reinitializing the device. If this is a bug in miniaudio, we can try re-enabling this once + it's fixed. + */ + if ((dataFlow == ma_eRender && pThis->pDevice->playback.shareMode == ma_share_mode_exclusive) || + (dataFlow == ma_eCapture && pThis->pDevice->capture.shareMode == ma_share_mode_exclusive)) { + ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Stream rerouting abandoned because the device shared mode is exclusive.\n"); + return S_OK; + } + + + + /* + Second attempt at device rerouting. We're going to retrieve the device's state at the time of + the route change. We're then going to stop the device, reinitialize the device, and then start + it again if the state before stopping was ma_device_state_started. 
+ */ + { + ma_uint32 previousState = ma_device_get_state(pThis->pDevice); + ma_bool8 restartDevice = MA_FALSE; + + if (previousState == ma_device_state_uninitialized || previousState == ma_device_state_starting) { + ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Stream rerouting abandoned because the device is in the process of starting.\n"); + return S_OK; + } + + if (previousState == ma_device_state_started) { + ma_device_stop(pThis->pDevice); + restartDevice = MA_TRUE; + } + + if (pDefaultDeviceID != NULL) { /* <-- The input device ID will be null if there's no other device available. */ + ma_mutex_lock(&pThis->pDevice->wasapi.rerouteLock); + { + if (dataFlow == ma_eRender) { + ma_device_reroute__wasapi(pThis->pDevice, ma_device_type_playback); + + if (pThis->pDevice->wasapi.isDetachedPlayback) { + pThis->pDevice->wasapi.isDetachedPlayback = MA_FALSE; + + if (pThis->pDevice->type == ma_device_type_duplex && pThis->pDevice->wasapi.isDetachedCapture) { + restartDevice = MA_FALSE; /* It's a duplex device and the capture side is detached. We cannot be restarting the device just yet. */ + } + else { + restartDevice = MA_TRUE; /* It's not a duplex device, or the capture side is also attached so we can go ahead and restart the device. */ + } + } + } + else { + ma_device_reroute__wasapi(pThis->pDevice, (pThis->pDevice->type == ma_device_type_loopback) ? ma_device_type_loopback : ma_device_type_capture); + + if (pThis->pDevice->wasapi.isDetachedCapture) { + pThis->pDevice->wasapi.isDetachedCapture = MA_FALSE; + + if (pThis->pDevice->type == ma_device_type_duplex && pThis->pDevice->wasapi.isDetachedPlayback) { + restartDevice = MA_FALSE; /* It's a duplex device and the playback side is detached. We cannot be restarting the device just yet. */ + } + else { + restartDevice = MA_TRUE; /* It's not a duplex device, or the playback side is also attached so we can go ahead and restart the device. */ + } + } + } + } + ma_mutex_unlock(&pThis->pDevice->wasapi.rerouteLock); + + if (restartDevice) { + ma_device_start(pThis->pDevice); + } + } + } + + return S_OK; +} + +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnPropertyValueChanged(ma_IMMNotificationClient* pThis, const WCHAR* pDeviceID, const PROPERTYKEY key) +{ +#ifdef MA_DEBUG_OUTPUT + /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnPropertyValueChanged(pDeviceID=%S)\n", (pDeviceID != NULL) ? 
pDeviceID : L"(NULL)");*/ +#endif + + (void)pThis; + (void)pDeviceID; + (void)key; + return S_OK; +} + +static ma_IMMNotificationClientVtbl g_maNotificationCientVtbl = { + ma_IMMNotificationClient_QueryInterface, + ma_IMMNotificationClient_AddRef, + ma_IMMNotificationClient_Release, + ma_IMMNotificationClient_OnDeviceStateChanged, + ma_IMMNotificationClient_OnDeviceAdded, + ma_IMMNotificationClient_OnDeviceRemoved, + ma_IMMNotificationClient_OnDefaultDeviceChanged, + ma_IMMNotificationClient_OnPropertyValueChanged +}; +#endif /* MA_WIN32_DESKTOP */ + +static const char* ma_to_usage_string__wasapi(ma_wasapi_usage usage) +{ + switch (usage) + { + case ma_wasapi_usage_default: return NULL; + case ma_wasapi_usage_games: return "Games"; + case ma_wasapi_usage_pro_audio: return "Pro Audio"; + default: break; + } + + return NULL; +} + +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) +typedef ma_IMMDevice ma_WASAPIDeviceInterface; +#else +typedef ma_IUnknown ma_WASAPIDeviceInterface; +#endif + + +#define MA_CONTEXT_COMMAND_QUIT__WASAPI 1 +#define MA_CONTEXT_COMMAND_CREATE_IAUDIOCLIENT__WASAPI 2 +#define MA_CONTEXT_COMMAND_RELEASE_IAUDIOCLIENT__WASAPI 3 + +static ma_context_command__wasapi ma_context_init_command__wasapi(int code) +{ + ma_context_command__wasapi cmd; + + MA_ZERO_OBJECT(&cmd); + cmd.code = code; + + return cmd; +} + +static ma_result ma_context_post_command__wasapi(ma_context* pContext, const ma_context_command__wasapi* pCmd) +{ + /* For now we are doing everything synchronously, but I might relax this later if the need arises. */ + ma_result result; + ma_bool32 isUsingLocalEvent = MA_FALSE; + ma_event localEvent; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pCmd != NULL); + + if (pCmd->pEvent == NULL) { + isUsingLocalEvent = MA_TRUE; + + result = ma_event_init(&localEvent); + if (result != MA_SUCCESS) { + return result; /* Failed to create the event for this command. */ + } + } + + /* Here is where we add the command to the list. If there's not enough room we'll spin until there is. */ + ma_mutex_lock(&pContext->wasapi.commandLock); + { + ma_uint32 index; + + /* Spin until we've got some space available. */ + while (pContext->wasapi.commandCount == ma_countof(pContext->wasapi.commands)) { + ma_yield(); + } + + /* Space is now available. Can safely add to the list. */ + index = (pContext->wasapi.commandIndex + pContext->wasapi.commandCount) % ma_countof(pContext->wasapi.commands); + pContext->wasapi.commands[index] = *pCmd; + pContext->wasapi.commands[index].pEvent = &localEvent; + pContext->wasapi.commandCount += 1; + + /* Now that the command has been added, release the semaphore so ma_context_next_command__wasapi() can return. 
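+
+ The overall round trip is synchronous: the posting thread creates a local event, queues the
+ command, releases the semaphore and then parks on the event until the command thread has
+ executed the command and signalled it. Roughly:
+
+ posting thread                             command thread
+ --------------                             --------------
+ ma_event_init(&localEvent)                 ma_semaphore_wait(&commandSem)
+ enqueue command (pEvent = &localEvent)     dequeue command
+ ma_semaphore_release(&commandSem)          execute command
+ ma_event_wait(&localEvent)                 ma_event_signal(cmd.pEvent)
+ ma_event_uninit(&localEvent)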
*/ + ma_semaphore_release(&pContext->wasapi.commandSem); + } + ma_mutex_unlock(&pContext->wasapi.commandLock); + + if (isUsingLocalEvent) { + ma_event_wait(&localEvent); + ma_event_uninit(&localEvent); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_next_command__wasapi(ma_context* pContext, ma_context_command__wasapi* pCmd) +{ + ma_result result = MA_SUCCESS; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pCmd != NULL); + + result = ma_semaphore_wait(&pContext->wasapi.commandSem); + if (result == MA_SUCCESS) { + ma_mutex_lock(&pContext->wasapi.commandLock); + { + *pCmd = pContext->wasapi.commands[pContext->wasapi.commandIndex]; + pContext->wasapi.commandIndex = (pContext->wasapi.commandIndex + 1) % ma_countof(pContext->wasapi.commands); + pContext->wasapi.commandCount -= 1; + } + ma_mutex_unlock(&pContext->wasapi.commandLock); + } + + return result; +} + +static ma_thread_result MA_THREADCALL ma_context_command_thread__wasapi(void* pUserData) +{ + ma_result result; + ma_context* pContext = (ma_context*)pUserData; + MA_ASSERT(pContext != NULL); + + for (;;) { + ma_context_command__wasapi cmd; + result = ma_context_next_command__wasapi(pContext, &cmd); + if (result != MA_SUCCESS) { + break; + } + + switch (cmd.code) + { + case MA_CONTEXT_COMMAND_QUIT__WASAPI: + { + /* Do nothing. Handled after the switch. */ + } break; + + case MA_CONTEXT_COMMAND_CREATE_IAUDIOCLIENT__WASAPI: + { + if (cmd.data.createAudioClient.deviceType == ma_device_type_playback) { + *cmd.data.createAudioClient.pResult = ma_result_from_HRESULT(ma_IAudioClient_GetService((ma_IAudioClient*)cmd.data.createAudioClient.pAudioClient, &MA_IID_IAudioRenderClient, cmd.data.createAudioClient.ppAudioClientService)); + } else { + *cmd.data.createAudioClient.pResult = ma_result_from_HRESULT(ma_IAudioClient_GetService((ma_IAudioClient*)cmd.data.createAudioClient.pAudioClient, &MA_IID_IAudioCaptureClient, cmd.data.createAudioClient.ppAudioClientService)); + } + } break; + + case MA_CONTEXT_COMMAND_RELEASE_IAUDIOCLIENT__WASAPI: + { + if (cmd.data.releaseAudioClient.deviceType == ma_device_type_playback) { + if (cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientPlayback != NULL) { + ma_IAudioClient_Release((ma_IAudioClient*)cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientPlayback); + cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientPlayback = NULL; + } + } + + if (cmd.data.releaseAudioClient.deviceType == ma_device_type_capture) { + if (cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientCapture != NULL) { + ma_IAudioClient_Release((ma_IAudioClient*)cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientCapture); + cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientCapture = NULL; + } + } + } break; + + default: + { + /* Unknown command. Ignore it, but trigger an assert in debug mode so we're aware of it. */ + MA_ASSERT(MA_FALSE); + } break; + } + + if (cmd.pEvent != NULL) { + ma_event_signal(cmd.pEvent); + } + + if (cmd.code == MA_CONTEXT_COMMAND_QUIT__WASAPI) { + break; /* Received a quit message. Get out of here. 
*/ + } + } + + return (ma_thread_result)0; +} + +static ma_result ma_device_create_IAudioClient_service__wasapi(ma_context* pContext, ma_device_type deviceType, ma_IAudioClient* pAudioClient, void** ppAudioClientService) +{ + ma_result result; + ma_result cmdResult; + ma_context_command__wasapi cmd = ma_context_init_command__wasapi(MA_CONTEXT_COMMAND_CREATE_IAUDIOCLIENT__WASAPI); + cmd.data.createAudioClient.deviceType = deviceType; + cmd.data.createAudioClient.pAudioClient = (void*)pAudioClient; + cmd.data.createAudioClient.ppAudioClientService = ppAudioClientService; + cmd.data.createAudioClient.pResult = &cmdResult; /* Declared locally, but won't be dereferenced after this function returns since execution of the command will wait here. */ + + result = ma_context_post_command__wasapi(pContext, &cmd); /* This will not return until the command has actually been run. */ + if (result != MA_SUCCESS) { + return result; + } + + return *cmd.data.createAudioClient.pResult; +} + +#if 0 /* Not used at the moment, but leaving here for future use. */ +static ma_result ma_device_release_IAudioClient_service__wasapi(ma_device* pDevice, ma_device_type deviceType) +{ + ma_result result; + ma_context_command__wasapi cmd = ma_context_init_command__wasapi(MA_CONTEXT_COMMAND_RELEASE_IAUDIOCLIENT__WASAPI); + cmd.data.releaseAudioClient.pDevice = pDevice; + cmd.data.releaseAudioClient.deviceType = deviceType; + + result = ma_context_post_command__wasapi(pDevice->pContext, &cmd); /* This will not return until the command has actually been run. */ + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} +#endif + + +static void ma_add_native_data_format_to_device_info_from_WAVEFORMATEX(const MA_WAVEFORMATEX* pWF, ma_share_mode shareMode, ma_device_info* pInfo) +{ + MA_ASSERT(pWF != NULL); + MA_ASSERT(pInfo != NULL); + + if (pInfo->nativeDataFormatCount >= ma_countof(pInfo->nativeDataFormats)) { + return; /* Too many data formats. Need to ignore this one. Don't think this should ever happen with WASAPI. */ + } + + pInfo->nativeDataFormats[pInfo->nativeDataFormatCount].format = ma_format_from_WAVEFORMATEX(pWF); + pInfo->nativeDataFormats[pInfo->nativeDataFormatCount].channels = pWF->nChannels; + pInfo->nativeDataFormats[pInfo->nativeDataFormatCount].sampleRate = pWF->nSamplesPerSec; + pInfo->nativeDataFormats[pInfo->nativeDataFormatCount].flags = (shareMode == ma_share_mode_exclusive) ? MA_DATA_FORMAT_FLAG_EXCLUSIVE_MODE : 0; + pInfo->nativeDataFormatCount += 1; +} + +static ma_result ma_context_get_device_info_from_IAudioClient__wasapi(ma_context* pContext, /*ma_IMMDevice**/void* pMMDevice, ma_IAudioClient* pAudioClient, ma_device_info* pInfo) +{ + HRESULT hr; + MA_WAVEFORMATEX* pWF = NULL; + + MA_ASSERT(pAudioClient != NULL); + MA_ASSERT(pInfo != NULL); + + /* Shared Mode. We use GetMixFormat() here. */ + hr = ma_IAudioClient_GetMixFormat((ma_IAudioClient*)pAudioClient, (MA_WAVEFORMATEX**)&pWF); + if (SUCCEEDED(hr)) { + ma_add_native_data_format_to_device_info_from_WAVEFORMATEX(pWF, ma_share_mode_shared, pInfo); + } else { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve mix format for device info retrieval."); + return ma_result_from_HRESULT(hr); + } + + /* + Exlcusive Mode. We repeatedly call IsFormatSupported() here. This is not currently supported on + UWP. Failure to retrieve the exclusive mode format is not considered an error, so from here on + out, MA_SUCCESS is guaranteed to be returned. 
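+
+ On the application side, whatever gets added here surfaces through the public device info API.
+ A minimal sketch of reading it back, assuming an already-initialised ma_context named context
+ (error handling elided):
+
+ ma_device_info info;
+ ma_uint32 iFormat;
+ if (ma_context_get_device_info(&context, ma_device_type_playback, NULL, &info) == MA_SUCCESS) {    <-- NULL device ID = default device
+     for (iFormat = 0; iFormat < info.nativeDataFormatCount; iFormat += 1) {
+         printf("format=%d channels=%u rate=%u exclusive=%d\n",
+             (int)info.nativeDataFormats[iFormat].format,
+             info.nativeDataFormats[iFormat].channels,
+             info.nativeDataFormats[iFormat].sampleRate,
+             (info.nativeDataFormats[iFormat].flags & MA_DATA_FORMAT_FLAG_EXCLUSIVE_MODE) != 0);
+     }
+ }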
+ */ +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + { + ma_IPropertyStore *pProperties; + + /* + The first thing to do is get the format from PKEY_AudioEngine_DeviceFormat. This should give us a channel count we assume is + correct which will simplify our searching. + */ + hr = ma_IMMDevice_OpenPropertyStore((ma_IMMDevice*)pMMDevice, STGM_READ, &pProperties); + if (SUCCEEDED(hr)) { + MA_PROPVARIANT var; + ma_PropVariantInit(&var); + + hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_AudioEngine_DeviceFormat, &var); + if (SUCCEEDED(hr)) { + pWF = (MA_WAVEFORMATEX*)var.blob.pBlobData; + + /* + In my testing, the format returned by PKEY_AudioEngine_DeviceFormat is suitable for exclusive mode so we check this format + first. If this fails, fall back to a search. + */ + hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, pWF, NULL); + if (SUCCEEDED(hr)) { + /* The format returned by PKEY_AudioEngine_DeviceFormat is supported. */ + ma_add_native_data_format_to_device_info_from_WAVEFORMATEX(pWF, ma_share_mode_exclusive, pInfo); + } else { + /* + The format returned by PKEY_AudioEngine_DeviceFormat is not supported, so fall back to a search. We assume the channel + count returned by MA_PKEY_AudioEngine_DeviceFormat is valid and correct. For simplicity we're only returning one format. + */ + ma_uint32 channels = pWF->nChannels; + ma_channel defaultChannelMap[MA_MAX_CHANNELS]; + MA_WAVEFORMATEXTENSIBLE wf; + ma_bool32 found; + ma_uint32 iFormat; + + /* Make sure we don't overflow the channel map. */ + if (channels > MA_MAX_CHANNELS) { + channels = MA_MAX_CHANNELS; + } + + ma_channel_map_init_standard(ma_standard_channel_map_microsoft, defaultChannelMap, ma_countof(defaultChannelMap), channels); + + MA_ZERO_OBJECT(&wf); + wf.cbSize = sizeof(wf); + wf.wFormatTag = WAVE_FORMAT_EXTENSIBLE; + wf.nChannels = (WORD)channels; + wf.dwChannelMask = ma_channel_map_to_channel_mask__win32(defaultChannelMap, channels); + + found = MA_FALSE; + for (iFormat = 0; iFormat < ma_countof(g_maFormatPriorities); ++iFormat) { + ma_format format = g_maFormatPriorities[iFormat]; + ma_uint32 iSampleRate; + + wf.wBitsPerSample = (WORD)(ma_get_bytes_per_sample(format)*8); + wf.nBlockAlign = (WORD)(wf.nChannels * wf.wBitsPerSample / 8); + wf.nAvgBytesPerSec = wf.nBlockAlign * wf.nSamplesPerSec; + wf.Samples.wValidBitsPerSample = /*(format == ma_format_s24_32) ? 
24 :*/ wf.wBitsPerSample; + if (format == ma_format_f32) { + wf.SubFormat = MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT; + } else { + wf.SubFormat = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM; + } + + for (iSampleRate = 0; iSampleRate < ma_countof(g_maStandardSampleRatePriorities); ++iSampleRate) { + wf.nSamplesPerSec = g_maStandardSampleRatePriorities[iSampleRate]; + + hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, (MA_WAVEFORMATEX*)&wf, NULL); + if (SUCCEEDED(hr)) { + ma_add_native_data_format_to_device_info_from_WAVEFORMATEX((MA_WAVEFORMATEX*)&wf, ma_share_mode_exclusive, pInfo); + found = MA_TRUE; + break; + } + } + + if (found) { + break; + } + } + + ma_PropVariantClear(pContext, &var); + + if (!found) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "[WASAPI] Failed to find suitable device format for device info retrieval."); + } + } + } else { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "[WASAPI] Failed to retrieve device format for device info retrieval."); + } + + ma_IPropertyStore_Release(pProperties); + } else { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "[WASAPI] Failed to open property store for device info retrieval."); + } + } +#else + { + (void)pMMDevice; /* Unused. */ + } +#endif + + return MA_SUCCESS; +} + +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) +static ma_EDataFlow ma_device_type_to_EDataFlow(ma_device_type deviceType) +{ + if (deviceType == ma_device_type_playback) { + return ma_eRender; + } else if (deviceType == ma_device_type_capture) { + return ma_eCapture; + } else { + MA_ASSERT(MA_FALSE); + return ma_eRender; /* Should never hit this. */ + } +} + +static ma_result ma_context_create_IMMDeviceEnumerator__wasapi(ma_context* pContext, ma_IMMDeviceEnumerator** ppDeviceEnumerator) +{ + HRESULT hr; + ma_IMMDeviceEnumerator* pDeviceEnumerator; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppDeviceEnumerator != NULL); + + *ppDeviceEnumerator = NULL; /* Safety. */ + + hr = ma_CoCreateInstance(pContext, &MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, &MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator); + if (FAILED(hr)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator."); + return ma_result_from_HRESULT(hr); + } + + *ppDeviceEnumerator = pDeviceEnumerator; + + return MA_SUCCESS; +} + +static WCHAR* ma_context_get_default_device_id_from_IMMDeviceEnumerator__wasapi(ma_context* pContext, ma_IMMDeviceEnumerator* pDeviceEnumerator, ma_device_type deviceType) +{ + HRESULT hr; + ma_IMMDevice* pMMDefaultDevice = NULL; + WCHAR* pDefaultDeviceID = NULL; + ma_EDataFlow dataFlow; + ma_ERole role; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDeviceEnumerator != NULL); + + (void)pContext; + + /* Grab the EDataFlow type from the device type. */ + dataFlow = ma_device_type_to_EDataFlow(deviceType); + + /* The role is always eConsole, but we may make this configurable later. 
*/ + role = ma_eConsole; + + hr = ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(pDeviceEnumerator, dataFlow, role, &pMMDefaultDevice); + if (FAILED(hr)) { + return NULL; + } + + hr = ma_IMMDevice_GetId(pMMDefaultDevice, &pDefaultDeviceID); + + ma_IMMDevice_Release(pMMDefaultDevice); + pMMDefaultDevice = NULL; + + if (FAILED(hr)) { + return NULL; + } + + return pDefaultDeviceID; +} + +static WCHAR* ma_context_get_default_device_id__wasapi(ma_context* pContext, ma_device_type deviceType) /* Free the returned pointer with ma_CoTaskMemFree() */ +{ + ma_result result; + ma_IMMDeviceEnumerator* pDeviceEnumerator; + WCHAR* pDefaultDeviceID = NULL; + + MA_ASSERT(pContext != NULL); + + result = ma_context_create_IMMDeviceEnumerator__wasapi(pContext, &pDeviceEnumerator); + if (result != MA_SUCCESS) { + return NULL; + } + + pDefaultDeviceID = ma_context_get_default_device_id_from_IMMDeviceEnumerator__wasapi(pContext, pDeviceEnumerator, deviceType); + + ma_IMMDeviceEnumerator_Release(pDeviceEnumerator); + return pDefaultDeviceID; +} + +static ma_result ma_context_get_MMDevice__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IMMDevice** ppMMDevice) +{ + ma_IMMDeviceEnumerator* pDeviceEnumerator; + HRESULT hr; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppMMDevice != NULL); + + hr = ma_CoCreateInstance(pContext, &MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, &MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator); + if (FAILED(hr)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create IMMDeviceEnumerator.\n"); + return ma_result_from_HRESULT(hr); + } + + if (pDeviceID == NULL) { + hr = ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(pDeviceEnumerator, (deviceType == ma_device_type_capture) ? ma_eCapture : ma_eRender, ma_eConsole, ppMMDevice); + } else { + hr = ma_IMMDeviceEnumerator_GetDevice(pDeviceEnumerator, pDeviceID->wasapi, ppMMDevice); + } + + ma_IMMDeviceEnumerator_Release(pDeviceEnumerator); + if (FAILED(hr)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve IMMDevice.\n"); + return ma_result_from_HRESULT(hr); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_id_from_MMDevice__wasapi(ma_context* pContext, ma_IMMDevice* pMMDevice, ma_device_id* pDeviceID) +{ + WCHAR* pDeviceIDString; + HRESULT hr; + + MA_ASSERT(pDeviceID != NULL); + + hr = ma_IMMDevice_GetId(pMMDevice, &pDeviceIDString); + if (SUCCEEDED(hr)) { + size_t idlen = ma_strlen_WCHAR(pDeviceIDString); + if (idlen+1 > ma_countof(pDeviceID->wasapi)) { + ma_CoTaskMemFree(pContext, pDeviceIDString); + MA_ASSERT(MA_FALSE); /* NOTE: If this is triggered, please report it. It means the format of the ID must haved change and is too long to fit in our fixed sized buffer. */ + return MA_ERROR; + } + + MA_COPY_MEMORY(pDeviceID->wasapi, pDeviceIDString, idlen * sizeof(wchar_t)); + pDeviceID->wasapi[idlen] = '\0'; + + ma_CoTaskMemFree(pContext, pDeviceIDString); + + return MA_SUCCESS; + } + + return MA_ERROR; +} + +static ma_result ma_context_get_device_info_from_MMDevice__wasapi(ma_context* pContext, ma_IMMDevice* pMMDevice, WCHAR* pDefaultDeviceID, ma_bool32 onlySimpleInfo, ma_device_info* pInfo) +{ + ma_result result; + HRESULT hr; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pMMDevice != NULL); + MA_ASSERT(pInfo != NULL); + + /* ID. 
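+
+ For reference, the raw COM sequence the helpers above wrap looks roughly like this in plain C,
+ assuming COM has been initialised and the MMDevice GUIDs are available (error handling elided):
+
+ IMMDeviceEnumerator* pEnumerator = NULL;
+ IMMDevice* pEndpoint = NULL;
+ LPWSTR pEndpointID = NULL;
+
+ CoCreateInstance(&CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, &IID_IMMDeviceEnumerator, (void**)&pEnumerator);
+ pEnumerator->lpVtbl->GetDefaultAudioEndpoint(pEnumerator, eRender, eConsole, &pEndpoint);
+ pEndpoint->lpVtbl->GetId(pEndpoint, &pEndpointID);    <-- returned string must be freed with CoTaskMemFree()
+ ...
+ CoTaskMemFree(pEndpointID);
+ pEndpoint->lpVtbl->Release(pEndpoint);
+ pEnumerator->lpVtbl->Release(pEnumerator);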
*/ + result = ma_context_get_device_id_from_MMDevice__wasapi(pContext, pMMDevice, &pInfo->id); + if (result == MA_SUCCESS) { + if (pDefaultDeviceID != NULL) { + if (ma_strcmp_WCHAR(pInfo->id.wasapi, pDefaultDeviceID) == 0) { + pInfo->isDefault = MA_TRUE; + } + } + } + + /* Description / Friendly Name */ + { + ma_IPropertyStore *pProperties; + hr = ma_IMMDevice_OpenPropertyStore(pMMDevice, STGM_READ, &pProperties); + if (SUCCEEDED(hr)) { + MA_PROPVARIANT var; + + ma_PropVariantInit(&var); + hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_Device_FriendlyName, &var); + if (SUCCEEDED(hr)) { + WideCharToMultiByte(CP_UTF8, 0, var.pwszVal, -1, pInfo->name, sizeof(pInfo->name), 0, FALSE); + ma_PropVariantClear(pContext, &var); + } + + ma_IPropertyStore_Release(pProperties); + } + } + + /* Format */ + if (!onlySimpleInfo) { + ma_IAudioClient* pAudioClient; + hr = ma_IMMDevice_Activate(pMMDevice, &MA_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pAudioClient); + if (SUCCEEDED(hr)) { + result = ma_context_get_device_info_from_IAudioClient__wasapi(pContext, pMMDevice, pAudioClient, pInfo); + + ma_IAudioClient_Release(pAudioClient); + return result; + } else { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to activate audio client for device info retrieval."); + return ma_result_from_HRESULT(hr); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_enumerate_devices_by_type__wasapi(ma_context* pContext, ma_IMMDeviceEnumerator* pDeviceEnumerator, ma_device_type deviceType, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_result result = MA_SUCCESS; + UINT deviceCount; + HRESULT hr; + ma_uint32 iDevice; + WCHAR* pDefaultDeviceID = NULL; + ma_IMMDeviceCollection* pDeviceCollection = NULL; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* Grab the default device. We use this to know whether or not flag the returned device info as being the default. */ + pDefaultDeviceID = ma_context_get_default_device_id_from_IMMDeviceEnumerator__wasapi(pContext, pDeviceEnumerator, deviceType); + + /* We need to enumerate the devices which returns a device collection. */ + hr = ma_IMMDeviceEnumerator_EnumAudioEndpoints(pDeviceEnumerator, ma_device_type_to_EDataFlow(deviceType), MA_MM_DEVICE_STATE_ACTIVE, &pDeviceCollection); + if (SUCCEEDED(hr)) { + hr = ma_IMMDeviceCollection_GetCount(pDeviceCollection, &deviceCount); + if (FAILED(hr)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to get device count.\n"); + result = ma_result_from_HRESULT(hr); + goto done; + } + + for (iDevice = 0; iDevice < deviceCount; ++iDevice) { + ma_device_info deviceInfo; + ma_IMMDevice* pMMDevice; + + MA_ZERO_OBJECT(&deviceInfo); + + hr = ma_IMMDeviceCollection_Item(pDeviceCollection, iDevice, &pMMDevice); + if (SUCCEEDED(hr)) { + result = ma_context_get_device_info_from_MMDevice__wasapi(pContext, pMMDevice, pDefaultDeviceID, MA_TRUE, &deviceInfo); /* MA_TRUE = onlySimpleInfo. 
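+
+ This enumeration is normally reached through the public API rather than called directly. A
+ minimal sketch using ma_context_get_devices(), which drives this callback-based path
+ internally (error handling elided):
+
+ ma_context context;
+ ma_device_info* pPlaybackInfos;
+ ma_uint32 playbackCount;
+ ma_device_info* pCaptureInfos;
+ ma_uint32 captureCount;
+ ma_uint32 iDevice;
+
+ ma_context_init(NULL, 0, NULL, &context);
+ ma_context_get_devices(&context, &pPlaybackInfos, &playbackCount, &pCaptureInfos, &captureCount);
+ for (iDevice = 0; iDevice < playbackCount; iDevice += 1) {
+     printf("%s%s\n", pPlaybackInfos[iDevice].name, pPlaybackInfos[iDevice].isDefault ? " (default)" : "");
+ }
+ ma_context_uninit(&context);    <-- also invalidates the device info pointers returned above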
*/ + + ma_IMMDevice_Release(pMMDevice); + if (result == MA_SUCCESS) { + ma_bool32 cbResult = callback(pContext, deviceType, &deviceInfo, pUserData); + if (cbResult == MA_FALSE) { + break; + } + } + } + } + } + +done: + if (pDefaultDeviceID != NULL) { + ma_CoTaskMemFree(pContext, pDefaultDeviceID); + pDefaultDeviceID = NULL; + } + + if (pDeviceCollection != NULL) { + ma_IMMDeviceCollection_Release(pDeviceCollection); + pDeviceCollection = NULL; + } + + return result; +} + +static ma_result ma_context_get_IAudioClient_Desktop__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, MA_PROPVARIANT* pActivationParams, ma_IAudioClient** ppAudioClient, ma_IMMDevice** ppMMDevice) +{ + ma_result result; + HRESULT hr; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppAudioClient != NULL); + MA_ASSERT(ppMMDevice != NULL); + + result = ma_context_get_MMDevice__wasapi(pContext, deviceType, pDeviceID, ppMMDevice); + if (result != MA_SUCCESS) { + return result; + } + + hr = ma_IMMDevice_Activate(*ppMMDevice, &MA_IID_IAudioClient, CLSCTX_ALL, pActivationParams, (void**)ppAudioClient); + if (FAILED(hr)) { + return ma_result_from_HRESULT(hr); + } + + return MA_SUCCESS; +} +#else +static ma_result ma_context_get_IAudioClient_UWP__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, MA_PROPVARIANT* pActivationParams, ma_IAudioClient** ppAudioClient, ma_IUnknown** ppActivatedInterface) +{ + ma_IActivateAudioInterfaceAsyncOperation *pAsyncOp = NULL; + ma_completion_handler_uwp completionHandler; + IID iid; + WCHAR* iidStr; + HRESULT hr; + ma_result result; + HRESULT activateResult; + ma_IUnknown* pActivatedInterface; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppAudioClient != NULL); + + if (pDeviceID != NULL) { + iidStr = (WCHAR*)pDeviceID->wasapi; + } else { + if (deviceType == ma_device_type_capture) { + iid = MA_IID_DEVINTERFACE_AUDIO_CAPTURE; + } else { + iid = MA_IID_DEVINTERFACE_AUDIO_RENDER; + } + +#if defined(__cplusplus) + hr = StringFromIID(iid, &iidStr); +#else + hr = StringFromIID(&iid, &iidStr); +#endif + if (FAILED(hr)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to convert device IID to string for ActivateAudioInterfaceAsync(). Out of memory.\n"); + return ma_result_from_HRESULT(hr); + } + } + + result = ma_completion_handler_uwp_init(&completionHandler); + if (result != MA_SUCCESS) { + ma_CoTaskMemFree(pContext, iidStr); + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for waiting for ActivateAudioInterfaceAsync().\n"); + return result; + } + + hr = ((MA_PFN_ActivateAudioInterfaceAsync)pContext->wasapi.ActivateAudioInterfaceAsync)(iidStr, &MA_IID_IAudioClient, pActivationParams, (ma_IActivateAudioInterfaceCompletionHandler*)&completionHandler, (ma_IActivateAudioInterfaceAsyncOperation**)&pAsyncOp); + if (FAILED(hr)) { + ma_completion_handler_uwp_uninit(&completionHandler); + ma_CoTaskMemFree(pContext, iidStr); + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] ActivateAudioInterfaceAsync() failed.\n"); + return ma_result_from_HRESULT(hr); + } + + if (pDeviceID == NULL) { + ma_CoTaskMemFree(pContext, iidStr); + } + + /* Wait for the async operation for finish. 
*/ + ma_completion_handler_uwp_wait(&completionHandler); + ma_completion_handler_uwp_uninit(&completionHandler); + + hr = ma_IActivateAudioInterfaceAsyncOperation_GetActivateResult(pAsyncOp, &activateResult, &pActivatedInterface); + ma_IActivateAudioInterfaceAsyncOperation_Release(pAsyncOp); + + if (FAILED(hr) || FAILED(activateResult)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to activate device.\n"); + return FAILED(hr) ? ma_result_from_HRESULT(hr) : ma_result_from_HRESULT(activateResult); + } + + /* Here is where we grab the IAudioClient interface. */ + hr = ma_IUnknown_QueryInterface(pActivatedInterface, &MA_IID_IAudioClient, (void**)ppAudioClient); + if (FAILED(hr)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to query IAudioClient interface.\n"); + return ma_result_from_HRESULT(hr); + } + + if (ppActivatedInterface) { + *ppActivatedInterface = pActivatedInterface; + } else { + ma_IUnknown_Release(pActivatedInterface); + } + + return MA_SUCCESS; +} +#endif + + +/* https://docs.microsoft.com/en-us/windows/win32/api/audioclientactivationparams/ne-audioclientactivationparams-audioclient_activation_type */ +typedef enum +{ + MA_AUDIOCLIENT_ACTIVATION_TYPE_DEFAULT, + MA_AUDIOCLIENT_ACTIVATION_TYPE_PROCESS_LOOPBACK +} MA_AUDIOCLIENT_ACTIVATION_TYPE; + +/* https://docs.microsoft.com/en-us/windows/win32/api/audioclientactivationparams/ne-audioclientactivationparams-process_loopback_mode */ +typedef enum +{ + MA_PROCESS_LOOPBACK_MODE_INCLUDE_TARGET_PROCESS_TREE, + MA_PROCESS_LOOPBACK_MODE_EXCLUDE_TARGET_PROCESS_TREE +} MA_PROCESS_LOOPBACK_MODE; + +/* https://docs.microsoft.com/en-us/windows/win32/api/audioclientactivationparams/ns-audioclientactivationparams-audioclient_process_loopback_params */ +typedef struct +{ + DWORD TargetProcessId; + MA_PROCESS_LOOPBACK_MODE ProcessLoopbackMode; +} MA_AUDIOCLIENT_PROCESS_LOOPBACK_PARAMS; + +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(push) +#pragma warning(disable:4201) /* nonstandard extension used: nameless struct/union */ +#elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpedantic" /* For ISO C99 doesn't support unnamed structs/unions [-Wpedantic] */ +#if defined(__clang__) +#pragma GCC diagnostic ignored "-Wc11-extensions" /* anonymous unions are a C11 extension */ +#endif +#endif +/* https://docs.microsoft.com/en-us/windows/win32/api/audioclientactivationparams/ns-audioclientactivationparams-audioclient_activation_params */ +typedef struct +{ + MA_AUDIOCLIENT_ACTIVATION_TYPE ActivationType; + union + { + MA_AUDIOCLIENT_PROCESS_LOOPBACK_PARAMS ProcessLoopbackParams; + }; +} MA_AUDIOCLIENT_ACTIVATION_PARAMS; +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(pop) +#elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) +#pragma GCC diagnostic pop +#endif + +#define MA_VIRTUAL_AUDIO_DEVICE_PROCESS_LOOPBACK L"VAD\\Process_Loopback" + +static ma_result ma_context_get_IAudioClient__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_uint32 loopbackProcessID, ma_bool32 loopbackProcessExclude, ma_IAudioClient** ppAudioClient, ma_WASAPIDeviceInterface** ppDeviceInterface) +{ + ma_result result; + ma_bool32 usingProcessLoopback = MA_FALSE; + MA_AUDIOCLIENT_ACTIVATION_PARAMS audioclientActivationParams; + MA_PROPVARIANT activationParams; + 
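+ /*
+ The process-specific loopback path below is driven from the public device config. A minimal
+ usage sketch, assuming a Windows build that supports it (error handling elided;
+ my_loopback_callback and targetProcessID are placeholders):
+
+     ma_device_config config = ma_device_config_init(ma_device_type_loopback);
+     config.capture.format                = ma_format_f32;
+     config.dataCallback                  = my_loopback_callback;
+     config.wasapi.loopbackProcessID      = targetProcessID;    <-- 0 means loopback of the whole endpoint
+     config.wasapi.loopbackProcessExclude = MA_FALSE;           <-- MA_TRUE captures everything except the target process tree
+     result = ma_device_init(NULL, &config, &device);
+
+ As noted below, this only takes effect when no explicit device ID is specified.
+ */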
MA_PROPVARIANT* pActivationParams = NULL; + ma_device_id virtualDeviceID; + + /* Activation parameters specific to loopback mode. Note that process-specific loopback will only work when a default device ID is specified. */ + if (deviceType == ma_device_type_loopback && loopbackProcessID != 0 && pDeviceID == NULL) { + usingProcessLoopback = MA_TRUE; + } + + if (usingProcessLoopback) { + MA_ZERO_OBJECT(&audioclientActivationParams); + audioclientActivationParams.ActivationType = MA_AUDIOCLIENT_ACTIVATION_TYPE_PROCESS_LOOPBACK; + audioclientActivationParams.ProcessLoopbackParams.ProcessLoopbackMode = (loopbackProcessExclude) ? MA_PROCESS_LOOPBACK_MODE_EXCLUDE_TARGET_PROCESS_TREE : MA_PROCESS_LOOPBACK_MODE_INCLUDE_TARGET_PROCESS_TREE; + audioclientActivationParams.ProcessLoopbackParams.TargetProcessId = (DWORD)loopbackProcessID; + + ma_PropVariantInit(&activationParams); + activationParams.vt = MA_VT_BLOB; + activationParams.blob.cbSize = sizeof(audioclientActivationParams); + activationParams.blob.pBlobData = (BYTE*)&audioclientActivationParams; + pActivationParams = &activationParams; + + /* When requesting a specific device ID we need to use a special device ID. */ + MA_COPY_MEMORY(virtualDeviceID.wasapi, MA_VIRTUAL_AUDIO_DEVICE_PROCESS_LOOPBACK, (wcslen(MA_VIRTUAL_AUDIO_DEVICE_PROCESS_LOOPBACK) + 1) * sizeof(wchar_t)); /* +1 for the null terminator. */ + pDeviceID = &virtualDeviceID; + } else { + pActivationParams = NULL; /* No activation parameters required. */ + } + +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + result = ma_context_get_IAudioClient_Desktop__wasapi(pContext, deviceType, pDeviceID, pActivationParams, ppAudioClient, ppDeviceInterface); +#else + result = ma_context_get_IAudioClient_UWP__wasapi(pContext, deviceType, pDeviceID, pActivationParams, ppAudioClient, ppDeviceInterface); +#endif + + /* + If loopback mode was requested with a process ID and initialization failed, it could be because it's + trying to run on an older version of Windows where it's not supported. We need to let the caller + know about this with a log message. + */ + if (result != MA_SUCCESS) { + if (usingProcessLoopback) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Loopback mode requested to %s process ID %u, but initialization failed. Support for this feature begins with Windows 10 Build 20348. Confirm your version of Windows or consider not using process-specific loopback.\n", (loopbackProcessExclude) ? "exclude" : "include", loopbackProcessID); + } + } + + return result; +} + + +static ma_result ma_context_enumerate_devices__wasapi(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + /* Different enumeration for desktop and UWP. 
*/ +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + /* Desktop */ + HRESULT hr; + ma_IMMDeviceEnumerator* pDeviceEnumerator; + + hr = ma_CoCreateInstance(pContext, &MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, &MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator); + if (FAILED(hr)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator."); + return ma_result_from_HRESULT(hr); + } + + ma_context_enumerate_devices_by_type__wasapi(pContext, pDeviceEnumerator, ma_device_type_playback, callback, pUserData); + ma_context_enumerate_devices_by_type__wasapi(pContext, pDeviceEnumerator, ma_device_type_capture, callback, pUserData); + + ma_IMMDeviceEnumerator_Release(pDeviceEnumerator); +#else + /* + UWP + + The MMDevice API is only supported on desktop applications. For now, while I'm still figuring out how to properly enumerate + over devices without using MMDevice, I'm restricting devices to defaults. + + Hint: DeviceInformation::FindAllAsync() with DeviceClass.AudioCapture/AudioRender. https://blogs.windows.com/buildingapps/2014/05/15/real-time-audio-in-windows-store-and-windows-phone-apps/ + */ + if (callback) { + ma_bool32 cbResult = MA_TRUE; + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + deviceInfo.isDefault = MA_TRUE; + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + /* Capture. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + deviceInfo.isDefault = MA_TRUE; + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + } +#endif + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + ma_result result; + ma_IMMDevice* pMMDevice = NULL; + WCHAR* pDefaultDeviceID = NULL; + + result = ma_context_get_MMDevice__wasapi(pContext, deviceType, pDeviceID, &pMMDevice); + if (result != MA_SUCCESS) { + return result; + } + + /* We need the default device ID so we can set the isDefault flag in the device info. */ + pDefaultDeviceID = ma_context_get_default_device_id__wasapi(pContext, deviceType); + + result = ma_context_get_device_info_from_MMDevice__wasapi(pContext, pMMDevice, pDefaultDeviceID, MA_FALSE, pDeviceInfo); /* MA_FALSE = !onlySimpleInfo. */ + + if (pDefaultDeviceID != NULL) { + ma_CoTaskMemFree(pContext, pDefaultDeviceID); + pDefaultDeviceID = NULL; + } + + ma_IMMDevice_Release(pMMDevice); + + return result; +#else + ma_IAudioClient* pAudioClient; + ma_result result; + + /* UWP currently only uses default devices. 
*/ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + result = ma_context_get_IAudioClient_UWP__wasapi(pContext, deviceType, pDeviceID, NULL, &pAudioClient, NULL); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_context_get_device_info_from_IAudioClient__wasapi(pContext, NULL, pAudioClient, pDeviceInfo); + + pDeviceInfo->isDefault = MA_TRUE; /* UWP only supports default devices. */ + + ma_IAudioClient_Release(pAudioClient); + return result; +#endif +} + +static ma_result ma_device_uninit__wasapi(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + if (pDevice->wasapi.pDeviceEnumerator) { + ((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator)->lpVtbl->UnregisterEndpointNotificationCallback((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator, &pDevice->wasapi.notificationClient); + ma_IMMDeviceEnumerator_Release((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator); + } +#endif + + if (pDevice->wasapi.pRenderClient) { + if (pDevice->wasapi.pMappedBufferPlayback != NULL) { + ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, pDevice->wasapi.mappedBufferPlaybackCap, 0); + pDevice->wasapi.pMappedBufferPlayback = NULL; + pDevice->wasapi.mappedBufferPlaybackCap = 0; + pDevice->wasapi.mappedBufferPlaybackLen = 0; + } + + ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient); + } + if (pDevice->wasapi.pCaptureClient) { + if (pDevice->wasapi.pMappedBufferCapture != NULL) { + ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, pDevice->wasapi.mappedBufferCaptureCap); + pDevice->wasapi.pMappedBufferCapture = NULL; + pDevice->wasapi.mappedBufferCaptureCap = 0; + pDevice->wasapi.mappedBufferCaptureLen = 0; + } + + ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient); + } + + if (pDevice->wasapi.pAudioClientPlayback) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback); + } + if (pDevice->wasapi.pAudioClientCapture) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + } + + if (pDevice->wasapi.hEventPlayback) { + CloseHandle((HANDLE)pDevice->wasapi.hEventPlayback); + } + if (pDevice->wasapi.hEventCapture) { + CloseHandle((HANDLE)pDevice->wasapi.hEventCapture); + } + + return MA_SUCCESS; +} + + +typedef struct +{ + /* Input. */ + ma_format formatIn; + ma_uint32 channelsIn; + ma_uint32 sampleRateIn; + ma_channel channelMapIn[MA_MAX_CHANNELS]; + ma_uint32 periodSizeInFramesIn; + ma_uint32 periodSizeInMillisecondsIn; + ma_uint32 periodsIn; + ma_share_mode shareMode; + ma_performance_profile performanceProfile; + ma_bool32 noAutoConvertSRC; + ma_bool32 noDefaultQualitySRC; + ma_bool32 noHardwareOffloading; + ma_uint32 loopbackProcessID; + ma_bool32 loopbackProcessExclude; + + /* Output. 
*/ + ma_IAudioClient* pAudioClient; + ma_IAudioRenderClient* pRenderClient; + ma_IAudioCaptureClient* pCaptureClient; + ma_format formatOut; + ma_uint32 channelsOut; + ma_uint32 sampleRateOut; + ma_channel channelMapOut[MA_MAX_CHANNELS]; + ma_uint32 periodSizeInFramesOut; + ma_uint32 periodsOut; + ma_bool32 usingAudioClient3; + char deviceName[256]; + ma_device_id id; +} ma_device_init_internal_data__wasapi; + +static ma_result ma_device_init_internal__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_init_internal_data__wasapi* pData) +{ + HRESULT hr; + ma_result result = MA_SUCCESS; + const char* errorMsg = ""; + MA_AUDCLNT_SHAREMODE shareMode = MA_AUDCLNT_SHAREMODE_SHARED; + DWORD streamFlags = 0; + MA_REFERENCE_TIME periodDurationInMicroseconds; + ma_bool32 wasInitializedUsingIAudioClient3 = MA_FALSE; + MA_WAVEFORMATEXTENSIBLE wf; + ma_WASAPIDeviceInterface* pDeviceInterface = NULL; + ma_IAudioClient2* pAudioClient2; + ma_uint32 nativeSampleRate; + ma_bool32 usingProcessLoopback = MA_FALSE; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pData != NULL); + + /* This function is only used to initialize one device type: either playback, capture or loopback. Never full-duplex. */ + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + usingProcessLoopback = deviceType == ma_device_type_loopback && pData->loopbackProcessID != 0 && pDeviceID == NULL; + + pData->pAudioClient = NULL; + pData->pRenderClient = NULL; + pData->pCaptureClient = NULL; + + streamFlags = MA_AUDCLNT_STREAMFLAGS_EVENTCALLBACK; + if (!pData->noAutoConvertSRC && pData->sampleRateIn != 0 && pData->shareMode != ma_share_mode_exclusive) { /* <-- Exclusive streams must use the native sample rate. */ + streamFlags |= MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM; + } + if (!pData->noDefaultQualitySRC && pData->sampleRateIn != 0 && (streamFlags & MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM) != 0) { + streamFlags |= MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY; + } + if (deviceType == ma_device_type_loopback) { + streamFlags |= MA_AUDCLNT_STREAMFLAGS_LOOPBACK; + } + + result = ma_context_get_IAudioClient__wasapi(pContext, deviceType, pDeviceID, pData->loopbackProcessID, pData->loopbackProcessExclude, &pData->pAudioClient, &pDeviceInterface); + if (result != MA_SUCCESS) { + goto done; + } + + MA_ZERO_OBJECT(&wf); + + /* Try enabling hardware offloading. */ + if (!pData->noHardwareOffloading) { + hr = ma_IAudioClient_QueryInterface(pData->pAudioClient, &MA_IID_IAudioClient2, (void**)&pAudioClient2); + if (SUCCEEDED(hr)) { + BOOL isHardwareOffloadingSupported = 0; + hr = ma_IAudioClient2_IsOffloadCapable(pAudioClient2, MA_AudioCategory_Other, &isHardwareOffloadingSupported); + if (SUCCEEDED(hr) && isHardwareOffloadingSupported) { + ma_AudioClientProperties clientProperties; + MA_ZERO_OBJECT(&clientProperties); + clientProperties.cbSize = sizeof(clientProperties); + clientProperties.bIsOffload = 1; + clientProperties.eCategory = MA_AudioCategory_Other; + ma_IAudioClient2_SetClientProperties(pAudioClient2, &clientProperties); + } + + pAudioClient2->lpVtbl->Release(pAudioClient2); + } + } + + /* Here is where we try to determine the best format to use with the device. If the client if wanting exclusive mode, first try finding the best format for that. If this fails, fall back to shared mode. 
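+
+ From the caller's side the relevant knobs are the share mode and the wasapi-specific flags
+ consumed above. A minimal sketch; the fallback to shared mode is the application's
+ responsibility, as described below (declarations and error handling elided):
+
+ ma_device_config config = ma_device_config_init(ma_device_type_playback);
+ config.playback.format         = ma_format_s16;
+ config.playback.channels       = 2;
+ config.sampleRate              = 48000;
+ config.playback.shareMode      = ma_share_mode_exclusive;
+ config.wasapi.noAutoConvertSRC = MA_TRUE;                  <-- keeps AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM out of streamFlags
+ result = ma_device_init(NULL, &config, &device);
+ if (result == MA_SHARE_MODE_NOT_SUPPORTED) {
+     config.playback.shareMode = ma_share_mode_shared;      <-- retry in shared mode if exclusive isn't available
+     result = ma_device_init(NULL, &config, &device);
+ }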
*/ + result = MA_FORMAT_NOT_SUPPORTED; + if (pData->shareMode == ma_share_mode_exclusive) { +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + /* In exclusive mode on desktop we always use the backend's native format. */ + ma_IPropertyStore* pStore = NULL; + hr = ma_IMMDevice_OpenPropertyStore(pDeviceInterface, STGM_READ, &pStore); + if (SUCCEEDED(hr)) { + MA_PROPVARIANT prop; + ma_PropVariantInit(&prop); + hr = ma_IPropertyStore_GetValue(pStore, &MA_PKEY_AudioEngine_DeviceFormat, &prop); + if (SUCCEEDED(hr)) { + MA_WAVEFORMATEX* pActualFormat = (MA_WAVEFORMATEX*)prop.blob.pBlobData; + hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pData->pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, pActualFormat, NULL); + if (SUCCEEDED(hr)) { + MA_COPY_MEMORY(&wf, pActualFormat, sizeof(MA_WAVEFORMATEXTENSIBLE)); + } + + ma_PropVariantClear(pContext, &prop); + } + + ma_IPropertyStore_Release(pStore); + } +#else + /* + I do not know how to query the device's native format on UWP so for now I'm just disabling support for + exclusive mode. The alternative is to enumerate over different formats and check IsFormatSupported() + until you find one that works. + + TODO: Add support for exclusive mode to UWP. + */ + hr = S_FALSE; +#endif + + if (hr == S_OK) { + shareMode = MA_AUDCLNT_SHAREMODE_EXCLUSIVE; + result = MA_SUCCESS; + } else { + result = MA_SHARE_MODE_NOT_SUPPORTED; + } + } else { + /* In shared mode we are always using the format reported by the operating system. */ + MA_WAVEFORMATEXTENSIBLE* pNativeFormat = NULL; + hr = ma_IAudioClient_GetMixFormat((ma_IAudioClient*)pData->pAudioClient, (MA_WAVEFORMATEX**)&pNativeFormat); + if (hr != S_OK) { + /* When using process-specific loopback, GetMixFormat() seems to always fail. */ + if (usingProcessLoopback) { + wf.wFormatTag = WAVE_FORMAT_IEEE_FLOAT; + wf.nChannels = 2; + wf.nSamplesPerSec = 44100; + wf.wBitsPerSample = 32; + wf.nBlockAlign = wf.nChannels * wf.wBitsPerSample / 8; + wf.nAvgBytesPerSec = wf.nSamplesPerSec * wf.nBlockAlign; + wf.cbSize = sizeof(MA_WAVEFORMATEX); + + result = MA_SUCCESS; + } else { + result = MA_FORMAT_NOT_SUPPORTED; + } + } else { + /* + I've seen cases where cbSize will be set to sizeof(WAVEFORMATEX) even though the structure itself + is given the format tag of WAVE_FORMAT_EXTENSIBLE. If the format tag is WAVE_FORMAT_EXTENSIBLE + want to make sure we copy the whole WAVEFORMATEXTENSIBLE structure. Otherwise we'll have to be + safe and only copy the WAVEFORMATEX part. + */ + if (pNativeFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE) { + MA_COPY_MEMORY(&wf, pNativeFormat, sizeof(MA_WAVEFORMATEXTENSIBLE)); + } else { + /* I've seen a case where cbSize was set to 0. Assume sizeof(WAVEFORMATEX) in this case. */ + size_t cbSize = pNativeFormat->cbSize; + if (cbSize == 0) { + cbSize = sizeof(MA_WAVEFORMATEX); + } + + /* Make sure we don't copy more than the capacity of `wf`. */ + if (cbSize > sizeof(wf)) { + cbSize = sizeof(wf); + } + + MA_COPY_MEMORY(&wf, pNativeFormat, cbSize); + } + + result = MA_SUCCESS; + } + + ma_CoTaskMemFree(pContext, pNativeFormat); + + shareMode = MA_AUDCLNT_SHAREMODE_SHARED; + } + + /* Return an error if we still haven't found a format. */ + if (result != MA_SUCCESS) { + errorMsg = "[WASAPI] Failed to find best device mix format."; + goto done; + } + + /* + Override the native sample rate with the one requested by the caller, but only if we're not using the default sample rate. We'll use + WASAPI to perform the sample rate conversion. 
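+
+ The dependent fields follow directly from the basic ones; for example, for a 2 channel,
+ 32-bit float stream at 48000 Hz:
+
+ nBlockAlign     = nChannels * wBitsPerSample / 8 = 2 * 32 / 8 = 8 bytes per PCM frame
+ nAvgBytesPerSec = nSamplesPerSec * nBlockAlign   = 48000 * 8  = 384000 bytes per second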
+ */ + nativeSampleRate = wf.nSamplesPerSec; + if (streamFlags & MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM) { + wf.nSamplesPerSec = (pData->sampleRateIn != 0) ? pData->sampleRateIn : MA_DEFAULT_SAMPLE_RATE; + wf.nAvgBytesPerSec = wf.nSamplesPerSec * wf.nBlockAlign; + } + + pData->formatOut = ma_format_from_WAVEFORMATEX((MA_WAVEFORMATEX*)&wf); + if (pData->formatOut == ma_format_unknown) { + /* + The format isn't supported. This is almost certainly because the exclusive mode format isn't supported by miniaudio. We need to return MA_SHARE_MODE_NOT_SUPPORTED + in this case so that the caller can detect it and fall back to shared mode if desired. We should never get here if shared mode was requested, but just for + completeness we'll check for it and return MA_FORMAT_NOT_SUPPORTED. + */ + if (shareMode == MA_AUDCLNT_SHAREMODE_EXCLUSIVE) { + result = MA_SHARE_MODE_NOT_SUPPORTED; + } else { + result = MA_FORMAT_NOT_SUPPORTED; + } + + errorMsg = "[WASAPI] Native format not supported."; + goto done; + } + + pData->channelsOut = wf.nChannels; + pData->sampleRateOut = wf.nSamplesPerSec; + + /* + Get the internal channel map based on the channel mask. There is a possibility that GetMixFormat() returns + a WAVEFORMATEX instead of a WAVEFORMATEXTENSIBLE, in which case the channel mask will be undefined. In this + case we'll just use the default channel map. + */ + if (wf.wFormatTag == WAVE_FORMAT_EXTENSIBLE || wf.cbSize >= sizeof(MA_WAVEFORMATEXTENSIBLE)) { + ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pData->channelsOut, pData->channelMapOut); + } else { + ma_channel_map_init_standard(ma_standard_channel_map_microsoft, pData->channelMapOut, ma_countof(pData->channelMapOut), pData->channelsOut); + } + + /* Period size. */ + pData->periodsOut = (pData->periodsIn != 0) ? pData->periodsIn : MA_DEFAULT_PERIODS; + pData->periodSizeInFramesOut = pData->periodSizeInFramesIn; + if (pData->periodSizeInFramesOut == 0) { + if (pData->periodSizeInMillisecondsIn == 0) { + if (pData->performanceProfile == ma_performance_profile_low_latency) { + pData->periodSizeInFramesOut = ma_calculate_buffer_size_in_frames_from_milliseconds(MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY, wf.nSamplesPerSec); + } else { + pData->periodSizeInFramesOut = ma_calculate_buffer_size_in_frames_from_milliseconds(MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE, wf.nSamplesPerSec); + } + } else { + pData->periodSizeInFramesOut = ma_calculate_buffer_size_in_frames_from_milliseconds(pData->periodSizeInMillisecondsIn, wf.nSamplesPerSec); + } + } + + periodDurationInMicroseconds = ((ma_uint64)pData->periodSizeInFramesOut * 1000 * 1000) / wf.nSamplesPerSec; + + + /* Slightly different initialization for shared and exclusive modes. We try exclusive mode first, and if it fails, fall back to shared mode. */ + if (shareMode == MA_AUDCLNT_SHAREMODE_EXCLUSIVE) { + MA_REFERENCE_TIME bufferDuration = periodDurationInMicroseconds * pData->periodsOut * 10; + + /* + If the periodicy is too small, Initialize() will fail with AUDCLNT_E_INVALID_DEVICE_PERIOD. In this case we should just keep increasing + it and trying it again. + */ + hr = E_FAIL; + for (;;) { + hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, streamFlags, bufferDuration, bufferDuration, (MA_WAVEFORMATEX*)&wf, NULL); + if (hr == MA_AUDCLNT_E_INVALID_DEVICE_PERIOD) { + if (bufferDuration > 500*10000) { + break; + } else { + if (bufferDuration == 0) { /* <-- Just a sanity check to prevent an infinit loop. 
Should never happen, but it makes me feel better. */ + break; + } + + bufferDuration = bufferDuration * 2; + continue; + } + } else { + break; + } + } + + if (hr == MA_AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) { + ma_uint32 bufferSizeInFrames; + hr = ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pData->pAudioClient, &bufferSizeInFrames); + if (SUCCEEDED(hr)) { + bufferDuration = (MA_REFERENCE_TIME)((10000.0 * 1000 / wf.nSamplesPerSec * bufferSizeInFrames) + 0.5); + + /* Unfortunately we need to release and re-acquire the audio client according to MSDN. Seems silly - why not just call IAudioClient_Initialize() again?! */ + ma_IAudioClient_Release((ma_IAudioClient*)pData->pAudioClient); + +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + hr = ma_IMMDevice_Activate(pDeviceInterface, &MA_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pData->pAudioClient); +#else + hr = ma_IUnknown_QueryInterface(pDeviceInterface, &MA_IID_IAudioClient, (void**)&pData->pAudioClient); +#endif + + if (SUCCEEDED(hr)) { + hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, streamFlags, bufferDuration, bufferDuration, (MA_WAVEFORMATEX*)&wf, NULL); + } + } + } + + if (FAILED(hr)) { + /* Failed to initialize in exclusive mode. Don't fall back to shared mode - instead tell the client about it. They can reinitialize in shared mode if they want. */ + if (hr == E_ACCESSDENIED) { + errorMsg = "[WASAPI] Failed to initialize device in exclusive mode. Access denied.", result = MA_ACCESS_DENIED; + } else if (hr == MA_AUDCLNT_E_DEVICE_IN_USE) { + errorMsg = "[WASAPI] Failed to initialize device in exclusive mode. Device in use.", result = MA_BUSY; + } else { + errorMsg = "[WASAPI] Failed to initialize device in exclusive mode."; result = ma_result_from_HRESULT(hr); + } + goto done; + } + } + + if (shareMode == MA_AUDCLNT_SHAREMODE_SHARED) { + /* + Low latency shared mode via IAudioClient3. + + NOTE + ==== + Contrary to the documentation on MSDN (https://docs.microsoft.com/en-us/windows/win32/api/audioclient/nf-audioclient-iaudioclient3-initializesharedaudiostream), the + use of AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM and AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY with IAudioClient3_InitializeSharedAudioStream() absolutely does not work. Using + any of these flags will result in HRESULT code 0x88890021. The other problem is that calling IAudioClient3_GetSharedModeEnginePeriod() with a sample rate different to + that returned by IAudioClient_GetMixFormat() also results in an error. I'm therefore disabling low-latency shared mode with AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM. + */ +#ifndef MA_WASAPI_NO_LOW_LATENCY_SHARED_MODE + { + if ((streamFlags & MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM) == 0 || nativeSampleRate == wf.nSamplesPerSec) { + ma_IAudioClient3* pAudioClient3 = NULL; + hr = ma_IAudioClient_QueryInterface(pData->pAudioClient, &MA_IID_IAudioClient3, (void**)&pAudioClient3); + if (SUCCEEDED(hr)) { + ma_uint32 defaultPeriodInFrames; + ma_uint32 fundamentalPeriodInFrames; + ma_uint32 minPeriodInFrames; + ma_uint32 maxPeriodInFrames; + hr = ma_IAudioClient3_GetSharedModeEnginePeriod(pAudioClient3, (MA_WAVEFORMATEX*)&wf, &defaultPeriodInFrames, &fundamentalPeriodInFrames, &minPeriodInFrames, &maxPeriodInFrames); + if (SUCCEEDED(hr)) { + ma_uint32 desiredPeriodInFrames = pData->periodSizeInFramesOut; + ma_uint32 actualPeriodInFrames = desiredPeriodInFrames; + + /* Make sure the period size is a multiple of fundamentalPeriodInFrames. 
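+ That is, round down to a whole number of fundamental periods and then clamp to the supported
+ range. For example, with fundamentalPeriodInFrames = 480 and a requested period of 1000 frames:
+
+ actualPeriodInFrames = (1000 / 480) * 480 = 2 * 480 = 960
+ actualPeriodInFrames = ma_clamp(960, minPeriodInFrames, maxPeriodInFrames)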
*/
+ actualPeriodInFrames = actualPeriodInFrames / fundamentalPeriodInFrames;
+ actualPeriodInFrames = actualPeriodInFrames * fundamentalPeriodInFrames;
+
+ /* The period needs to be clamped between minPeriodInFrames and maxPeriodInFrames. */
+ actualPeriodInFrames = ma_clamp(actualPeriodInFrames, minPeriodInFrames, maxPeriodInFrames);
+
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] Trying IAudioClient3_InitializeSharedAudioStream(actualPeriodInFrames=%d)\n", actualPeriodInFrames);
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, " defaultPeriodInFrames=%d\n", defaultPeriodInFrames);
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, " fundamentalPeriodInFrames=%d\n", fundamentalPeriodInFrames);
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, " minPeriodInFrames=%d\n", minPeriodInFrames);
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, " maxPeriodInFrames=%d\n", maxPeriodInFrames);
+
+ /* If the client requested a largish buffer then we don't actually want to use low latency shared mode because it forces small buffers. */
+ if (actualPeriodInFrames >= desiredPeriodInFrames) {
+ /*
+ MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY must not be in the stream flags. If either of these are specified,
+ IAudioClient3_InitializeSharedAudioStream() will fail.
+ */
+ hr = ma_IAudioClient3_InitializeSharedAudioStream(pAudioClient3, streamFlags & ~(MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY), actualPeriodInFrames, (MA_WAVEFORMATEX*)&wf, NULL);
+ if (SUCCEEDED(hr)) {
+ wasInitializedUsingIAudioClient3 = MA_TRUE;
+ pData->periodSizeInFramesOut = actualPeriodInFrames;
+
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] Using IAudioClient3\n");
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, " periodSizeInFramesOut=%d\n", pData->periodSizeInFramesOut);
+ } else {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] IAudioClient3_InitializeSharedAudioStream failed. Falling back to IAudioClient.\n");
+ }
+ } else {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] Not using IAudioClient3 because the desired period size is larger than the maximum supported by IAudioClient3.\n");
+ }
+ } else {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] IAudioClient3_GetSharedModeEnginePeriod failed. Falling back to IAudioClient.\n");
+ }
+
+ ma_IAudioClient3_Release(pAudioClient3);
+ pAudioClient3 = NULL;
+ }
+ }
+ }
+#else
+ {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] Not using IAudioClient3 because MA_WASAPI_NO_LOW_LATENCY_SHARED_MODE is enabled.\n");
+ }
+#endif
+
+ /* If we don't have an IAudioClient3 then we need to use the normal initialization routine. */
+ if (!wasInitializedUsingIAudioClient3) {
+ MA_REFERENCE_TIME bufferDuration = periodDurationInMicroseconds * pData->periodsOut * 10; /* <-- Multiply by 10 for microseconds to 100-nanoseconds. */
+ hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, streamFlags, bufferDuration, 0, (const MA_WAVEFORMATEX*)&wf, NULL);
+ if (FAILED(hr)) {
+ if (hr == E_ACCESSDENIED) {
+ errorMsg = "[WASAPI] Failed to initialize device. Access denied.", result = MA_ACCESS_DENIED;
+ } else if (hr == MA_AUDCLNT_E_DEVICE_IN_USE) {
+ errorMsg = "[WASAPI] Failed to initialize device. 
Device in use.", result = MA_BUSY; + } else { + errorMsg = "[WASAPI] Failed to initialize device.", result = ma_result_from_HRESULT(hr); + } + + goto done; + } + } + } + + if (!wasInitializedUsingIAudioClient3) { + ma_uint32 bufferSizeInFrames = 0; + hr = ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pData->pAudioClient, &bufferSizeInFrames); + if (FAILED(hr)) { + errorMsg = "[WASAPI] Failed to get audio client's actual buffer size.", result = ma_result_from_HRESULT(hr); + goto done; + } + + /* + When using process loopback mode, retrieval of the buffer size seems to result in totally + incorrect values. In this case we'll just assume it's the same size as what we requested + when we initialized the client. + */ + if (usingProcessLoopback) { + bufferSizeInFrames = (ma_uint32)((periodDurationInMicroseconds * pData->periodsOut) * pData->sampleRateOut / 1000000); + } + + pData->periodSizeInFramesOut = bufferSizeInFrames / pData->periodsOut; + } + + pData->usingAudioClient3 = wasInitializedUsingIAudioClient3; + + + if (deviceType == ma_device_type_playback) { + result = ma_device_create_IAudioClient_service__wasapi(pContext, deviceType, (ma_IAudioClient*)pData->pAudioClient, (void**)&pData->pRenderClient); + } else { + result = ma_device_create_IAudioClient_service__wasapi(pContext, deviceType, (ma_IAudioClient*)pData->pAudioClient, (void**)&pData->pCaptureClient); + } + + /*if (FAILED(hr)) {*/ + if (result != MA_SUCCESS) { + errorMsg = "[WASAPI] Failed to get audio client service."; + goto done; + } + + + /* Grab the name of the device. */ +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + { + ma_IPropertyStore *pProperties; + hr = ma_IMMDevice_OpenPropertyStore(pDeviceInterface, STGM_READ, &pProperties); + if (SUCCEEDED(hr)) { + MA_PROPVARIANT varName; + ma_PropVariantInit(&varName); + hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_Device_FriendlyName, &varName); + if (SUCCEEDED(hr)) { + WideCharToMultiByte(CP_UTF8, 0, varName.pwszVal, -1, pData->deviceName, sizeof(pData->deviceName), 0, FALSE); + ma_PropVariantClear(pContext, &varName); + } + + ma_IPropertyStore_Release(pProperties); + } + } +#endif + + /* + For the WASAPI backend we need to know the actual IDs of the device in order to do automatic + stream routing so that IDs can be compared and we can determine which device has been detached + and whether or not it matches with our ma_device. + */ +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + { + /* Desktop */ + ma_context_get_device_id_from_MMDevice__wasapi(pContext, pDeviceInterface, &pData->id); + } +#else + { + /* UWP */ + /* TODO: Implement me. Need to figure out how to get the ID of the default device. */ + } +#endif + +done: + /* Clean up. 
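+
+ As a concrete instance of the process-loopback buffer size fallback above: with three
+ 10 millisecond periods (10000 microseconds each) at 48000 Hz,
+
+ bufferSizeInFrames    = (10000 * 3) * 48000 / 1000000 = 1440 frames
+ periodSizeInFramesOut = 1440 / 3                      = 480 frames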
*/ +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + if (pDeviceInterface != NULL) { + ma_IMMDevice_Release(pDeviceInterface); + } +#else + if (pDeviceInterface != NULL) { + ma_IUnknown_Release(pDeviceInterface); + } +#endif + + if (result != MA_SUCCESS) { + if (pData->pRenderClient) { + ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pData->pRenderClient); + pData->pRenderClient = NULL; + } + if (pData->pCaptureClient) { + ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pData->pCaptureClient); + pData->pCaptureClient = NULL; + } + if (pData->pAudioClient) { + ma_IAudioClient_Release((ma_IAudioClient*)pData->pAudioClient); + pData->pAudioClient = NULL; + } + + if (errorMsg != NULL && errorMsg[0] != '\0') { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "%s\n", errorMsg); + } + + return result; + } else { + return MA_SUCCESS; + } +} + +static ma_result ma_device_reinit__wasapi(ma_device* pDevice, ma_device_type deviceType) +{ + ma_device_init_internal_data__wasapi data; + ma_result result; + + MA_ASSERT(pDevice != NULL); + + /* We only re-initialize the playback or capture device. Never a full-duplex device. */ + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + + /* + Before reinitializing the device we need to free the previous audio clients. + + There's a known memory leak here. We will be calling this from the routing change callback that + is fired by WASAPI. If we attempt to release the IAudioClient we will deadlock. In my opinion + this is a bug. I'm not sure what I need to do to handle this cleanly, but I think we'll probably + need some system where we post an event, but delay the execution of it until the callback has + returned. I'm not sure how to do this reliably, however. I have set up some infrastructure for + a command thread which might be useful for this. 
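+
+ One possible shape for that, reusing the command thread above, would be to post a release
+ command instead of releasing inline, along the lines of the unused helper earlier in this
+ file (sketch only, not wired up):
+
+ ma_context_command__wasapi cmd = ma_context_init_command__wasapi(MA_CONTEXT_COMMAND_RELEASE_IAUDIOCLIENT__WASAPI);
+ cmd.data.releaseAudioClient.pDevice    = pDevice;
+ cmd.data.releaseAudioClient.deviceType = deviceType;
+ ma_context_post_command__wasapi(pDevice->pContext, &cmd);    <-- would need a fire-and-forget variant so the callback doesn't block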
+ */ + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_loopback) { + if (pDevice->wasapi.pCaptureClient) { + ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient); + pDevice->wasapi.pCaptureClient = NULL; + } + + if (pDevice->wasapi.pAudioClientCapture) { + /*ma_device_release_IAudioClient_service__wasapi(pDevice, ma_device_type_capture);*/ + pDevice->wasapi.pAudioClientCapture = NULL; + } + } + + if (deviceType == ma_device_type_playback) { + if (pDevice->wasapi.pRenderClient) { + ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient); + pDevice->wasapi.pRenderClient = NULL; + } + + if (pDevice->wasapi.pAudioClientPlayback) { + /*ma_device_release_IAudioClient_service__wasapi(pDevice, ma_device_type_playback);*/ + pDevice->wasapi.pAudioClientPlayback = NULL; + } + } + + + if (deviceType == ma_device_type_playback) { + data.formatIn = pDevice->playback.format; + data.channelsIn = pDevice->playback.channels; + MA_COPY_MEMORY(data.channelMapIn, pDevice->playback.channelMap, sizeof(pDevice->playback.channelMap)); + data.shareMode = pDevice->playback.shareMode; + } else { + data.formatIn = pDevice->capture.format; + data.channelsIn = pDevice->capture.channels; + MA_COPY_MEMORY(data.channelMapIn, pDevice->capture.channelMap, sizeof(pDevice->capture.channelMap)); + data.shareMode = pDevice->capture.shareMode; + } + + data.sampleRateIn = pDevice->sampleRate; + data.periodSizeInFramesIn = pDevice->wasapi.originalPeriodSizeInFrames; + data.periodSizeInMillisecondsIn = pDevice->wasapi.originalPeriodSizeInMilliseconds; + data.periodsIn = pDevice->wasapi.originalPeriods; + data.performanceProfile = pDevice->wasapi.originalPerformanceProfile; + data.noAutoConvertSRC = pDevice->wasapi.noAutoConvertSRC; + data.noDefaultQualitySRC = pDevice->wasapi.noDefaultQualitySRC; + data.noHardwareOffloading = pDevice->wasapi.noHardwareOffloading; + data.loopbackProcessID = pDevice->wasapi.loopbackProcessID; + data.loopbackProcessExclude = pDevice->wasapi.loopbackProcessExclude; + result = ma_device_init_internal__wasapi(pDevice->pContext, deviceType, NULL, &data); + if (result != MA_SUCCESS) { + return result; + } + + /* At this point we have some new objects ready to go. We need to uninitialize the previous ones and then set the new ones. */ + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_loopback) { + pDevice->wasapi.pAudioClientCapture = data.pAudioClient; + pDevice->wasapi.pCaptureClient = data.pCaptureClient; + + pDevice->capture.internalFormat = data.formatOut; + pDevice->capture.internalChannels = data.channelsOut; + pDevice->capture.internalSampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDevice->capture.internalPeriodSizeInFrames = data.periodSizeInFramesOut; + pDevice->capture.internalPeriods = data.periodsOut; + ma_strcpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), data.deviceName); + + ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, (HANDLE)pDevice->wasapi.hEventCapture); + + pDevice->wasapi.periodSizeInFramesCapture = data.periodSizeInFramesOut; + ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &pDevice->wasapi.actualBufferSizeInFramesCapture); + + /* We must always have a valid ID. 
*/ + ma_strcpy_s_WCHAR(pDevice->capture.id.wasapi, sizeof(pDevice->capture.id.wasapi), data.id.wasapi); + } + + if (deviceType == ma_device_type_playback) { + pDevice->wasapi.pAudioClientPlayback = data.pAudioClient; + pDevice->wasapi.pRenderClient = data.pRenderClient; + + pDevice->playback.internalFormat = data.formatOut; + pDevice->playback.internalChannels = data.channelsOut; + pDevice->playback.internalSampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDevice->playback.internalPeriodSizeInFrames = data.periodSizeInFramesOut; + pDevice->playback.internalPeriods = data.periodsOut; + ma_strcpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), data.deviceName); + + ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, (HANDLE)pDevice->wasapi.hEventPlayback); + + pDevice->wasapi.periodSizeInFramesPlayback = data.periodSizeInFramesOut; + ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &pDevice->wasapi.actualBufferSizeInFramesPlayback); + + /* We must always have a valid ID because rerouting will look at it. */ + ma_strcpy_s_WCHAR(pDevice->playback.id.wasapi, sizeof(pDevice->playback.id.wasapi), data.id.wasapi); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_init__wasapi(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + ma_result result = MA_SUCCESS; + +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + HRESULT hr; + ma_IMMDeviceEnumerator* pDeviceEnumerator; +#endif + + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->wasapi); + pDevice->wasapi.usage = pConfig->wasapi.usage; + pDevice->wasapi.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC; + pDevice->wasapi.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC; + pDevice->wasapi.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading; + pDevice->wasapi.loopbackProcessID = pConfig->wasapi.loopbackProcessID; + pDevice->wasapi.loopbackProcessExclude = pConfig->wasapi.loopbackProcessExclude; + + /* Exclusive mode is not allowed with loopback. 
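 For example (editor's note, illustrative only), a config created with ma_device_config_init(ma_device_type_loopback) that also sets playback.shareMode = ma_share_mode_exclusive is rejected by the check below with MA_INVALID_DEVICE_CONFIG.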
*/ + if (pConfig->deviceType == ma_device_type_loopback && pConfig->playback.shareMode == ma_share_mode_exclusive) { + return MA_INVALID_DEVICE_CONFIG; + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex || pConfig->deviceType == ma_device_type_loopback) { + ma_device_init_internal_data__wasapi data; + data.formatIn = pDescriptorCapture->format; + data.channelsIn = pDescriptorCapture->channels; + data.sampleRateIn = pDescriptorCapture->sampleRate; + MA_COPY_MEMORY(data.channelMapIn, pDescriptorCapture->channelMap, sizeof(pDescriptorCapture->channelMap)); + data.periodSizeInFramesIn = pDescriptorCapture->periodSizeInFrames; + data.periodSizeInMillisecondsIn = pDescriptorCapture->periodSizeInMilliseconds; + data.periodsIn = pDescriptorCapture->periodCount; + data.shareMode = pDescriptorCapture->shareMode; + data.performanceProfile = pConfig->performanceProfile; + data.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC; + data.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC; + data.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading; + data.loopbackProcessID = pConfig->wasapi.loopbackProcessID; + data.loopbackProcessExclude = pConfig->wasapi.loopbackProcessExclude; + + result = ma_device_init_internal__wasapi(pDevice->pContext, (pConfig->deviceType == ma_device_type_loopback) ? ma_device_type_loopback : ma_device_type_capture, pDescriptorCapture->pDeviceID, &data); + if (result != MA_SUCCESS) { + return result; + } + + pDevice->wasapi.pAudioClientCapture = data.pAudioClient; + pDevice->wasapi.pCaptureClient = data.pCaptureClient; + pDevice->wasapi.originalPeriodSizeInMilliseconds = pDescriptorCapture->periodSizeInMilliseconds; + pDevice->wasapi.originalPeriodSizeInFrames = pDescriptorCapture->periodSizeInFrames; + pDevice->wasapi.originalPeriods = pDescriptorCapture->periodCount; + pDevice->wasapi.originalPerformanceProfile = pConfig->performanceProfile; + + /* + The event for capture needs to be manual reset for the same reason as playback. We keep the initial state set to unsignaled, + however, because we want to block until we actually have something for the first call to ma_device_read(). + */ + pDevice->wasapi.hEventCapture = (ma_handle)CreateEventA(NULL, FALSE, FALSE, NULL); /* Auto reset, unsignaled by default. */ + if (pDevice->wasapi.hEventCapture == NULL) { + result = ma_result_from_GetLastError(GetLastError()); + + if (pDevice->wasapi.pCaptureClient != NULL) { + ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient); + pDevice->wasapi.pCaptureClient = NULL; + } + if (pDevice->wasapi.pAudioClientCapture != NULL) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + pDevice->wasapi.pAudioClientCapture = NULL; + } + + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for capture."); + return result; + } + ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, (HANDLE)pDevice->wasapi.hEventCapture); + + pDevice->wasapi.periodSizeInFramesCapture = data.periodSizeInFramesOut; + ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &pDevice->wasapi.actualBufferSizeInFramesCapture); + + /* We must always have a valid ID. */ + ma_strcpy_s_WCHAR(pDevice->capture.id.wasapi, sizeof(pDevice->capture.id.wasapi), data.id.wasapi); + + /* The descriptor needs to be updated with actual values. 
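 In particular, the device may have settled on a different format, channel count, sample rate or period size than was requested (especially in shared mode), so the caller's descriptor is overwritten with the values the device actually accepted.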
*/
+        pDescriptorCapture->format = data.formatOut;
+        pDescriptorCapture->channels = data.channelsOut;
+        pDescriptorCapture->sampleRate = data.sampleRateOut;
+        MA_COPY_MEMORY(pDescriptorCapture->channelMap, data.channelMapOut, sizeof(data.channelMapOut));
+        pDescriptorCapture->periodSizeInFrames = data.periodSizeInFramesOut;
+        pDescriptorCapture->periodCount = data.periodsOut;
+    }
+
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+        ma_device_init_internal_data__wasapi data;
+        data.formatIn = pDescriptorPlayback->format;
+        data.channelsIn = pDescriptorPlayback->channels;
+        data.sampleRateIn = pDescriptorPlayback->sampleRate;
+        MA_COPY_MEMORY(data.channelMapIn, pDescriptorPlayback->channelMap, sizeof(pDescriptorPlayback->channelMap));
+        data.periodSizeInFramesIn = pDescriptorPlayback->periodSizeInFrames;
+        data.periodSizeInMillisecondsIn = pDescriptorPlayback->periodSizeInMilliseconds;
+        data.periodsIn = pDescriptorPlayback->periodCount;
+        data.shareMode = pDescriptorPlayback->shareMode;
+        data.performanceProfile = pConfig->performanceProfile;
+        data.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC;
+        data.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC;
+        data.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading;
+        data.loopbackProcessID = pConfig->wasapi.loopbackProcessID;
+        data.loopbackProcessExclude = pConfig->wasapi.loopbackProcessExclude;
+
+        result = ma_device_init_internal__wasapi(pDevice->pContext, ma_device_type_playback, pDescriptorPlayback->pDeviceID, &data);
+        if (result != MA_SUCCESS) {
+            if (pConfig->deviceType == ma_device_type_duplex) {
+                if (pDevice->wasapi.pCaptureClient != NULL) {
+                    ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient);
+                    pDevice->wasapi.pCaptureClient = NULL;
+                }
+                if (pDevice->wasapi.pAudioClientCapture != NULL) {
+                    ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+                    pDevice->wasapi.pAudioClientCapture = NULL;
+                }
+
+                CloseHandle((HANDLE)pDevice->wasapi.hEventCapture);
+                pDevice->wasapi.hEventCapture = NULL;
+            }
+            return result;
+        }
+
+        pDevice->wasapi.pAudioClientPlayback = data.pAudioClient;
+        pDevice->wasapi.pRenderClient = data.pRenderClient;
+        pDevice->wasapi.originalPeriodSizeInMilliseconds = pDescriptorPlayback->periodSizeInMilliseconds;
+        pDevice->wasapi.originalPeriodSizeInFrames = pDescriptorPlayback->periodSizeInFrames;
+        pDevice->wasapi.originalPeriods = pDescriptorPlayback->periodCount;
+        pDevice->wasapi.originalPerformanceProfile = pConfig->performanceProfile;
+
+        /*
+        The event for playback needs to be manually reset because we want to explicitly control the fact that it becomes signalled
+        only after the whole available space has been filled, never before.
+
+        The playback event also needs to be initially set to a signaled state so that the first call to ma_device_write() is able
+        to get past WaitForMultipleObjects().
+        */
+        pDevice->wasapi.hEventPlayback = (ma_handle)CreateEventA(NULL, FALSE, TRUE, NULL); /* Auto reset, signaled by default. 
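 In Win32 terms this is CreateEventA(lpEventAttributes, bManualReset, bInitialState, lpName) with bManualReset = FALSE and bInitialState = TRUE: the event auto-resets after a successful wait and starts out signaled so the first write can get past its wait.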
*/ + if (pDevice->wasapi.hEventPlayback == NULL) { + result = ma_result_from_GetLastError(GetLastError()); + + if (pConfig->deviceType == ma_device_type_duplex) { + if (pDevice->wasapi.pCaptureClient != NULL) { + ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient); + pDevice->wasapi.pCaptureClient = NULL; + } + if (pDevice->wasapi.pAudioClientCapture != NULL) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + pDevice->wasapi.pAudioClientCapture = NULL; + } + + CloseHandle((HANDLE)pDevice->wasapi.hEventCapture); + pDevice->wasapi.hEventCapture = NULL; + } + + if (pDevice->wasapi.pRenderClient != NULL) { + ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient); + pDevice->wasapi.pRenderClient = NULL; + } + if (pDevice->wasapi.pAudioClientPlayback != NULL) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback); + pDevice->wasapi.pAudioClientPlayback = NULL; + } + + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for playback."); + return result; + } + ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, (HANDLE)pDevice->wasapi.hEventPlayback); + + pDevice->wasapi.periodSizeInFramesPlayback = data.periodSizeInFramesOut; + ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &pDevice->wasapi.actualBufferSizeInFramesPlayback); + + /* We must always have a valid ID because rerouting will look at it. */ + ma_strcpy_s_WCHAR(pDevice->playback.id.wasapi, sizeof(pDevice->playback.id.wasapi), data.id.wasapi); + + /* The descriptor needs to be updated with actual values. */ + pDescriptorPlayback->format = data.formatOut; + pDescriptorPlayback->channels = data.channelsOut; + pDescriptorPlayback->sampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDescriptorPlayback->channelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDescriptorPlayback->periodSizeInFrames = data.periodSizeInFramesOut; + pDescriptorPlayback->periodCount = data.periodsOut; + } + + /* + We need to register a notification client to detect when the device has been disabled, unplugged or re-routed (when the default device changes). When + we are connecting to the default device we want to do automatic stream routing when the device is disabled or unplugged. Otherwise we want to just + stop the device outright and let the application handle it. 
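 As a usage note (editor's sketch, not part of the original source), automatic stream routing can also be opted out of entirely through the backend-specific config, which is what the noAutoStreamRouting check below looks at:
+
+        ma_device_config config = ma_device_config_init(ma_device_type_playback);
+        config.wasapi.noAutoStreamRouting = MA_TRUE;   <- do not reroute automatically when the default endpoint changes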
+ */ +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + if (pConfig->wasapi.noAutoStreamRouting == MA_FALSE) { + if ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex || pConfig->deviceType == ma_device_type_loopback) && pConfig->capture.pDeviceID == NULL) { + pDevice->wasapi.allowCaptureAutoStreamRouting = MA_TRUE; + } + if ((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.pDeviceID == NULL) { + pDevice->wasapi.allowPlaybackAutoStreamRouting = MA_TRUE; + } + } + + ma_mutex_init(&pDevice->wasapi.rerouteLock); + + hr = ma_CoCreateInstance(pDevice->pContext, &MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, &MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator); + if (FAILED(hr)) { + ma_device_uninit__wasapi(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator."); + return ma_result_from_HRESULT(hr); + } + + pDevice->wasapi.notificationClient.lpVtbl = (void*)&g_maNotificationCientVtbl; + pDevice->wasapi.notificationClient.counter = 1; + pDevice->wasapi.notificationClient.pDevice = pDevice; + + hr = pDeviceEnumerator->lpVtbl->RegisterEndpointNotificationCallback(pDeviceEnumerator, &pDevice->wasapi.notificationClient); + if (SUCCEEDED(hr)) { + pDevice->wasapi.pDeviceEnumerator = (ma_ptr)pDeviceEnumerator; + } else { + /* Not the end of the world if we fail to register the notification callback. We just won't support automatic stream routing. */ + ma_IMMDeviceEnumerator_Release(pDeviceEnumerator); + } +#endif + + ma_atomic_bool32_set(&pDevice->wasapi.isStartedCapture, MA_FALSE); + ma_atomic_bool32_set(&pDevice->wasapi.isStartedPlayback, MA_FALSE); + + return MA_SUCCESS; +} + +static ma_result ma_device__get_available_frames__wasapi(ma_device* pDevice, ma_IAudioClient* pAudioClient, ma_uint32* pFrameCount) +{ + ma_uint32 paddingFramesCount; + HRESULT hr; + ma_share_mode shareMode; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pFrameCount != NULL); + + *pFrameCount = 0; + + if ((ma_ptr)pAudioClient != pDevice->wasapi.pAudioClientPlayback && (ma_ptr)pAudioClient != pDevice->wasapi.pAudioClientCapture) { + return MA_INVALID_OPERATION; + } + + /* + I've had a report that GetCurrentPadding() is returning a frame count of 0 which is preventing + higher level function calls from doing anything because it thinks nothing is available. I have + taken a look at the documentation and it looks like this is unnecessary in exclusive mode. + + From Microsoft's documentation: + + For an exclusive-mode rendering or capture stream that was initialized with the + AUDCLNT_STREAMFLAGS_EVENTCALLBACK flag, the client typically has no use for the padding + value reported by GetCurrentPadding. Instead, the client accesses an entire buffer during + each processing pass. + + Considering this, I'm going to skip GetCurrentPadding() for exclusive mode and just report the + entire buffer. This depends on the caller making sure they wait on the event handler. + */ + shareMode = ((ma_ptr)pAudioClient == pDevice->wasapi.pAudioClientPlayback) ? pDevice->playback.shareMode : pDevice->capture.shareMode; + if (shareMode == ma_share_mode_shared) { + /* Shared mode. 
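 In that case the playback side reports the free space (actual buffer size minus the padding) while the capture side reports the padding itself, i.e. the number of frames that are ready to be read.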
*/ + hr = ma_IAudioClient_GetCurrentPadding(pAudioClient, &paddingFramesCount); + if (FAILED(hr)) { + return ma_result_from_HRESULT(hr); + } + + if ((ma_ptr)pAudioClient == pDevice->wasapi.pAudioClientPlayback) { + *pFrameCount = pDevice->wasapi.actualBufferSizeInFramesPlayback - paddingFramesCount; + } else { + *pFrameCount = paddingFramesCount; + } + } else { + /* Exclusive mode. */ + if ((ma_ptr)pAudioClient == pDevice->wasapi.pAudioClientPlayback) { + *pFrameCount = pDevice->wasapi.actualBufferSizeInFramesPlayback; + } else { + *pFrameCount = pDevice->wasapi.actualBufferSizeInFramesCapture; + } + } + + return MA_SUCCESS; +} + + +static ma_result ma_device_reroute__wasapi(ma_device* pDevice, ma_device_type deviceType) +{ + ma_result result; + + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "=== CHANGING DEVICE ===\n"); + + result = ma_device_reinit__wasapi(pDevice, deviceType); + if (result != MA_SUCCESS) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[WASAPI] Reinitializing device after route change failed.\n"); + return result; + } + + ma_device__post_init_setup(pDevice, deviceType); + ma_device__on_notification_rerouted(pDevice); + + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "=== DEVICE CHANGED ===\n"); + + return MA_SUCCESS; +} + +static ma_result ma_device_start__wasapi_nolock(ma_device* pDevice) +{ + HRESULT hr; + + if (pDevice->pContext->wasapi.hAvrt) { + const char* pTaskName = ma_to_usage_string__wasapi(pDevice->wasapi.usage); + if (pTaskName) { + DWORD idx = 0; + pDevice->wasapi.hAvrtHandle = (ma_handle)((MA_PFN_AvSetMmThreadCharacteristicsA)pDevice->pContext->wasapi.AvSetMmThreadCharacteristicsA)(pTaskName, &idx); + } + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) { + hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + if (FAILED(hr)) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal capture device. HRESULT = %d.", (int)hr); + return ma_result_from_HRESULT(hr); + } + + ma_atomic_bool32_set(&pDevice->wasapi.isStartedCapture, MA_TRUE); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback); + if (FAILED(hr)) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal playback device. HRESULT = %d.", (int)hr); + return ma_result_from_HRESULT(hr); + } + + ma_atomic_bool32_set(&pDevice->wasapi.isStartedPlayback, MA_TRUE); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_start__wasapi(ma_device* pDevice) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + + /* Wait for any rerouting to finish before attempting to start the device. 
*/ + ma_mutex_lock(&pDevice->wasapi.rerouteLock); + { + result = ma_device_start__wasapi_nolock(pDevice); + } + ma_mutex_unlock(&pDevice->wasapi.rerouteLock); + + return result; +} + +static ma_result ma_device_stop__wasapi_nolock(ma_device* pDevice) +{ + ma_result result; + HRESULT hr; + + MA_ASSERT(pDevice != NULL); + + if (pDevice->wasapi.hAvrtHandle) { + ((MA_PFN_AvRevertMmThreadCharacteristics)pDevice->pContext->wasapi.AvRevertMmThreadcharacteristics)((HANDLE)pDevice->wasapi.hAvrtHandle); + pDevice->wasapi.hAvrtHandle = NULL; + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) { + hr = ma_IAudioClient_Stop((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to stop internal capture device."); + return ma_result_from_HRESULT(hr); + } + + /* The audio client needs to be reset otherwise restarting will fail. */ + hr = ma_IAudioClient_Reset((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to reset internal capture device."); + return ma_result_from_HRESULT(hr); + } + + /* If we have a mapped buffer we need to release it. */ + if (pDevice->wasapi.pMappedBufferCapture != NULL) { + ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, pDevice->wasapi.mappedBufferCaptureCap); + pDevice->wasapi.pMappedBufferCapture = NULL; + pDevice->wasapi.mappedBufferCaptureCap = 0; + pDevice->wasapi.mappedBufferCaptureLen = 0; + } + + ma_atomic_bool32_set(&pDevice->wasapi.isStartedCapture, MA_FALSE); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + /* + The buffer needs to be drained before stopping the device. Not doing this will result in the last few frames not getting output to + the speakers. This is a problem for very short sounds because it'll result in a significant portion of it not getting played. + */ + if (ma_atomic_bool32_get(&pDevice->wasapi.isStartedPlayback)) { + /* We need to make sure we put a timeout here or else we'll risk getting stuck in a deadlock in some cases. */ + DWORD waitTime = pDevice->wasapi.actualBufferSizeInFramesPlayback / pDevice->playback.internalSampleRate; + + if (pDevice->playback.shareMode == ma_share_mode_exclusive) { + WaitForSingleObject((HANDLE)pDevice->wasapi.hEventPlayback, waitTime); + } + else { + ma_uint32 prevFramesAvaialablePlayback = (ma_uint32)-1; + ma_uint32 framesAvailablePlayback; + for (;;) { + result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &framesAvailablePlayback); + if (result != MA_SUCCESS) { + break; + } + + if (framesAvailablePlayback >= pDevice->wasapi.actualBufferSizeInFramesPlayback) { + break; + } + + /* + Just a safety check to avoid an infinite loop. If this iteration results in a situation where the number of available frames + has not changed, get out of the loop. I don't think this should ever happen, but I think it's nice to have just in case. + */ + if (framesAvailablePlayback == prevFramesAvaialablePlayback) { + break; + } + prevFramesAvaialablePlayback = framesAvailablePlayback; + + WaitForSingleObject((HANDLE)pDevice->wasapi.hEventPlayback, waitTime * 1000); + ResetEvent((HANDLE)pDevice->wasapi.hEventPlayback); /* Manual reset. 
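 ResetEvent() simply forces the event back to the non-signaled state before the next wait; calling it on an auto-reset event is harmless.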
*/ + } + } + } + + hr = ma_IAudioClient_Stop((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to stop internal playback device."); + return ma_result_from_HRESULT(hr); + } + + /* The audio client needs to be reset otherwise restarting will fail. */ + hr = ma_IAudioClient_Reset((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to reset internal playback device."); + return ma_result_from_HRESULT(hr); + } + + if (pDevice->wasapi.pMappedBufferPlayback != NULL) { + ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, pDevice->wasapi.mappedBufferPlaybackCap, 0); + pDevice->wasapi.pMappedBufferPlayback = NULL; + pDevice->wasapi.mappedBufferPlaybackCap = 0; + pDevice->wasapi.mappedBufferPlaybackLen = 0; + } + + ma_atomic_bool32_set(&pDevice->wasapi.isStartedPlayback, MA_FALSE); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__wasapi(ma_device* pDevice) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + + /* Wait for any rerouting to finish before attempting to stop the device. */ + ma_mutex_lock(&pDevice->wasapi.rerouteLock); + { + result = ma_device_stop__wasapi_nolock(pDevice); + } + ma_mutex_unlock(&pDevice->wasapi.rerouteLock); + + return result; +} + + +#ifndef MA_WASAPI_WAIT_TIMEOUT_MILLISECONDS +#define MA_WASAPI_WAIT_TIMEOUT_MILLISECONDS 5000 +#endif + +static ma_result ma_device_read__wasapi(ma_device* pDevice, void* pFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + ma_result result = MA_SUCCESS; + ma_uint32 totalFramesProcessed = 0; + + /* + When reading, we need to get a buffer and process all of it before releasing it. Because the + frame count (frameCount) can be different to the size of the buffer, we'll need to cache the + pointer to the buffer. + */ + + /* Keep running until we've processed the requested number of frames. */ + while (ma_device_get_state(pDevice) == ma_device_state_started && totalFramesProcessed < frameCount) { + ma_uint32 framesRemaining = frameCount - totalFramesProcessed; + + /* If we have a mapped data buffer, consume that first. */ + if (pDevice->wasapi.pMappedBufferCapture != NULL) { + /* We have a cached data pointer so consume that before grabbing another one from WASAPI. */ + ma_uint32 framesToProcessNow = framesRemaining; + if (framesToProcessNow > pDevice->wasapi.mappedBufferCaptureLen) { + framesToProcessNow = pDevice->wasapi.mappedBufferCaptureLen; + } + + /* Now just copy the data over to the output buffer. */ + ma_copy_pcm_frames( + ma_offset_pcm_frames_ptr(pFrames, totalFramesProcessed, pDevice->capture.internalFormat, pDevice->capture.internalChannels), + ma_offset_pcm_frames_const_ptr(pDevice->wasapi.pMappedBufferCapture, pDevice->wasapi.mappedBufferCaptureCap - pDevice->wasapi.mappedBufferCaptureLen, pDevice->capture.internalFormat, pDevice->capture.internalChannels), + framesToProcessNow, + pDevice->capture.internalFormat, pDevice->capture.internalChannels + ); + + totalFramesProcessed += framesToProcessNow; + pDevice->wasapi.mappedBufferCaptureLen -= framesToProcessNow; + + /* If the data buffer has been fully consumed we need to release it. 
*/ + if (pDevice->wasapi.mappedBufferCaptureLen == 0) { + ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, pDevice->wasapi.mappedBufferCaptureCap); + pDevice->wasapi.pMappedBufferCapture = NULL; + pDevice->wasapi.mappedBufferCaptureCap = 0; + } + } else { + /* We don't have any cached data pointer, so grab another one. */ + HRESULT hr; + DWORD flags = 0; + + /* First just ask WASAPI for a data buffer. If it's not available, we'll wait for more. */ + hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pDevice->wasapi.pMappedBufferCapture, &pDevice->wasapi.mappedBufferCaptureCap, &flags, NULL, NULL); + if (hr == S_OK) { + /* We got a data buffer. Continue to the next loop iteration which will then read from the mapped pointer. */ + pDevice->wasapi.mappedBufferCaptureLen = pDevice->wasapi.mappedBufferCaptureCap; + + /* + There have been reports that indicate that at times the AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY is reported for every + call to IAudioCaptureClient_GetBuffer() above which results in spamming of the debug messages below. To partially + work around this, I'm only outputting these messages when MA_DEBUG_OUTPUT is explicitly defined. The better solution + would be to figure out why the flag is always getting reported. + */ +#if defined(MA_DEBUG_OUTPUT) + { + if (flags != 0) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Capture Flags: %ld\n", flags); + + if ((flags & MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) != 0) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Data discontinuity (possible overrun). Attempting recovery. mappedBufferCaptureCap=%d\n", pDevice->wasapi.mappedBufferCaptureCap); + } + } + } +#endif + + /* Overrun detection. */ + if ((flags & MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) != 0) { + /* Glitched. Probably due to an overrun. */ + + /* + If we got an overrun it probably means we're straddling the end of the buffer. In normal capture + mode this is the fault of the client application because they're responsible for ensuring data is + processed fast enough. In duplex mode, however, the processing of audio is tied to the playback + device, so this can possibly be the result of a timing de-sync. + + In capture mode we're not going to do any kind of recovery because the real fix is for the client + application to process faster. In duplex mode, we'll treat this as a desync and reset the buffers + to prevent a never-ending sequence of glitches due to straddling the end of the buffer. + */ + if (pDevice->type == ma_device_type_duplex) { + /* + Experiment: + + If we empty out the *entire* buffer we may end up putting ourselves into an underrun position + which isn't really any better than the overrun we're probably in right now. Instead we'll just + empty out about half. 
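 As a concrete example of the arithmetic below (editor's note): with a capture buffer that holds 3 periods the loop runs ceil(3 / 2) = 2 release/get iterations, and with 2 periods it runs exactly 1.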
+ */ + ma_uint32 i; + ma_uint32 periodCount = (pDevice->wasapi.actualBufferSizeInFramesCapture / pDevice->wasapi.periodSizeInFramesCapture); + ma_uint32 iterationCount = periodCount / 2; + if ((periodCount % 2) > 0) { + iterationCount += 1; + } + + for (i = 0; i < iterationCount; i += 1) { + hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, pDevice->wasapi.mappedBufferCaptureCap); + if (FAILED(hr)) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Data discontinuity recovery: IAudioCaptureClient_ReleaseBuffer() failed with %ld.\n", hr); + break; + } + + flags = 0; + hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pDevice->wasapi.pMappedBufferCapture, &pDevice->wasapi.mappedBufferCaptureCap, &flags, NULL, NULL); + if (hr == MA_AUDCLNT_S_BUFFER_EMPTY || FAILED(hr)) { + /* + The buffer has been completely emptied or an error occurred. In this case we'll need + to reset the state of the mapped buffer which will trigger the next iteration to get + a fresh buffer from WASAPI. + */ + pDevice->wasapi.pMappedBufferCapture = NULL; + pDevice->wasapi.mappedBufferCaptureCap = 0; + pDevice->wasapi.mappedBufferCaptureLen = 0; + + if (hr == MA_AUDCLNT_S_BUFFER_EMPTY) { + if ((flags & MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) != 0) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Data discontinuity recovery: Buffer emptied, and data discontinuity still reported.\n"); + } else { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Data discontinuity recovery: Buffer emptied.\n"); + } + } + + if (FAILED(hr)) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Data discontinuity recovery: IAudioCaptureClient_GetBuffer() failed with %ld.\n", hr); + } + + break; + } + } + + /* If at this point we have a valid buffer mapped, make sure the buffer length is set appropriately. */ + if (pDevice->wasapi.pMappedBufferCapture != NULL) { + pDevice->wasapi.mappedBufferCaptureLen = pDevice->wasapi.mappedBufferCaptureCap; + } + } + } + + continue; + } else { + if (hr == MA_AUDCLNT_S_BUFFER_EMPTY || hr == MA_AUDCLNT_E_BUFFER_ERROR) { + /* + No data is available. We need to wait for more. There's two situations to consider + here. The first is normal capture mode. If this times out it probably means the + microphone isn't delivering data for whatever reason. In this case we'll just + abort the read and return whatever we were able to get. The other situations is + loopback mode, in which case a timeout probably just means the nothing is playing + through the speakers. + */ + + /* Experiment: Use a shorter timeout for loopback mode. */ + DWORD timeoutInMilliseconds = MA_WASAPI_WAIT_TIMEOUT_MILLISECONDS; + if (pDevice->type == ma_device_type_loopback) { + timeoutInMilliseconds = 10; + } + + if (WaitForSingleObject((HANDLE)pDevice->wasapi.hEventCapture, timeoutInMilliseconds) != WAIT_OBJECT_0) { + if (pDevice->type == ma_device_type_loopback) { + continue; /* Keep waiting in loopback mode. */ + } else { + result = MA_ERROR; + break; /* Wait failed. */ + } + } + + /* At this point we should be able to loop back to the start of the loop and try retrieving a data buffer again. */ + } else { + /* An error occured and we need to abort. */ + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for reading from the device. HRESULT = %d. 
Stopping device.\n", (int)hr); + result = ma_result_from_HRESULT(hr); + break; + } + } + } + } + + /* + If we were unable to process the entire requested frame count, but we still have a mapped buffer, + there's a good chance either an error occurred or the device was stopped mid-read. In this case + we'll need to make sure the buffer is released. + */ + if (totalFramesProcessed < frameCount && pDevice->wasapi.pMappedBufferCapture != NULL) { + ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, pDevice->wasapi.mappedBufferCaptureCap); + pDevice->wasapi.pMappedBufferCapture = NULL; + pDevice->wasapi.mappedBufferCaptureCap = 0; + pDevice->wasapi.mappedBufferCaptureLen = 0; + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesProcessed; + } + + return result; +} + +static ma_result ma_device_write__wasapi(ma_device* pDevice, const void* pFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +{ + ma_result result = MA_SUCCESS; + ma_uint32 totalFramesProcessed = 0; + + /* Keep writing to the device until it's stopped or we've consumed all of our input. */ + while (ma_device_get_state(pDevice) == ma_device_state_started && totalFramesProcessed < frameCount) { + ma_uint32 framesRemaining = frameCount - totalFramesProcessed; + + /* + We're going to do this in a similar way to capture. We'll first check if the cached data pointer + is valid, and if so, read from that. Otherwise We will call IAudioRenderClient_GetBuffer() with + a requested buffer size equal to our actual period size. If it returns AUDCLNT_E_BUFFER_TOO_LARGE + it means we need to wait for some data to become available. + */ + if (pDevice->wasapi.pMappedBufferPlayback != NULL) { + /* We still have some space available in the mapped data buffer. Write to it. */ + ma_uint32 framesToProcessNow = framesRemaining; + if (framesToProcessNow > (pDevice->wasapi.mappedBufferPlaybackCap - pDevice->wasapi.mappedBufferPlaybackLen)) { + framesToProcessNow = (pDevice->wasapi.mappedBufferPlaybackCap - pDevice->wasapi.mappedBufferPlaybackLen); + } + + /* Now just copy the data over to the output buffer. */ + ma_copy_pcm_frames( + ma_offset_pcm_frames_ptr(pDevice->wasapi.pMappedBufferPlayback, pDevice->wasapi.mappedBufferPlaybackLen, pDevice->playback.internalFormat, pDevice->playback.internalChannels), + ma_offset_pcm_frames_const_ptr(pFrames, totalFramesProcessed, pDevice->playback.internalFormat, pDevice->playback.internalChannels), + framesToProcessNow, + pDevice->playback.internalFormat, pDevice->playback.internalChannels + ); + + totalFramesProcessed += framesToProcessNow; + pDevice->wasapi.mappedBufferPlaybackLen += framesToProcessNow; + + /* If the data buffer has been fully consumed we need to release it. */ + if (pDevice->wasapi.mappedBufferPlaybackLen == pDevice->wasapi.mappedBufferPlaybackCap) { + ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, pDevice->wasapi.mappedBufferPlaybackCap, 0); + pDevice->wasapi.pMappedBufferPlayback = NULL; + pDevice->wasapi.mappedBufferPlaybackCap = 0; + pDevice->wasapi.mappedBufferPlaybackLen = 0; + + /* + In exclusive mode we need to wait here. Exclusive mode is weird because GetBuffer() never + seems to return AUDCLNT_E_BUFFER_TOO_LARGE, which is what we normally use to determine + whether or not we need to wait for more data. 
+ */ + if (pDevice->playback.shareMode == ma_share_mode_exclusive) { + if (WaitForSingleObject((HANDLE)pDevice->wasapi.hEventPlayback, MA_WASAPI_WAIT_TIMEOUT_MILLISECONDS) != WAIT_OBJECT_0) { + result = MA_ERROR; + break; /* Wait failed. Probably timed out. */ + } + } + } + } else { + /* We don't have a mapped data buffer so we'll need to get one. */ + HRESULT hr; + ma_uint32 bufferSizeInFrames; + + /* Special rules for exclusive mode. */ + if (pDevice->playback.shareMode == ma_share_mode_exclusive) { + bufferSizeInFrames = pDevice->wasapi.actualBufferSizeInFramesPlayback; + } else { + bufferSizeInFrames = pDevice->wasapi.periodSizeInFramesPlayback; + } + + hr = ma_IAudioRenderClient_GetBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, bufferSizeInFrames, (BYTE**)&pDevice->wasapi.pMappedBufferPlayback); + if (hr == S_OK) { + /* We have data available. */ + pDevice->wasapi.mappedBufferPlaybackCap = bufferSizeInFrames; + pDevice->wasapi.mappedBufferPlaybackLen = 0; + } else { + if (hr == MA_AUDCLNT_E_BUFFER_TOO_LARGE || hr == MA_AUDCLNT_E_BUFFER_ERROR) { + /* Not enough data available. We need to wait for more. */ + if (WaitForSingleObject((HANDLE)pDevice->wasapi.hEventPlayback, MA_WASAPI_WAIT_TIMEOUT_MILLISECONDS) != WAIT_OBJECT_0) { + result = MA_ERROR; + break; /* Wait failed. Probably timed out. */ + } + } else { + /* Some error occurred. We'll need to abort. */ + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from playback device in preparation for writing to the device. HRESULT = %d. Stopping device.\n", (int)hr); + result = ma_result_from_HRESULT(hr); + break; + } + } + } + } + + if (pFramesWritten != NULL) { + *pFramesWritten = totalFramesProcessed; + } + + return result; +} + +static ma_result ma_device_data_loop_wakeup__wasapi(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) { + SetEvent((HANDLE)pDevice->wasapi.hEventCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + SetEvent((HANDLE)pDevice->wasapi.hEventPlayback); + } + + return MA_SUCCESS; +} + + +static ma_result ma_context_uninit__wasapi(ma_context* pContext) +{ + ma_context_command__wasapi cmd = ma_context_init_command__wasapi(MA_CONTEXT_COMMAND_QUIT__WASAPI); + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_wasapi); + + ma_context_post_command__wasapi(pContext, &cmd); + ma_thread_wait(&pContext->wasapi.commandThread); + + if (pContext->wasapi.hAvrt) { + ma_dlclose(ma_context_get_log(pContext), pContext->wasapi.hAvrt); + pContext->wasapi.hAvrt = NULL; + } + +#if defined(MA_WIN32_UWP) + { + if (pContext->wasapi.hMMDevapi) { + ma_dlclose(ma_context_get_log(pContext), pContext->wasapi.hMMDevapi); + pContext->wasapi.hMMDevapi = NULL; + } + } +#endif + + /* Only after the thread has been terminated can we uninitialize the sync objects for the command thread. */ + ma_semaphore_uninit(&pContext->wasapi.commandSem); + ma_mutex_uninit(&pContext->wasapi.commandLock); + + return MA_SUCCESS; +} + +static ma_result ma_context_init__wasapi(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + ma_result result = MA_SUCCESS; + + MA_ASSERT(pContext != NULL); + + (void)pConfig; + +#ifdef MA_WIN32_DESKTOP + /* + WASAPI is only supported in Vista SP1 and newer. 
The reason for SP1 and not the base version of Vista is that event-driven + exclusive mode does not work until SP1. + + Unfortunately older compilers don't define these functions so we need to dynamically load them in order to avoid a link error. + */ + { + ma_OSVERSIONINFOEXW osvi; + ma_handle kernel32DLL; + ma_PFNVerifyVersionInfoW _VerifyVersionInfoW; + ma_PFNVerSetConditionMask _VerSetConditionMask; + + kernel32DLL = ma_dlopen(ma_context_get_log(pContext), "kernel32.dll"); + if (kernel32DLL == NULL) { + return MA_NO_BACKEND; + } + + _VerifyVersionInfoW = (ma_PFNVerifyVersionInfoW )ma_dlsym(ma_context_get_log(pContext), kernel32DLL, "VerifyVersionInfoW"); + _VerSetConditionMask = (ma_PFNVerSetConditionMask)ma_dlsym(ma_context_get_log(pContext), kernel32DLL, "VerSetConditionMask"); + if (_VerifyVersionInfoW == NULL || _VerSetConditionMask == NULL) { + ma_dlclose(ma_context_get_log(pContext), kernel32DLL); + return MA_NO_BACKEND; + } + + MA_ZERO_OBJECT(&osvi); + osvi.dwOSVersionInfoSize = sizeof(osvi); + osvi.dwMajorVersion = ((MA_WIN32_WINNT_VISTA >> 8) & 0xFF); + osvi.dwMinorVersion = ((MA_WIN32_WINNT_VISTA >> 0) & 0xFF); + osvi.wServicePackMajor = 1; + if (_VerifyVersionInfoW(&osvi, MA_VER_MAJORVERSION | MA_VER_MINORVERSION | MA_VER_SERVICEPACKMAJOR, _VerSetConditionMask(_VerSetConditionMask(_VerSetConditionMask(0, MA_VER_MAJORVERSION, MA_VER_GREATER_EQUAL), MA_VER_MINORVERSION, MA_VER_GREATER_EQUAL), MA_VER_SERVICEPACKMAJOR, MA_VER_GREATER_EQUAL))) { + result = MA_SUCCESS; + } else { + result = MA_NO_BACKEND; + } + + ma_dlclose(ma_context_get_log(pContext), kernel32DLL); + } +#endif + + if (result != MA_SUCCESS) { + return result; + } + + MA_ZERO_OBJECT(&pContext->wasapi); + + /* + Annoyingly, WASAPI does not allow you to release an IAudioClient object from a different thread + than the one that retrieved it with GetService(). This can result in a deadlock in two + situations: + + 1) When calling ma_device_uninit() from a different thread to ma_device_init(); and + 2) When uninitializing and reinitializing the internal IAudioClient object in response to + automatic stream routing. + + We could define ma_device_uninit() such that it must be called on the same thread as + ma_device_init(). We could also just not release the IAudioClient when performing automatic + stream routing to avoid the deadlock. Neither of these are acceptable solutions in my view so + we're going to have to work around this with a worker thread. This is not ideal, but I can't + think of a better way to do this. + + More information about this can be found here: + + https://docs.microsoft.com/en-us/windows/win32/api/audioclient/nn-audioclient-iaudiorenderclient + + Note this section: + + When releasing an IAudioRenderClient interface instance, the client must call the interface's + Release method from the same thread as the call to IAudioClient::GetService that created the + object. 
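 As a rough sketch of how that worker-thread workaround looks in practice (editor's illustration; the command name and payload fields below are illustrative rather than the library's exact API), the idea is that instead of calling Release() directly from the wrong thread, a command is posted and the command thread, which owns the COM calls, performs the release later:
+
+        ma_context_command__wasapi cmd = ma_context_init_command__wasapi(MA_CONTEXT_COMMAND_RELEASE_AUDIO_CLIENT__WASAPI);   <- illustrative command code
+        cmd.data.releaseAudioClient.pDevice    = pDevice;                                                                    <- illustrative payload
+        cmd.data.releaseAudioClient.deviceType = deviceType;
+        ma_context_post_command__wasapi(pContext, &cmd);   <- runs on the command thread, not the caller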
+ */ + { + result = ma_mutex_init(&pContext->wasapi.commandLock); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_semaphore_init(0, &pContext->wasapi.commandSem); + if (result != MA_SUCCESS) { + ma_mutex_uninit(&pContext->wasapi.commandLock); + return result; + } + + result = ma_thread_create(&pContext->wasapi.commandThread, ma_thread_priority_normal, 0, ma_context_command_thread__wasapi, pContext, &pContext->allocationCallbacks); + if (result != MA_SUCCESS) { + ma_semaphore_uninit(&pContext->wasapi.commandSem); + ma_mutex_uninit(&pContext->wasapi.commandLock); + return result; + } + +#if defined(MA_WIN32_UWP) + { + /* Link to mmdevapi so we can get access to ActivateAudioInterfaceAsync(). */ + pContext->wasapi.hMMDevapi = ma_dlopen(ma_context_get_log(pContext), "mmdevapi.dll"); + if (pContext->wasapi.hMMDevapi) { + pContext->wasapi.ActivateAudioInterfaceAsync = ma_dlsym(ma_context_get_log(pContext), pContext->wasapi.hMMDevapi, "ActivateAudioInterfaceAsync"); + if (pContext->wasapi.ActivateAudioInterfaceAsync == NULL) { + ma_semaphore_uninit(&pContext->wasapi.commandSem); + ma_mutex_uninit(&pContext->wasapi.commandLock); + ma_dlclose(ma_context_get_log(pContext), pContext->wasapi.hMMDevapi); + return MA_NO_BACKEND; /* ActivateAudioInterfaceAsync() could not be loaded. */ + } + } else { + ma_semaphore_uninit(&pContext->wasapi.commandSem); + ma_mutex_uninit(&pContext->wasapi.commandLock); + return MA_NO_BACKEND; /* Failed to load mmdevapi.dll which is required for ActivateAudioInterfaceAsync() */ + } + } +#endif + + /* Optionally use the Avrt API to specify the audio thread's latency sensitivity requirements */ + pContext->wasapi.hAvrt = ma_dlopen(ma_context_get_log(pContext), "avrt.dll"); + if (pContext->wasapi.hAvrt) { + pContext->wasapi.AvSetMmThreadCharacteristicsA = ma_dlsym(ma_context_get_log(pContext), pContext->wasapi.hAvrt, "AvSetMmThreadCharacteristicsA"); + pContext->wasapi.AvRevertMmThreadcharacteristics = ma_dlsym(ma_context_get_log(pContext), pContext->wasapi.hAvrt, "AvRevertMmThreadCharacteristics"); + + /* If either function could not be found, disable use of avrt entirely. 
*/ + if (!pContext->wasapi.AvSetMmThreadCharacteristicsA || !pContext->wasapi.AvRevertMmThreadcharacteristics) { + pContext->wasapi.AvSetMmThreadCharacteristicsA = NULL; + pContext->wasapi.AvRevertMmThreadcharacteristics = NULL; + ma_dlclose(ma_context_get_log(pContext), pContext->wasapi.hAvrt); + pContext->wasapi.hAvrt = NULL; + } + } + } + + + pCallbacks->onContextInit = ma_context_init__wasapi; + pCallbacks->onContextUninit = ma_context_uninit__wasapi; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__wasapi; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__wasapi; + pCallbacks->onDeviceInit = ma_device_init__wasapi; + pCallbacks->onDeviceUninit = ma_device_uninit__wasapi; + pCallbacks->onDeviceStart = ma_device_start__wasapi; + pCallbacks->onDeviceStop = ma_device_stop__wasapi; + pCallbacks->onDeviceRead = ma_device_read__wasapi; + pCallbacks->onDeviceWrite = ma_device_write__wasapi; + pCallbacks->onDeviceDataLoop = NULL; + pCallbacks->onDeviceDataLoopWakeup = ma_device_data_loop_wakeup__wasapi; + + return MA_SUCCESS; +} +#endif + +/****************************************************************************** + +DirectSound Backend + +******************************************************************************/ +#ifdef MA_HAS_DSOUND +/*#include */ + +/*static const GUID MA_GUID_IID_DirectSoundNotify = {0xb0210783, 0x89cd, 0x11d0, {0xaf, 0x08, 0x00, 0xa0, 0xc9, 0x25, 0xcd, 0x16}};*/ + +/* miniaudio only uses priority or exclusive modes. */ +#define MA_DSSCL_NORMAL 1 +#define MA_DSSCL_PRIORITY 2 +#define MA_DSSCL_EXCLUSIVE 3 +#define MA_DSSCL_WRITEPRIMARY 4 + +#define MA_DSCAPS_PRIMARYMONO 0x00000001 +#define MA_DSCAPS_PRIMARYSTEREO 0x00000002 +#define MA_DSCAPS_PRIMARY8BIT 0x00000004 +#define MA_DSCAPS_PRIMARY16BIT 0x00000008 +#define MA_DSCAPS_CONTINUOUSRATE 0x00000010 +#define MA_DSCAPS_EMULDRIVER 0x00000020 +#define MA_DSCAPS_CERTIFIED 0x00000040 +#define MA_DSCAPS_SECONDARYMONO 0x00000100 +#define MA_DSCAPS_SECONDARYSTEREO 0x00000200 +#define MA_DSCAPS_SECONDARY8BIT 0x00000400 +#define MA_DSCAPS_SECONDARY16BIT 0x00000800 + +#define MA_DSBCAPS_PRIMARYBUFFER 0x00000001 +#define MA_DSBCAPS_STATIC 0x00000002 +#define MA_DSBCAPS_LOCHARDWARE 0x00000004 +#define MA_DSBCAPS_LOCSOFTWARE 0x00000008 +#define MA_DSBCAPS_CTRL3D 0x00000010 +#define MA_DSBCAPS_CTRLFREQUENCY 0x00000020 +#define MA_DSBCAPS_CTRLPAN 0x00000040 +#define MA_DSBCAPS_CTRLVOLUME 0x00000080 +#define MA_DSBCAPS_CTRLPOSITIONNOTIFY 0x00000100 +#define MA_DSBCAPS_CTRLFX 0x00000200 +#define MA_DSBCAPS_STICKYFOCUS 0x00004000 +#define MA_DSBCAPS_GLOBALFOCUS 0x00008000 +#define MA_DSBCAPS_GETCURRENTPOSITION2 0x00010000 +#define MA_DSBCAPS_MUTE3DATMAXDISTANCE 0x00020000 +#define MA_DSBCAPS_LOCDEFER 0x00040000 +#define MA_DSBCAPS_TRUEPLAYPOSITION 0x00080000 + +#define MA_DSBPLAY_LOOPING 0x00000001 +#define MA_DSBPLAY_LOCHARDWARE 0x00000002 +#define MA_DSBPLAY_LOCSOFTWARE 0x00000004 +#define MA_DSBPLAY_TERMINATEBY_TIME 0x00000008 +#define MA_DSBPLAY_TERMINATEBY_DISTANCE 0x00000010 +#define MA_DSBPLAY_TERMINATEBY_PRIORITY 0x00000020 + +#define MA_DSCBSTART_LOOPING 0x00000001 + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwBufferBytes; + DWORD dwReserved; + MA_WAVEFORMATEX* lpwfxFormat; + GUID guid3DAlgorithm; +} MA_DSBUFFERDESC; + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwBufferBytes; + DWORD dwReserved; + MA_WAVEFORMATEX* lpwfxFormat; + DWORD dwFXCount; + void* lpDSCFXDesc; /* <-- miniaudio doesn't use this, so set to void*. 
*/ +} MA_DSCBUFFERDESC; + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwMinSecondarySampleRate; + DWORD dwMaxSecondarySampleRate; + DWORD dwPrimaryBuffers; + DWORD dwMaxHwMixingAllBuffers; + DWORD dwMaxHwMixingStaticBuffers; + DWORD dwMaxHwMixingStreamingBuffers; + DWORD dwFreeHwMixingAllBuffers; + DWORD dwFreeHwMixingStaticBuffers; + DWORD dwFreeHwMixingStreamingBuffers; + DWORD dwMaxHw3DAllBuffers; + DWORD dwMaxHw3DStaticBuffers; + DWORD dwMaxHw3DStreamingBuffers; + DWORD dwFreeHw3DAllBuffers; + DWORD dwFreeHw3DStaticBuffers; + DWORD dwFreeHw3DStreamingBuffers; + DWORD dwTotalHwMemBytes; + DWORD dwFreeHwMemBytes; + DWORD dwMaxContigFreeHwMemBytes; + DWORD dwUnlockTransferRateHwBuffers; + DWORD dwPlayCpuOverheadSwBuffers; + DWORD dwReserved1; + DWORD dwReserved2; +} MA_DSCAPS; + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwBufferBytes; + DWORD dwUnlockTransferRate; + DWORD dwPlayCpuOverhead; +} MA_DSBCAPS; + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwFormats; + DWORD dwChannels; +} MA_DSCCAPS; + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwBufferBytes; + DWORD dwReserved; +} MA_DSCBCAPS; + +typedef struct +{ + DWORD dwOffset; + HANDLE hEventNotify; +} MA_DSBPOSITIONNOTIFY; + +typedef struct ma_IDirectSound ma_IDirectSound; +typedef struct ma_IDirectSoundBuffer ma_IDirectSoundBuffer; +typedef struct ma_IDirectSoundCapture ma_IDirectSoundCapture; +typedef struct ma_IDirectSoundCaptureBuffer ma_IDirectSoundCaptureBuffer; +typedef struct ma_IDirectSoundNotify ma_IDirectSoundNotify; + + +/* +COM objects. The way these work is that you have a vtable (a list of function pointers, kind of +like how C++ works internally), and then you have a structure with a single member, which is a +pointer to the vtable. The vtable is where the methods of the object are defined. Methods need +to be in a specific order, and parent classes need to have their methods declared first. 
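 For example, a raw COM call such as pDS->lpVtbl->Release(pDS) is exactly what the ma_IDirectSound_Release(pDS) helper declared below expands to; the object pointer is always passed back in as the first argument, playing the role of C++'s implicit 'this'.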
+*/ + +/* IDirectSound */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSound* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSound* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSound* pThis); + + /* IDirectSound */ + HRESULT (STDMETHODCALLTYPE * CreateSoundBuffer) (ma_IDirectSound* pThis, const MA_DSBUFFERDESC* pDSBufferDesc, ma_IDirectSoundBuffer** ppDSBuffer, void* pUnkOuter); + HRESULT (STDMETHODCALLTYPE * GetCaps) (ma_IDirectSound* pThis, MA_DSCAPS* pDSCaps); + HRESULT (STDMETHODCALLTYPE * DuplicateSoundBuffer)(ma_IDirectSound* pThis, ma_IDirectSoundBuffer* pDSBufferOriginal, ma_IDirectSoundBuffer** ppDSBufferDuplicate); + HRESULT (STDMETHODCALLTYPE * SetCooperativeLevel) (ma_IDirectSound* pThis, HWND hwnd, DWORD dwLevel); + HRESULT (STDMETHODCALLTYPE * Compact) (ma_IDirectSound* pThis); + HRESULT (STDMETHODCALLTYPE * GetSpeakerConfig) (ma_IDirectSound* pThis, DWORD* pSpeakerConfig); + HRESULT (STDMETHODCALLTYPE * SetSpeakerConfig) (ma_IDirectSound* pThis, DWORD dwSpeakerConfig); + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSound* pThis, const GUID* pGuidDevice); +} ma_IDirectSoundVtbl; +struct ma_IDirectSound +{ + ma_IDirectSoundVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IDirectSound_QueryInterface(ma_IDirectSound* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IDirectSound_AddRef(ma_IDirectSound* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IDirectSound_Release(ma_IDirectSound* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IDirectSound_CreateSoundBuffer(ma_IDirectSound* pThis, const MA_DSBUFFERDESC* pDSBufferDesc, ma_IDirectSoundBuffer** ppDSBuffer, void* pUnkOuter) { return pThis->lpVtbl->CreateSoundBuffer(pThis, pDSBufferDesc, ppDSBuffer, pUnkOuter); } +static MA_INLINE HRESULT ma_IDirectSound_GetCaps(ma_IDirectSound* pThis, MA_DSCAPS* pDSCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCaps); } +static MA_INLINE HRESULT ma_IDirectSound_DuplicateSoundBuffer(ma_IDirectSound* pThis, ma_IDirectSoundBuffer* pDSBufferOriginal, ma_IDirectSoundBuffer** ppDSBufferDuplicate) { return pThis->lpVtbl->DuplicateSoundBuffer(pThis, pDSBufferOriginal, ppDSBufferDuplicate); } +static MA_INLINE HRESULT ma_IDirectSound_SetCooperativeLevel(ma_IDirectSound* pThis, HWND hwnd, DWORD dwLevel) { return pThis->lpVtbl->SetCooperativeLevel(pThis, hwnd, dwLevel); } +static MA_INLINE HRESULT ma_IDirectSound_Compact(ma_IDirectSound* pThis) { return pThis->lpVtbl->Compact(pThis); } +static MA_INLINE HRESULT ma_IDirectSound_GetSpeakerConfig(ma_IDirectSound* pThis, DWORD* pSpeakerConfig) { return pThis->lpVtbl->GetSpeakerConfig(pThis, pSpeakerConfig); } +static MA_INLINE HRESULT ma_IDirectSound_SetSpeakerConfig(ma_IDirectSound* pThis, DWORD dwSpeakerConfig) { return pThis->lpVtbl->SetSpeakerConfig(pThis, dwSpeakerConfig); } +static MA_INLINE HRESULT ma_IDirectSound_Initialize(ma_IDirectSound* pThis, const GUID* pGuidDevice) { return pThis->lpVtbl->Initialize(pThis, pGuidDevice); } + + +/* IDirectSoundBuffer */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundBuffer* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundBuffer* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundBuffer* pThis); + + /* IDirectSoundBuffer */ + HRESULT 
(STDMETHODCALLTYPE * GetCaps) (ma_IDirectSoundBuffer* pThis, MA_DSBCAPS* pDSBufferCaps); + HRESULT (STDMETHODCALLTYPE * GetCurrentPosition)(ma_IDirectSoundBuffer* pThis, DWORD* pCurrentPlayCursor, DWORD* pCurrentWriteCursor); + HRESULT (STDMETHODCALLTYPE * GetFormat) (ma_IDirectSoundBuffer* pThis, MA_WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten); + HRESULT (STDMETHODCALLTYPE * GetVolume) (ma_IDirectSoundBuffer* pThis, LONG* pVolume); + HRESULT (STDMETHODCALLTYPE * GetPan) (ma_IDirectSoundBuffer* pThis, LONG* pPan); + HRESULT (STDMETHODCALLTYPE * GetFrequency) (ma_IDirectSoundBuffer* pThis, DWORD* pFrequency); + HRESULT (STDMETHODCALLTYPE * GetStatus) (ma_IDirectSoundBuffer* pThis, DWORD* pStatus); + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSoundBuffer* pThis, ma_IDirectSound* pDirectSound, const MA_DSBUFFERDESC* pDSBufferDesc); + HRESULT (STDMETHODCALLTYPE * Lock) (ma_IDirectSoundBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags); + HRESULT (STDMETHODCALLTYPE * Play) (ma_IDirectSoundBuffer* pThis, DWORD dwReserved1, DWORD dwPriority, DWORD dwFlags); + HRESULT (STDMETHODCALLTYPE * SetCurrentPosition)(ma_IDirectSoundBuffer* pThis, DWORD dwNewPosition); + HRESULT (STDMETHODCALLTYPE * SetFormat) (ma_IDirectSoundBuffer* pThis, const MA_WAVEFORMATEX* pFormat); + HRESULT (STDMETHODCALLTYPE * SetVolume) (ma_IDirectSoundBuffer* pThis, LONG volume); + HRESULT (STDMETHODCALLTYPE * SetPan) (ma_IDirectSoundBuffer* pThis, LONG pan); + HRESULT (STDMETHODCALLTYPE * SetFrequency) (ma_IDirectSoundBuffer* pThis, DWORD dwFrequency); + HRESULT (STDMETHODCALLTYPE * Stop) (ma_IDirectSoundBuffer* pThis); + HRESULT (STDMETHODCALLTYPE * Unlock) (ma_IDirectSoundBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2); + HRESULT (STDMETHODCALLTYPE * Restore) (ma_IDirectSoundBuffer* pThis); +} ma_IDirectSoundBufferVtbl; +struct ma_IDirectSoundBuffer +{ + ma_IDirectSoundBufferVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IDirectSoundBuffer_QueryInterface(ma_IDirectSoundBuffer* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IDirectSoundBuffer_AddRef(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IDirectSoundBuffer_Release(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetCaps(ma_IDirectSoundBuffer* pThis, MA_DSBCAPS* pDSBufferCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSBufferCaps); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetCurrentPosition(ma_IDirectSoundBuffer* pThis, DWORD* pCurrentPlayCursor, DWORD* pCurrentWriteCursor) { return pThis->lpVtbl->GetCurrentPosition(pThis, pCurrentPlayCursor, pCurrentWriteCursor); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetFormat(ma_IDirectSoundBuffer* pThis, MA_WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten) { return pThis->lpVtbl->GetFormat(pThis, pFormat, dwSizeAllocated, pSizeWritten); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetVolume(ma_IDirectSoundBuffer* pThis, LONG* pVolume) { return pThis->lpVtbl->GetVolume(pThis, pVolume); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetPan(ma_IDirectSoundBuffer* pThis, LONG* pPan) { return pThis->lpVtbl->GetPan(pThis, pPan); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetFrequency(ma_IDirectSoundBuffer* pThis, 
DWORD* pFrequency) { return pThis->lpVtbl->GetFrequency(pThis, pFrequency); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetStatus(ma_IDirectSoundBuffer* pThis, DWORD* pStatus) { return pThis->lpVtbl->GetStatus(pThis, pStatus); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Initialize(ma_IDirectSoundBuffer* pThis, ma_IDirectSound* pDirectSound, const MA_DSBUFFERDESC* pDSBufferDesc) { return pThis->lpVtbl->Initialize(pThis, pDirectSound, pDSBufferDesc); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Lock(ma_IDirectSoundBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags) { return pThis->lpVtbl->Lock(pThis, dwOffset, dwBytes, ppAudioPtr1, pAudioBytes1, ppAudioPtr2, pAudioBytes2, dwFlags); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Play(ma_IDirectSoundBuffer* pThis, DWORD dwReserved1, DWORD dwPriority, DWORD dwFlags) { return pThis->lpVtbl->Play(pThis, dwReserved1, dwPriority, dwFlags); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetCurrentPosition(ma_IDirectSoundBuffer* pThis, DWORD dwNewPosition) { return pThis->lpVtbl->SetCurrentPosition(pThis, dwNewPosition); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetFormat(ma_IDirectSoundBuffer* pThis, const MA_WAVEFORMATEX* pFormat) { return pThis->lpVtbl->SetFormat(pThis, pFormat); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetVolume(ma_IDirectSoundBuffer* pThis, LONG volume) { return pThis->lpVtbl->SetVolume(pThis, volume); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetPan(ma_IDirectSoundBuffer* pThis, LONG pan) { return pThis->lpVtbl->SetPan(pThis, pan); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetFrequency(ma_IDirectSoundBuffer* pThis, DWORD dwFrequency) { return pThis->lpVtbl->SetFrequency(pThis, dwFrequency); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Stop(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Stop(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Unlock(ma_IDirectSoundBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2) { return pThis->lpVtbl->Unlock(pThis, pAudioPtr1, dwAudioBytes1, pAudioPtr2, dwAudioBytes2); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Restore(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Restore(pThis); } + + +/* IDirectSoundCapture */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundCapture* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundCapture* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundCapture* pThis); + + /* IDirectSoundCapture */ + HRESULT (STDMETHODCALLTYPE * CreateCaptureBuffer)(ma_IDirectSoundCapture* pThis, const MA_DSCBUFFERDESC* pDSCBufferDesc, ma_IDirectSoundCaptureBuffer** ppDSCBuffer, void* pUnkOuter); + HRESULT (STDMETHODCALLTYPE * GetCaps) (ma_IDirectSoundCapture* pThis, MA_DSCCAPS* pDSCCaps); + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSoundCapture* pThis, const GUID* pGuidDevice); +} ma_IDirectSoundCaptureVtbl; +struct ma_IDirectSoundCapture +{ + ma_IDirectSoundCaptureVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IDirectSoundCapture_QueryInterface (ma_IDirectSoundCapture* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IDirectSoundCapture_AddRef (ma_IDirectSoundCapture* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG 
ma_IDirectSoundCapture_Release (ma_IDirectSoundCapture* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundCapture_CreateCaptureBuffer(ma_IDirectSoundCapture* pThis, const MA_DSCBUFFERDESC* pDSCBufferDesc, ma_IDirectSoundCaptureBuffer** ppDSCBuffer, void* pUnkOuter) { return pThis->lpVtbl->CreateCaptureBuffer(pThis, pDSCBufferDesc, ppDSCBuffer, pUnkOuter); } +static MA_INLINE HRESULT ma_IDirectSoundCapture_GetCaps (ma_IDirectSoundCapture* pThis, MA_DSCCAPS* pDSCCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCCaps); } +static MA_INLINE HRESULT ma_IDirectSoundCapture_Initialize (ma_IDirectSoundCapture* pThis, const GUID* pGuidDevice) { return pThis->lpVtbl->Initialize(pThis, pGuidDevice); } + + +/* IDirectSoundCaptureBuffer */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundCaptureBuffer* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundCaptureBuffer* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundCaptureBuffer* pThis); + + /* IDirectSoundCaptureBuffer */ + HRESULT (STDMETHODCALLTYPE * GetCaps) (ma_IDirectSoundCaptureBuffer* pThis, MA_DSCBCAPS* pDSCBCaps); + HRESULT (STDMETHODCALLTYPE * GetCurrentPosition)(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pCapturePosition, DWORD* pReadPosition); + HRESULT (STDMETHODCALLTYPE * GetFormat) (ma_IDirectSoundCaptureBuffer* pThis, MA_WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten); + HRESULT (STDMETHODCALLTYPE * GetStatus) (ma_IDirectSoundCaptureBuffer* pThis, DWORD* pStatus); + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSoundCaptureBuffer* pThis, ma_IDirectSoundCapture* pDirectSoundCapture, const MA_DSCBUFFERDESC* pDSCBufferDesc); + HRESULT (STDMETHODCALLTYPE * Lock) (ma_IDirectSoundCaptureBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags); + HRESULT (STDMETHODCALLTYPE * Start) (ma_IDirectSoundCaptureBuffer* pThis, DWORD dwFlags); + HRESULT (STDMETHODCALLTYPE * Stop) (ma_IDirectSoundCaptureBuffer* pThis); + HRESULT (STDMETHODCALLTYPE * Unlock) (ma_IDirectSoundCaptureBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2); +} ma_IDirectSoundCaptureBufferVtbl; +struct ma_IDirectSoundCaptureBuffer +{ + ma_IDirectSoundCaptureBufferVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_QueryInterface(ma_IDirectSoundCaptureBuffer* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IDirectSoundCaptureBuffer_AddRef(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IDirectSoundCaptureBuffer_Release(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetCaps(ma_IDirectSoundCaptureBuffer* pThis, MA_DSCBCAPS* pDSCBCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCBCaps); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetCurrentPosition(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pCapturePosition, DWORD* pReadPosition) { return pThis->lpVtbl->GetCurrentPosition(pThis, pCapturePosition, pReadPosition); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetFormat(ma_IDirectSoundCaptureBuffer* pThis, MA_WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten) { return 
pThis->lpVtbl->GetFormat(pThis, pFormat, dwSizeAllocated, pSizeWritten); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetStatus(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pStatus) { return pThis->lpVtbl->GetStatus(pThis, pStatus); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Initialize(ma_IDirectSoundCaptureBuffer* pThis, ma_IDirectSoundCapture* pDirectSoundCapture, const MA_DSCBUFFERDESC* pDSCBufferDesc) { return pThis->lpVtbl->Initialize(pThis, pDirectSoundCapture, pDSCBufferDesc); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Lock(ma_IDirectSoundCaptureBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags) { return pThis->lpVtbl->Lock(pThis, dwOffset, dwBytes, ppAudioPtr1, pAudioBytes1, ppAudioPtr2, pAudioBytes2, dwFlags); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Start(ma_IDirectSoundCaptureBuffer* pThis, DWORD dwFlags) { return pThis->lpVtbl->Start(pThis, dwFlags); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Stop(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->Stop(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Unlock(ma_IDirectSoundCaptureBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2) { return pThis->lpVtbl->Unlock(pThis, pAudioPtr1, dwAudioBytes1, pAudioPtr2, dwAudioBytes2); } + + +/* IDirectSoundNotify */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundNotify* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundNotify* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundNotify* pThis); + + /* IDirectSoundNotify */ + HRESULT (STDMETHODCALLTYPE * SetNotificationPositions)(ma_IDirectSoundNotify* pThis, DWORD dwPositionNotifies, const MA_DSBPOSITIONNOTIFY* pPositionNotifies); +} ma_IDirectSoundNotifyVtbl; +struct ma_IDirectSoundNotify +{ + ma_IDirectSoundNotifyVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IDirectSoundNotify_QueryInterface(ma_IDirectSoundNotify* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IDirectSoundNotify_AddRef(ma_IDirectSoundNotify* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IDirectSoundNotify_Release(ma_IDirectSoundNotify* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundNotify_SetNotificationPositions(ma_IDirectSoundNotify* pThis, DWORD dwPositionNotifies, const MA_DSBPOSITIONNOTIFY* pPositionNotifies) { return pThis->lpVtbl->SetNotificationPositions(pThis, dwPositionNotifies, pPositionNotifies); } + + +typedef BOOL (CALLBACK * ma_DSEnumCallbackAProc) (GUID* pDeviceGUID, const char* pDeviceDescription, const char* pModule, void* pContext); +typedef HRESULT (WINAPI * ma_DirectSoundCreateProc) (const GUID* pcGuidDevice, ma_IDirectSound** ppDS8, ma_IUnknown* pUnkOuter); +typedef HRESULT (WINAPI * ma_DirectSoundEnumerateAProc) (ma_DSEnumCallbackAProc pDSEnumCallback, void* pContext); +typedef HRESULT (WINAPI * ma_DirectSoundCaptureCreateProc) (const GUID* pcGuidDevice, ma_IDirectSoundCapture** ppDSC8, ma_IUnknown* pUnkOuter); +typedef HRESULT (WINAPI * ma_DirectSoundCaptureEnumerateAProc)(ma_DSEnumCallbackAProc pDSEnumCallback, void* pContext); + +static ma_uint32 ma_get_best_sample_rate_within_range(ma_uint32 sampleRateMin, ma_uint32 sampleRateMax) +{ + /* Normalize the 
range in case we were given something stupid. */ + if (sampleRateMin < (ma_uint32)ma_standard_sample_rate_min) { + sampleRateMin = (ma_uint32)ma_standard_sample_rate_min; + } + if (sampleRateMax > (ma_uint32)ma_standard_sample_rate_max) { + sampleRateMax = (ma_uint32)ma_standard_sample_rate_max; + } + if (sampleRateMin > sampleRateMax) { + sampleRateMin = sampleRateMax; + } + + if (sampleRateMin == sampleRateMax) { + return sampleRateMax; + } else { + size_t iStandardRate; + for (iStandardRate = 0; iStandardRate < ma_countof(g_maStandardSampleRatePriorities); ++iStandardRate) { + ma_uint32 standardRate = g_maStandardSampleRatePriorities[iStandardRate]; + if (standardRate >= sampleRateMin && standardRate <= sampleRateMax) { + return standardRate; + } + } + } + + /* Should never get here. */ + MA_ASSERT(MA_FALSE); + return 0; +} + +/* +Retrieves the channel count and channel map for the given speaker configuration. If the speaker configuration is unknown, +the channel count and channel map will be left unmodified. +*/ +static void ma_get_channels_from_speaker_config__dsound(DWORD speakerConfig, WORD* pChannelsOut, DWORD* pChannelMapOut) +{ + WORD channels; + DWORD channelMap; + + channels = 0; + if (pChannelsOut != NULL) { + channels = *pChannelsOut; + } + + channelMap = 0; + if (pChannelMapOut != NULL) { + channelMap = *pChannelMapOut; + } + + /* + The speaker configuration is a combination of speaker config and speaker geometry. The lower 8 bits is what we care about. The upper + 16 bits is for the geometry. + */ + switch ((BYTE)(speakerConfig)) { + case 1 /*DSSPEAKER_HEADPHONE*/: channels = 2; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT; break; + case 2 /*DSSPEAKER_MONO*/: channels = 1; channelMap = SPEAKER_FRONT_CENTER; break; + case 3 /*DSSPEAKER_QUAD*/: channels = 4; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT; break; + case 4 /*DSSPEAKER_STEREO*/: channels = 2; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT; break; + case 5 /*DSSPEAKER_SURROUND*/: channels = 4; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_BACK_CENTER; break; + case 6 /*DSSPEAKER_5POINT1_BACK*/ /*DSSPEAKER_5POINT1*/: channels = 6; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT; break; + case 7 /*DSSPEAKER_7POINT1_WIDE*/ /*DSSPEAKER_7POINT1*/: channels = 8; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_FRONT_LEFT_OF_CENTER | SPEAKER_FRONT_RIGHT_OF_CENTER; break; + case 8 /*DSSPEAKER_7POINT1_SURROUND*/: channels = 8; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT; break; + case 9 /*DSSPEAKER_5POINT1_SURROUND*/: channels = 6; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT; break; + default: break; + } + + if (pChannelsOut != NULL) { + *pChannelsOut = channels; + } + + if (pChannelMapOut != NULL) { + *pChannelMapOut = channelMap; + } +} + + +static ma_result ma_context_create_IDirectSound__dsound(ma_context* pContext, ma_share_mode shareMode, const ma_device_id* pDeviceID, ma_IDirectSound** ppDirectSound) +{ + ma_IDirectSound* pDirectSound; + HWND hWnd; + HRESULT hr; + + MA_ASSERT(pContext 
!= NULL); + MA_ASSERT(ppDirectSound != NULL); + + *ppDirectSound = NULL; + pDirectSound = NULL; + + if (FAILED(((ma_DirectSoundCreateProc)pContext->dsound.DirectSoundCreate)((pDeviceID == NULL) ? NULL : (const GUID*)pDeviceID->dsound, &pDirectSound, NULL))) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[DirectSound] DirectSoundCreate() failed for playback device."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + + /* The cooperative level must be set before doing anything else. */ + hWnd = ((MA_PFN_GetForegroundWindow)pContext->win32.GetForegroundWindow)(); + if (hWnd == 0) { + hWnd = ((MA_PFN_GetDesktopWindow)pContext->win32.GetDesktopWindow)(); + } + + hr = ma_IDirectSound_SetCooperativeLevel(pDirectSound, hWnd, (shareMode == ma_share_mode_exclusive) ? MA_DSSCL_EXCLUSIVE : MA_DSSCL_PRIORITY); + if (FAILED(hr)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_SetCooperativeLevel() failed for playback device."); + return ma_result_from_HRESULT(hr); + } + + *ppDirectSound = pDirectSound; + return MA_SUCCESS; +} + +static ma_result ma_context_create_IDirectSoundCapture__dsound(ma_context* pContext, ma_share_mode shareMode, const ma_device_id* pDeviceID, ma_IDirectSoundCapture** ppDirectSoundCapture) +{ + ma_IDirectSoundCapture* pDirectSoundCapture; + HRESULT hr; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppDirectSoundCapture != NULL); + + /* DirectSound does not support exclusive mode for capture. */ + if (shareMode == ma_share_mode_exclusive) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + *ppDirectSoundCapture = NULL; + pDirectSoundCapture = NULL; + + hr = ((ma_DirectSoundCaptureCreateProc)pContext->dsound.DirectSoundCaptureCreate)((pDeviceID == NULL) ? NULL : (const GUID*)pDeviceID->dsound, &pDirectSoundCapture, NULL); + if (FAILED(hr)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[DirectSound] DirectSoundCaptureCreate() failed for capture device."); + return ma_result_from_HRESULT(hr); + } + + *ppDirectSoundCapture = pDirectSoundCapture; + return MA_SUCCESS; +} + +static ma_result ma_context_get_format_info_for_IDirectSoundCapture__dsound(ma_context* pContext, ma_IDirectSoundCapture* pDirectSoundCapture, WORD* pChannels, WORD* pBitsPerSample, DWORD* pSampleRate) +{ + HRESULT hr; + MA_DSCCAPS caps; + WORD bitsPerSample; + DWORD sampleRate; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDirectSoundCapture != NULL); + + if (pChannels) { + *pChannels = 0; + } + if (pBitsPerSample) { + *pBitsPerSample = 0; + } + if (pSampleRate) { + *pSampleRate = 0; + } + + MA_ZERO_OBJECT(&caps); + caps.dwSize = sizeof(caps); + hr = ma_IDirectSoundCapture_GetCaps(pDirectSoundCapture, &caps); + if (FAILED(hr)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCapture_GetCaps() failed for capture device."); + return ma_result_from_HRESULT(hr); + } + + if (pChannels) { + *pChannels = (WORD)caps.dwChannels; + } + + /* The device can support multiple formats. We just go through the different formats in order of priority and pick the first one. This is the same type of system as the WinMM backend. 
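As an illustration of that priority cascade (not part of the committed file), the same selection can be written as a table walk. pick_best_mono_format is a hypothetical helper; the WAVE_FORMAT_* bit flags are assumed to come from the Windows multimedia headers this backend already relies on, and only the mono flags are shown for brevity (the stereo path below uses the *S* variants the same way).

#include <windows.h>

typedef struct { DWORD flag16; DWORD flag8; DWORD sampleRate; } fmt_priority;

static void pick_best_mono_format(DWORD dwFormats, WORD* pBitsPerSample, DWORD* pSampleRate)
{
    /* Same order of preference as the cascade below: 48k, 44.1k, 22.05k, 11.025k, then 96k. */
    static const fmt_priority priorities[] = {
        { WAVE_FORMAT_48M16, WAVE_FORMAT_48M08, 48000 },
        { WAVE_FORMAT_44M16, WAVE_FORMAT_44M08, 44100 },
        { WAVE_FORMAT_2M16,  WAVE_FORMAT_2M08,  22050 },
        { WAVE_FORMAT_1M16,  WAVE_FORMAT_1M08,  11025 },
        { WAVE_FORMAT_96M16, WAVE_FORMAT_96M08, 96000 }
    };
    size_t i;

    *pBitsPerSample = 16;    /* Defaults, mirroring the code below. */
    *pSampleRate    = 48000;

    for (i = 0; i < sizeof(priorities)/sizeof(priorities[0]); i += 1) {    /* Prefer 16-bit. */
        if ((dwFormats & priorities[i].flag16) != 0) { *pSampleRate = priorities[i].sampleRate; return; }
    }
    for (i = 0; i < sizeof(priorities)/sizeof(priorities[0]); i += 1) {    /* Then 8-bit. */
        if ((dwFormats & priorities[i].flag8) != 0) { *pBitsPerSample = 8; *pSampleRate = priorities[i].sampleRate; return; }
    }
    /* Nothing matched: keep the 16-bit / 48000 fallback, as the code below does. */
}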
*/ + bitsPerSample = 16; + sampleRate = 48000; + + if (caps.dwChannels == 1) { + if ((caps.dwFormats & WAVE_FORMAT_48M16) != 0) { + sampleRate = 48000; + } else if ((caps.dwFormats & WAVE_FORMAT_44M16) != 0) { + sampleRate = 44100; + } else if ((caps.dwFormats & WAVE_FORMAT_2M16) != 0) { + sampleRate = 22050; + } else if ((caps.dwFormats & WAVE_FORMAT_1M16) != 0) { + sampleRate = 11025; + } else if ((caps.dwFormats & WAVE_FORMAT_96M16) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 8; + if ((caps.dwFormats & WAVE_FORMAT_48M08) != 0) { + sampleRate = 48000; + } else if ((caps.dwFormats & WAVE_FORMAT_44M08) != 0) { + sampleRate = 44100; + } else if ((caps.dwFormats & WAVE_FORMAT_2M08) != 0) { + sampleRate = 22050; + } else if ((caps.dwFormats & WAVE_FORMAT_1M08) != 0) { + sampleRate = 11025; + } else if ((caps.dwFormats & WAVE_FORMAT_96M08) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 16; /* Didn't find it. Just fall back to 16-bit. */ + } + } + } else if (caps.dwChannels == 2) { + if ((caps.dwFormats & WAVE_FORMAT_48S16) != 0) { + sampleRate = 48000; + } else if ((caps.dwFormats & WAVE_FORMAT_44S16) != 0) { + sampleRate = 44100; + } else if ((caps.dwFormats & WAVE_FORMAT_2S16) != 0) { + sampleRate = 22050; + } else if ((caps.dwFormats & WAVE_FORMAT_1S16) != 0) { + sampleRate = 11025; + } else if ((caps.dwFormats & WAVE_FORMAT_96S16) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 8; + if ((caps.dwFormats & WAVE_FORMAT_48S08) != 0) { + sampleRate = 48000; + } else if ((caps.dwFormats & WAVE_FORMAT_44S08) != 0) { + sampleRate = 44100; + } else if ((caps.dwFormats & WAVE_FORMAT_2S08) != 0) { + sampleRate = 22050; + } else if ((caps.dwFormats & WAVE_FORMAT_1S08) != 0) { + sampleRate = 11025; + } else if ((caps.dwFormats & WAVE_FORMAT_96S08) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 16; /* Didn't find it. Just fall back to 16-bit. */ + } + } + } + + if (pBitsPerSample) { + *pBitsPerSample = bitsPerSample; + } + if (pSampleRate) { + *pSampleRate = sampleRate; + } + + return MA_SUCCESS; +} + + +typedef struct +{ + ma_context* pContext; + ma_device_type deviceType; + ma_enum_devices_callback_proc callback; + void* pUserData; + ma_bool32 terminated; +} ma_context_enumerate_devices_callback_data__dsound; + +static BOOL CALLBACK ma_context_enumerate_devices_callback__dsound(GUID* lpGuid, const char* lpcstrDescription, const char* lpcstrModule, void* lpContext) +{ + ma_context_enumerate_devices_callback_data__dsound* pData = (ma_context_enumerate_devices_callback_data__dsound*)lpContext; + ma_device_info deviceInfo; + + (void)lpcstrModule; + + MA_ZERO_OBJECT(&deviceInfo); + + /* ID. */ + if (lpGuid != NULL) { + MA_COPY_MEMORY(deviceInfo.id.dsound, lpGuid, 16); + } else { + MA_ZERO_MEMORY(deviceInfo.id.dsound, 16); + deviceInfo.isDefault = MA_TRUE; + } + + /* Name / Description */ + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), lpcstrDescription, (size_t)-1); + + + /* Call the callback function, but make sure we stop enumerating if the callee requested so. */ + MA_ASSERT(pData != NULL); + pData->terminated = (pData->callback(pData->pContext, pData->deviceType, &deviceInfo, pData->pUserData) == MA_FALSE); + if (pData->terminated) { + return FALSE; /* Stop enumeration. */ + } else { + return TRUE; /* Continue enumeration. 
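For context, this is how the enumeration path above is normally driven through miniaudio's public API. It is an illustrative usage sketch, not part of the committed file; it assumes the ma_context_* declarations from miniaudio.h, and print_device is a hypothetical callback.

#include <stdio.h>

static ma_bool32 print_device(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData)
{
    (void)pContext;
    (void)pUserData;
    printf("%s: %s%s\n", (deviceType == ma_device_type_playback) ? "Playback" : "Capture", pInfo->name, pInfo->isDefault ? " (default)" : "");
    return MA_TRUE;    /* Keep going. Returning MA_FALSE maps to the FALSE/terminated path above. */
}

static void list_dsound_devices(void)
{
    ma_backend backends[] = { ma_backend_dsound };
    ma_context context;

    if (ma_context_init(backends, 1, NULL, &context) == MA_SUCCESS) {
        ma_context_enumerate_devices(&context, print_device, NULL);
        ma_context_uninit(&context);
    }
}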
*/ + } +} + +static ma_result ma_context_enumerate_devices__dsound(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_context_enumerate_devices_callback_data__dsound data; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + data.pContext = pContext; + data.callback = callback; + data.pUserData = pUserData; + data.terminated = MA_FALSE; + + /* Playback. */ + if (!data.terminated) { + data.deviceType = ma_device_type_playback; + ((ma_DirectSoundEnumerateAProc)pContext->dsound.DirectSoundEnumerateA)(ma_context_enumerate_devices_callback__dsound, &data); + } + + /* Capture. */ + if (!data.terminated) { + data.deviceType = ma_device_type_capture; + ((ma_DirectSoundCaptureEnumerateAProc)pContext->dsound.DirectSoundCaptureEnumerateA)(ma_context_enumerate_devices_callback__dsound, &data); + } + + return MA_SUCCESS; +} + + +typedef struct +{ + const ma_device_id* pDeviceID; + ma_device_info* pDeviceInfo; + ma_bool32 found; +} ma_context_get_device_info_callback_data__dsound; + +static BOOL CALLBACK ma_context_get_device_info_callback__dsound(GUID* lpGuid, const char* lpcstrDescription, const char* lpcstrModule, void* lpContext) +{ + ma_context_get_device_info_callback_data__dsound* pData = (ma_context_get_device_info_callback_data__dsound*)lpContext; + MA_ASSERT(pData != NULL); + + if ((pData->pDeviceID == NULL || ma_is_guid_null(pData->pDeviceID->dsound)) && (lpGuid == NULL || ma_is_guid_null(lpGuid))) { + /* Default device. */ + ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), lpcstrDescription, (size_t)-1); + pData->pDeviceInfo->isDefault = MA_TRUE; + pData->found = MA_TRUE; + return FALSE; /* Stop enumeration. */ + } else { + /* Not the default device. */ + if (lpGuid != NULL && pData->pDeviceID != NULL) { + if (memcmp(pData->pDeviceID->dsound, lpGuid, sizeof(pData->pDeviceID->dsound)) == 0) { + ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), lpcstrDescription, (size_t)-1); + pData->found = MA_TRUE; + return FALSE; /* Stop enumeration. */ + } + } + } + + (void)lpcstrModule; + return TRUE; +} + +static ma_result ma_context_get_device_info__dsound(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + ma_result result; + HRESULT hr; + + if (pDeviceID != NULL) { + ma_context_get_device_info_callback_data__dsound data; + + /* ID. */ + MA_COPY_MEMORY(pDeviceInfo->id.dsound, pDeviceID->dsound, 16); + + /* Name / Description. This is retrieved by enumerating over each device until we find that one that matches the input ID. */ + data.pDeviceID = pDeviceID; + data.pDeviceInfo = pDeviceInfo; + data.found = MA_FALSE; + if (deviceType == ma_device_type_playback) { + ((ma_DirectSoundEnumerateAProc)pContext->dsound.DirectSoundEnumerateA)(ma_context_get_device_info_callback__dsound, &data); + } else { + ((ma_DirectSoundCaptureEnumerateAProc)pContext->dsound.DirectSoundCaptureEnumerateA)(ma_context_get_device_info_callback__dsound, &data); + } + + if (!data.found) { + return MA_NO_DEVICE; + } + } else { + /* I don't think there's a way to get the name of the default device with DirectSound. In this case we just need to use defaults. 
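A related usage sketch (illustrative, not part of the committed file): querying the default playback device takes the NULL-device-ID path handled just below. It assumes the public ma_context_get_device_info() declaration from miniaudio.h, which has the same (context, device type, device ID, device info) shape as the backend function here; print_default_playback_info is hypothetical.

#include <stdio.h>

static void print_default_playback_info(ma_context* pContext)
{
    ma_device_info info;
    ma_uint32 iFormat;

    if (ma_context_get_device_info(pContext, ma_device_type_playback, NULL /* NULL = default device */, &info) != MA_SUCCESS) {
        return;
    }

    printf("%s (default: %d)\n", info.name, (int)info.isDefault);
    for (iFormat = 0; iFormat < info.nativeDataFormatCount; iFormat += 1) {
        printf("  %u channels @ %u Hz\n", (unsigned int)info.nativeDataFormats[iFormat].channels, (unsigned int)info.nativeDataFormats[iFormat].sampleRate);
    }
}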
*/ + + /* ID */ + MA_ZERO_MEMORY(pDeviceInfo->id.dsound, 16); + + /* Name / Description */ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + pDeviceInfo->isDefault = MA_TRUE; + } + + /* Retrieving detailed information is slightly different depending on the device type. */ + if (deviceType == ma_device_type_playback) { + /* Playback. */ + ma_IDirectSound* pDirectSound; + MA_DSCAPS caps; + WORD channels; + + result = ma_context_create_IDirectSound__dsound(pContext, ma_share_mode_shared, pDeviceID, &pDirectSound); + if (result != MA_SUCCESS) { + return result; + } + + MA_ZERO_OBJECT(&caps); + caps.dwSize = sizeof(caps); + hr = ma_IDirectSound_GetCaps(pDirectSound, &caps); + if (FAILED(hr)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_GetCaps() failed for playback device."); + return ma_result_from_HRESULT(hr); + } + + + /* Channels. Only a single channel count is reported for DirectSound. */ + if ((caps.dwFlags & MA_DSCAPS_PRIMARYSTEREO) != 0) { + /* It supports at least stereo, but could support more. */ + DWORD speakerConfig; + + channels = 2; + + /* Look at the speaker configuration to get a better idea on the channel count. */ + hr = ma_IDirectSound_GetSpeakerConfig(pDirectSound, &speakerConfig); + if (SUCCEEDED(hr)) { + ma_get_channels_from_speaker_config__dsound(speakerConfig, &channels, NULL); + } + } else { + /* It does not support stereo, which means we are stuck with mono. */ + channels = 1; + } + + + /* + In DirectSound, our native formats are centered around sample rates. All formats are supported, and we're only reporting a single channel + count. However, DirectSound can report a range of supported sample rates. We're only going to include standard rates known by miniaudio + in order to keep the size of this within reason. + */ + if ((caps.dwFlags & MA_DSCAPS_CONTINUOUSRATE) != 0) { + /* Multiple sample rates are supported. We'll report in order of our preferred sample rates. */ + size_t iStandardSampleRate; + for (iStandardSampleRate = 0; iStandardSampleRate < ma_countof(g_maStandardSampleRatePriorities); iStandardSampleRate += 1) { + ma_uint32 sampleRate = g_maStandardSampleRatePriorities[iStandardSampleRate]; + if (sampleRate >= caps.dwMinSecondarySampleRate && sampleRate <= caps.dwMaxSecondarySampleRate) { + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format = ma_format_unknown; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels = channels; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = sampleRate; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags = 0; + pDeviceInfo->nativeDataFormatCount += 1; + } + } + } else { + /* Only a single sample rate is supported. */ + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format = ma_format_unknown; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels = channels; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = caps.dwMaxSecondarySampleRate; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags = 0; + pDeviceInfo->nativeDataFormatCount += 1; + } + + ma_IDirectSound_Release(pDirectSound); + } else { + /* + Capture. 
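Before the capture branch, a standalone sketch of the rate-range selection used by the playback paths above and below: clamp the device's supported range, then take the first preferred rate that falls inside it. This is illustrative only; the priority list is a hypothetical stand-in for g_maStandardSampleRatePriorities rather than a copy of it, and the fallback differs slightly from ma_get_best_sample_rate_within_range(), which asserts instead.

#include <stddef.h>

static unsigned int pick_sample_rate_in_range(unsigned int rateMin, unsigned int rateMax)
{
    static const unsigned int priorities[] = { 48000, 44100, 32000, 24000, 22050, 16000, 11025, 8000 };
    size_t i;

    if (rateMin > rateMax) {
        rateMin = rateMax;    /* Normalize a nonsensical range, as the real helper does. */
    }

    for (i = 0; i < sizeof(priorities)/sizeof(priorities[0]); i += 1) {
        if (priorities[i] >= rateMin && priorities[i] <= rateMax) {
            return priorities[i];    /* First preferred rate inside [rateMin, rateMax]. */
        }
    }

    return rateMax;    /* No standard rate fits; fall back to the top of the range. */
}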
This is a little different to playback due to the say the supported formats are reported. Technically capture + devices can support a number of different formats, but for simplicity and consistency with ma_device_init() I'm just + reporting the best format. + */ + ma_IDirectSoundCapture* pDirectSoundCapture; + WORD channels; + WORD bitsPerSample; + DWORD sampleRate; + + result = ma_context_create_IDirectSoundCapture__dsound(pContext, ma_share_mode_shared, pDeviceID, &pDirectSoundCapture); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_context_get_format_info_for_IDirectSoundCapture__dsound(pContext, pDirectSoundCapture, &channels, &bitsPerSample, &sampleRate); + if (result != MA_SUCCESS) { + ma_IDirectSoundCapture_Release(pDirectSoundCapture); + return result; + } + + ma_IDirectSoundCapture_Release(pDirectSoundCapture); + + /* The format is always an integer format and is based on the bits per sample. */ + if (bitsPerSample == 8) { + pDeviceInfo->nativeDataFormats[0].format = ma_format_u8; + } else if (bitsPerSample == 16) { + pDeviceInfo->nativeDataFormats[0].format = ma_format_s16; + } else if (bitsPerSample == 24) { + pDeviceInfo->nativeDataFormats[0].format = ma_format_s24; + } else if (bitsPerSample == 32) { + pDeviceInfo->nativeDataFormats[0].format = ma_format_s32; + } else { + return MA_FORMAT_NOT_SUPPORTED; + } + + pDeviceInfo->nativeDataFormats[0].channels = channels; + pDeviceInfo->nativeDataFormats[0].sampleRate = sampleRate; + pDeviceInfo->nativeDataFormats[0].flags = 0; + pDeviceInfo->nativeDataFormatCount = 1; + } + + return MA_SUCCESS; +} + + + +static ma_result ma_device_uninit__dsound(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->dsound.pCaptureBuffer != NULL) { + ma_IDirectSoundCaptureBuffer_Release((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer); + } + if (pDevice->dsound.pCapture != NULL) { + ma_IDirectSoundCapture_Release((ma_IDirectSoundCapture*)pDevice->dsound.pCapture); + } + + if (pDevice->dsound.pPlaybackBuffer != NULL) { + ma_IDirectSoundBuffer_Release((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer); + } + if (pDevice->dsound.pPlaybackPrimaryBuffer != NULL) { + ma_IDirectSoundBuffer_Release((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer); + } + if (pDevice->dsound.pPlayback != NULL) { + ma_IDirectSound_Release((ma_IDirectSound*)pDevice->dsound.pPlayback); + } + + return MA_SUCCESS; +} + +static ma_result ma_config_to_WAVEFORMATEXTENSIBLE(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const ma_channel* pChannelMap, MA_WAVEFORMATEXTENSIBLE* pWF) +{ + GUID subformat; + + if (format == ma_format_unknown) { + format = MA_DEFAULT_FORMAT; + } + + if (channels == 0) { + channels = MA_DEFAULT_CHANNELS; + } + + if (sampleRate == 0) { + sampleRate = MA_DEFAULT_SAMPLE_RATE; + } + + switch (format) + { + case ma_format_u8: + case ma_format_s16: + case ma_format_s24: + /*case ma_format_s24_32:*/ + case ma_format_s32: + { + subformat = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM; + } break; + + case ma_format_f32: + { + subformat = MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT; + } break; + + default: + return MA_FORMAT_NOT_SUPPORTED; + } + + MA_ZERO_OBJECT(pWF); + pWF->cbSize = sizeof(*pWF); + pWF->wFormatTag = WAVE_FORMAT_EXTENSIBLE; + pWF->nChannels = (WORD)channels; + pWF->nSamplesPerSec = (DWORD)sampleRate; + pWF->wBitsPerSample = (WORD)(ma_get_bytes_per_sample(format)*8); + pWF->nBlockAlign = (WORD)(pWF->nChannels * pWF->wBitsPerSample / 8); + pWF->nAvgBytesPerSec = pWF->nBlockAlign * 
pWF->nSamplesPerSec; + pWF->Samples.wValidBitsPerSample = pWF->wBitsPerSample; + pWF->dwChannelMask = ma_channel_map_to_channel_mask__win32(pChannelMap, channels); + pWF->SubFormat = subformat; + + return MA_SUCCESS; +} + +static ma_uint32 ma_calculate_period_size_in_frames_from_descriptor__dsound(const ma_device_descriptor* pDescriptor, ma_uint32 nativeSampleRate, ma_performance_profile performanceProfile) +{ + /* + DirectSound has a minimum period size of 20ms. In practice, this doesn't seem to be enough for + reliable glitch-free processing so going to use 30ms instead. + */ + ma_uint32 minPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(30, nativeSampleRate); + ma_uint32 periodSizeInFrames; + + periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, nativeSampleRate, performanceProfile); + if (periodSizeInFrames < minPeriodSizeInFrames) { + periodSizeInFrames = minPeriodSizeInFrames; + } + + return periodSizeInFrames; +} + +static ma_result ma_device_init__dsound(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + ma_result result; + HRESULT hr; + + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->dsound); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* + Unfortunately DirectSound uses different APIs and data structures for playback and capture devices. We need to initialize + the capture device first because we'll want to match its buffer size and period count on the playback side if we're using + full-duplex mode. + */ + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + MA_WAVEFORMATEXTENSIBLE wf; + MA_DSCBUFFERDESC descDS; + ma_uint32 periodSizeInFrames; + ma_uint32 periodCount; + char rawdata[1024]; /* <-- Ugly hack to avoid a malloc() due to a crappy DirectSound API. */ + MA_WAVEFORMATEXTENSIBLE* pActualFormat; + + result = ma_config_to_WAVEFORMATEXTENSIBLE(pDescriptorCapture->format, pDescriptorCapture->channels, pDescriptorCapture->sampleRate, pDescriptorCapture->channelMap, &wf); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_context_create_IDirectSoundCapture__dsound(pDevice->pContext, pDescriptorCapture->shareMode, pDescriptorCapture->pDeviceID, (ma_IDirectSoundCapture**)&pDevice->dsound.pCapture); + if (result != MA_SUCCESS) { + ma_device_uninit__dsound(pDevice); + return result; + } + + result = ma_context_get_format_info_for_IDirectSoundCapture__dsound(pDevice->pContext, (ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &wf.nChannels, &wf.wBitsPerSample, &wf.nSamplesPerSec); + if (result != MA_SUCCESS) { + ma_device_uninit__dsound(pDevice); + return result; + } + + wf.nBlockAlign = (WORD)(wf.nChannels * wf.wBitsPerSample / 8); + wf.nAvgBytesPerSec = wf.nBlockAlign * wf.nSamplesPerSec; + wf.Samples.wValidBitsPerSample = wf.wBitsPerSample; + wf.SubFormat = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM; + + /* The size of the buffer must be a clean multiple of the period count. */ + periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__dsound(pDescriptorCapture, wf.nSamplesPerSec, pConfig->performanceProfile); + periodCount = (pDescriptorCapture->periodCount > 0) ? 
pDescriptorCapture->periodCount : MA_DEFAULT_PERIODS; + + MA_ZERO_OBJECT(&descDS); + descDS.dwSize = sizeof(descDS); + descDS.dwFlags = 0; + descDS.dwBufferBytes = periodSizeInFrames * periodCount * wf.nBlockAlign; + descDS.lpwfxFormat = (MA_WAVEFORMATEX*)&wf; + hr = ma_IDirectSoundCapture_CreateCaptureBuffer((ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &descDS, (ma_IDirectSoundCaptureBuffer**)&pDevice->dsound.pCaptureBuffer, NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCapture_CreateCaptureBuffer() failed for capture device."); + return ma_result_from_HRESULT(hr); + } + + /* Get the _actual_ properties of the buffer. */ + pActualFormat = (MA_WAVEFORMATEXTENSIBLE*)rawdata; + hr = ma_IDirectSoundCaptureBuffer_GetFormat((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, (MA_WAVEFORMATEX*)pActualFormat, sizeof(rawdata), NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to retrieve the actual format of the capture device's buffer."); + return ma_result_from_HRESULT(hr); + } + + /* We can now start setting the output data formats. */ + pDescriptorCapture->format = ma_format_from_WAVEFORMATEX((MA_WAVEFORMATEX*)pActualFormat); + pDescriptorCapture->channels = pActualFormat->nChannels; + pDescriptorCapture->sampleRate = pActualFormat->nSamplesPerSec; + + /* Get the native channel map based on the channel mask. */ + if (pActualFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE) { + ma_channel_mask_to_channel_map__win32(pActualFormat->dwChannelMask, pDescriptorCapture->channels, pDescriptorCapture->channelMap); + } else { + ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pDescriptorCapture->channels, pDescriptorCapture->channelMap); + } + + /* + After getting the actual format the size of the buffer in frames may have actually changed. However, we want this to be as close to what the + user has asked for as possible, so let's go ahead and release the old capture buffer and create a new one in this case. + */ + if (periodSizeInFrames != (descDS.dwBufferBytes / ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels) / periodCount)) { + descDS.dwBufferBytes = periodSizeInFrames * ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels) * periodCount; + ma_IDirectSoundCaptureBuffer_Release((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer); + + hr = ma_IDirectSoundCapture_CreateCaptureBuffer((ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &descDS, (ma_IDirectSoundCaptureBuffer**)&pDevice->dsound.pCaptureBuffer, NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Second attempt at IDirectSoundCapture_CreateCaptureBuffer() failed for capture device."); + return ma_result_from_HRESULT(hr); + } + } + + /* DirectSound should give us a buffer exactly the size we asked for. */ + pDescriptorCapture->periodSizeInFrames = periodSizeInFrames; + pDescriptorCapture->periodCount = periodCount; + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + MA_WAVEFORMATEXTENSIBLE wf; + MA_DSBUFFERDESC descDSPrimary; + MA_DSCAPS caps; + char rawdata[1024]; /* <-- Ugly hack to avoid a malloc() due to a crappy DirectSound API. 
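The "ugly hack" mentioned here is worth spelling out: GetFormat() fills a caller-supplied WAVEFORMATEX blob whose true size is not known up front, so a fixed stack buffer stands in for a heap allocation. A minimal sketch using the wrappers and types declared earlier in this file (get_actual_buffer_format is a hypothetical helper, not part of the commit):

static ma_result get_actual_buffer_format(ma_IDirectSoundBuffer* pBuffer, MA_WAVEFORMATEXTENSIBLE* pOut)
{
    char rawdata[1024]; /* Comfortably larger than WAVEFORMATEX plus any cbSize payload. */
    MA_WAVEFORMATEX* pWF = (MA_WAVEFORMATEX*)rawdata;
    HRESULT hr = ma_IDirectSoundBuffer_GetFormat(pBuffer, pWF, sizeof(rawdata), NULL);
    if (FAILED(hr)) {
        return ma_result_from_HRESULT(hr);
    }

    /* The extensible members are only meaningful when wFormatTag == WAVE_FORMAT_EXTENSIBLE,
       which is why the code below checks the tag before reading dwChannelMask. */
    *pOut = *(MA_WAVEFORMATEXTENSIBLE*)rawdata;
    return MA_SUCCESS;
}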
*/ + MA_WAVEFORMATEXTENSIBLE* pActualFormat; + ma_uint32 periodSizeInFrames; + ma_uint32 periodCount; + MA_DSBUFFERDESC descDS; + WORD nativeChannelCount; + DWORD nativeChannelMask = 0; + + result = ma_config_to_WAVEFORMATEXTENSIBLE(pDescriptorPlayback->format, pDescriptorPlayback->channels, pDescriptorPlayback->sampleRate, pDescriptorPlayback->channelMap, &wf); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_context_create_IDirectSound__dsound(pDevice->pContext, pDescriptorPlayback->shareMode, pDescriptorPlayback->pDeviceID, (ma_IDirectSound**)&pDevice->dsound.pPlayback); + if (result != MA_SUCCESS) { + ma_device_uninit__dsound(pDevice); + return result; + } + + MA_ZERO_OBJECT(&descDSPrimary); + descDSPrimary.dwSize = sizeof(MA_DSBUFFERDESC); + descDSPrimary.dwFlags = MA_DSBCAPS_PRIMARYBUFFER | MA_DSBCAPS_CTRLVOLUME; + hr = ma_IDirectSound_CreateSoundBuffer((ma_IDirectSound*)pDevice->dsound.pPlayback, &descDSPrimary, (ma_IDirectSoundBuffer**)&pDevice->dsound.pPlaybackPrimaryBuffer, NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_CreateSoundBuffer() failed for playback device's primary buffer."); + return ma_result_from_HRESULT(hr); + } + + + /* We may want to make some adjustments to the format if we are using defaults. */ + MA_ZERO_OBJECT(&caps); + caps.dwSize = sizeof(caps); + hr = ma_IDirectSound_GetCaps((ma_IDirectSound*)pDevice->dsound.pPlayback, &caps); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_GetCaps() failed for playback device."); + return ma_result_from_HRESULT(hr); + } + + if ((caps.dwFlags & MA_DSCAPS_PRIMARYSTEREO) != 0) { + DWORD speakerConfig; + + /* It supports at least stereo, but could support more. */ + nativeChannelCount = 2; + + /* Look at the speaker configuration to get a better idea on the channel count. */ + if (SUCCEEDED(ma_IDirectSound_GetSpeakerConfig((ma_IDirectSound*)pDevice->dsound.pPlayback, &speakerConfig))) { + ma_get_channels_from_speaker_config__dsound(speakerConfig, &nativeChannelCount, &nativeChannelMask); + } + } else { + /* It does not support stereo, which means we are stuck with mono. */ + nativeChannelCount = 1; + nativeChannelMask = 0x00000001; + } + + if (pDescriptorPlayback->channels == 0) { + wf.nChannels = nativeChannelCount; + wf.dwChannelMask = nativeChannelMask; + } + + if (pDescriptorPlayback->sampleRate == 0) { + /* We base the sample rate on the values returned by GetCaps(). */ + if ((caps.dwFlags & MA_DSCAPS_CONTINUOUSRATE) != 0) { + wf.nSamplesPerSec = ma_get_best_sample_rate_within_range(caps.dwMinSecondarySampleRate, caps.dwMaxSecondarySampleRate); + } else { + wf.nSamplesPerSec = caps.dwMaxSecondarySampleRate; + } + } + + wf.nBlockAlign = (WORD)(wf.nChannels * wf.wBitsPerSample / 8); + wf.nAvgBytesPerSec = wf.nBlockAlign * wf.nSamplesPerSec; + + /* + From MSDN: + + The method succeeds even if the hardware does not support the requested format; DirectSound sets the buffer to the closest + supported format. To determine whether this has happened, an application can call the GetFormat method for the primary buffer + and compare the result with the format that was requested with the SetFormat method. 
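In other words: treat SetFormat() as a request, then read back what was actually applied. A small sketch of that pattern using the wrappers declared earlier in this file (apply_format_and_read_back is a hypothetical helper, not part of the commit):

static HRESULT apply_format_and_read_back(ma_IDirectSoundBuffer* pPrimaryBuffer, const MA_WAVEFORMATEX* pRequested, MA_WAVEFORMATEX* pActual, DWORD actualSizeInBytes)
{
    HRESULT hr = ma_IDirectSoundBuffer_SetFormat(pPrimaryBuffer, pRequested);
    if (FAILED(hr)) {
        return hr; /* The caller can retry with a more conservative format, as the code below does. */
    }

    /* Success does not mean the exact format was accepted; DirectSound may have substituted the
       closest supported one, so always read the applied format back and compare it to the request. */
    return ma_IDirectSoundBuffer_GetFormat(pPrimaryBuffer, pActual, actualSizeInBytes, NULL);
}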
+ */ + hr = ma_IDirectSoundBuffer_SetFormat((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer, (MA_WAVEFORMATEX*)&wf); + if (FAILED(hr)) { + /* + If setting of the format failed we'll try again with some fallback settings. On Windows 98 I have + observed that IEEE_FLOAT does not work. We'll therefore enforce PCM. I also had issues where a + sample rate of 48000 did not work correctly. Not sure if it was a driver issue or not, but will + use 44100 for the sample rate. + */ + wf.cbSize = 18; /* NOTE: Don't use sizeof(MA_WAVEFORMATEX) here because it's got an extra 2 bytes due to padding. */ + wf.wFormatTag = WAVE_FORMAT_PCM; + wf.wBitsPerSample = 16; + wf.nChannels = nativeChannelCount; + wf.nSamplesPerSec = 44100; + wf.nBlockAlign = wf.nChannels * (wf.wBitsPerSample / 8); + wf.nAvgBytesPerSec = wf.nSamplesPerSec * wf.nBlockAlign; + + hr = ma_IDirectSoundBuffer_SetFormat((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer, (MA_WAVEFORMATEX*)&wf); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to set format of playback device's primary buffer."); + return ma_result_from_HRESULT(hr); + } + } + + /* Get the _actual_ properties of the buffer. */ + pActualFormat = (MA_WAVEFORMATEXTENSIBLE*)rawdata; + hr = ma_IDirectSoundBuffer_GetFormat((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer, (MA_WAVEFORMATEX*)pActualFormat, sizeof(rawdata), NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to retrieve the actual format of the playback device's primary buffer."); + return ma_result_from_HRESULT(hr); + } + + /* We now have enough information to start setting some output properties. */ + pDescriptorPlayback->format = ma_format_from_WAVEFORMATEX((MA_WAVEFORMATEX*)pActualFormat); + pDescriptorPlayback->channels = pActualFormat->nChannels; + pDescriptorPlayback->sampleRate = pActualFormat->nSamplesPerSec; + + /* Get the internal channel map based on the channel mask. */ + if (pActualFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE) { + ma_channel_mask_to_channel_map__win32(pActualFormat->dwChannelMask, pDescriptorPlayback->channels, pDescriptorPlayback->channelMap); + } else { + ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pDescriptorPlayback->channels, pDescriptorPlayback->channelMap); + } + + /* The size of the buffer must be a clean multiple of the period count. */ + periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__dsound(pDescriptorPlayback, pDescriptorPlayback->sampleRate, pConfig->performanceProfile); + periodCount = (pDescriptorPlayback->periodCount > 0) ? pDescriptorPlayback->periodCount : MA_DEFAULT_PERIODS; + + /* + Meaning of dwFlags (from MSDN): + + DSBCAPS_CTRLPOSITIONNOTIFY + The buffer has position notification capability. + + DSBCAPS_GLOBALFOCUS + With this flag set, an application using DirectSound can continue to play its buffers if the user switches focus to + another application, even if the new application uses DirectSound. + + DSBCAPS_GETCURRENTPOSITION2 + In the first version of DirectSound, the play cursor was significantly ahead of the actual playing sound on emulated + sound cards; it was directly behind the write cursor. Now, if the DSBCAPS_GETCURRENTPOSITION2 flag is specified, the + application can get a more accurate play cursor. 
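A worked example of the buffer-size arithmetic used for descDS.dwBufferBytes below (illustrative; the helper is hypothetical and the numbers assume the 30 ms minimum period from ma_calculate_period_size_in_frames_from_descriptor__dsound() above): at 48000 Hz, 30 ms is 48000 * 30 / 1000 = 1440 frames; stereo ma_format_f32 is 8 bytes per frame, so with 3 periods the secondary buffer comes out at 1440 * 3 * 8 = 34560 bytes.

static ma_uint32 dsound_buffer_size_in_bytes(ma_uint32 sampleRate, ma_uint32 periodSizeInMilliseconds, ma_uint32 periodCount, ma_uint32 bytesPerFrame)
{
    ma_uint32 periodSizeInFrames = (sampleRate * periodSizeInMilliseconds) / 1000;
    return periodSizeInFrames * periodCount * bytesPerFrame;    /* e.g. 1440 * 3 * 8 = 34560 bytes. */
}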
+ */ + MA_ZERO_OBJECT(&descDS); + descDS.dwSize = sizeof(descDS); + descDS.dwFlags = MA_DSBCAPS_CTRLPOSITIONNOTIFY | MA_DSBCAPS_GLOBALFOCUS | MA_DSBCAPS_GETCURRENTPOSITION2; + descDS.dwBufferBytes = periodSizeInFrames * periodCount * ma_get_bytes_per_frame(pDescriptorPlayback->format, pDescriptorPlayback->channels); + descDS.lpwfxFormat = (MA_WAVEFORMATEX*)pActualFormat; + hr = ma_IDirectSound_CreateSoundBuffer((ma_IDirectSound*)pDevice->dsound.pPlayback, &descDS, (ma_IDirectSoundBuffer**)&pDevice->dsound.pPlaybackBuffer, NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_CreateSoundBuffer() failed for playback device's secondary buffer."); + return ma_result_from_HRESULT(hr); + } + + /* DirectSound should give us a buffer exactly the size we asked for. */ + pDescriptorPlayback->periodSizeInFrames = periodSizeInFrames; + pDescriptorPlayback->periodCount = periodCount; + } + + return MA_SUCCESS; +} + + +static ma_result ma_device_data_loop__dsound(ma_device* pDevice) +{ + ma_result result = MA_SUCCESS; + ma_uint32 bpfDeviceCapture = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 bpfDevicePlayback = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + HRESULT hr; + DWORD lockOffsetInBytesCapture; + DWORD lockSizeInBytesCapture; + DWORD mappedSizeInBytesCapture; + DWORD mappedDeviceFramesProcessedCapture; + void* pMappedDeviceBufferCapture; + DWORD lockOffsetInBytesPlayback; + DWORD lockSizeInBytesPlayback; + DWORD mappedSizeInBytesPlayback; + void* pMappedDeviceBufferPlayback; + DWORD prevReadCursorInBytesCapture = 0; + DWORD prevPlayCursorInBytesPlayback = 0; + ma_bool32 physicalPlayCursorLoopFlagPlayback = 0; + DWORD virtualWriteCursorInBytesPlayback = 0; + ma_bool32 virtualWriteCursorLoopFlagPlayback = 0; + ma_bool32 isPlaybackDeviceStarted = MA_FALSE; + ma_uint32 framesWrittenToPlaybackDevice = 0; /* For knowing whether or not the playback device needs to be started. */ + ma_uint32 waitTimeInMilliseconds = 1; + + MA_ASSERT(pDevice != NULL); + + /* The first thing to do is start the capture device. The playback device is only started after the first period is written. */ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + hr = ma_IDirectSoundCaptureBuffer_Start((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, MA_DSCBSTART_LOOPING); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCaptureBuffer_Start() failed."); + return ma_result_from_HRESULT(hr); + } + } + + while (ma_device_get_state(pDevice) == ma_device_state_started) { + switch (pDevice->type) + { + case ma_device_type_duplex: + { + DWORD physicalCaptureCursorInBytes; + DWORD physicalReadCursorInBytes; + hr = ma_IDirectSoundCaptureBuffer_GetCurrentPosition((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, &physicalCaptureCursorInBytes, &physicalReadCursorInBytes); + if (FAILED(hr)) { + return ma_result_from_HRESULT(hr); + } + + /* If nothing is available we just sleep for a bit and return from this iteration. */ + if (physicalReadCursorInBytes == prevReadCursorInBytesCapture) { + ma_sleep(waitTimeInMilliseconds); + continue; /* Nothing is available in the capture buffer. */ + } + + /* + The current position has moved. 
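A standalone sketch of the lock-region computation that follows (illustrative; compute_capture_lock_region is a hypothetical helper using the same DWORD types as the surrounding code). When the read cursor has wrapped, only the tail of the ring buffer is locked on this iteration and the wrapped portion is picked up on the next pass.

static void compute_capture_lock_region(DWORD prevReadCursorInBytes, DWORD currentReadCursorInBytes, DWORD bufferSizeInBytes, DWORD* pLockOffsetInBytes, DWORD* pLockSizeInBytes)
{
    if (prevReadCursorInBytes < currentReadCursorInBytes) {
        /* No wrap: lock everything between the previous position and the current read cursor. */
        *pLockOffsetInBytes = prevReadCursorInBytes;
        *pLockSizeInBytes = currentReadCursorInBytes - prevReadCursorInBytes;
    } else if (prevReadCursorInBytes < bufferSizeInBytes) {
        /* Wrapped: lock up to the end of the buffer first. */
        *pLockOffsetInBytes = prevReadCursorInBytes;
        *pLockSizeInBytes = bufferSizeInBytes - prevReadCursorInBytes;
    } else {
        /* Previous position was exactly at the end: start again from the beginning. */
        *pLockOffsetInBytes = 0;
        *pLockSizeInBytes = currentReadCursorInBytes;
    }
}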
We need to map all of the captured samples and write them to the playback device, making sure + we don't return until every frame has been copied over. + */ + if (prevReadCursorInBytesCapture < physicalReadCursorInBytes) { + /* The capture position has not looped. This is the simple case. */ + lockOffsetInBytesCapture = prevReadCursorInBytesCapture; + lockSizeInBytesCapture = (physicalReadCursorInBytes - prevReadCursorInBytesCapture); + } else { + /* + The capture position has looped. This is the more complex case. Map to the end of the buffer. If this does not return anything, + do it again from the start. + */ + if (prevReadCursorInBytesCapture < pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) { + /* Lock up to the end of the buffer. */ + lockOffsetInBytesCapture = prevReadCursorInBytesCapture; + lockSizeInBytesCapture = (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) - prevReadCursorInBytesCapture; + } else { + /* Lock starting from the start of the buffer. */ + lockOffsetInBytesCapture = 0; + lockSizeInBytesCapture = physicalReadCursorInBytes; + } + } + + if (lockSizeInBytesCapture == 0) { + ma_sleep(waitTimeInMilliseconds); + continue; /* Nothing is available in the capture buffer. */ + } + + hr = ma_IDirectSoundCaptureBuffer_Lock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, lockOffsetInBytesCapture, lockSizeInBytesCapture, &pMappedDeviceBufferCapture, &mappedSizeInBytesCapture, NULL, NULL, 0); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from capture device in preparation for writing to the device."); + return ma_result_from_HRESULT(hr); + } + + + /* At this point we have some input data that we need to output. We do not return until every mapped frame of the input data is written to the playback device. */ + mappedDeviceFramesProcessedCapture = 0; + + for (;;) { /* Keep writing to the playback device. 
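One detail worth highlighting before the conversion code below: both frame counts passed to ma_data_converter_process_pcm_frames() are in/out parameters (capacities on input, frames actually consumed and produced on output). A minimal wrapper showing that contract (illustrative; convert_some_frames is hypothetical):

static ma_result convert_some_frames(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64 frameCapIn, void* pFramesOut, ma_uint64 frameCapOut, ma_uint64* pFramesConsumed, ma_uint64* pFramesProduced)
{
    ma_uint64 frameCountIn = frameCapIn;
    ma_uint64 frameCountOut = frameCapOut;
    ma_result result = ma_data_converter_process_pcm_frames(pConverter, pFramesIn, &frameCountIn, pFramesOut, &frameCountOut);

    *pFramesConsumed = frameCountIn;   /* Frames read from pFramesIn. */
    *pFramesProduced = frameCountOut;  /* Frames written to pFramesOut. */
    return result;
}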
*/ + ma_uint8 inputFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 inputFramesInClientFormatCap = sizeof(inputFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint8 outputFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 outputFramesInClientFormatCap = sizeof(outputFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint32 outputFramesInClientFormatCount; + ma_uint32 outputFramesInClientFormatConsumed = 0; + ma_uint64 clientCapturedFramesToProcess = ma_min(inputFramesInClientFormatCap, outputFramesInClientFormatCap); + ma_uint64 deviceCapturedFramesToProcess = (mappedSizeInBytesCapture / bpfDeviceCapture) - mappedDeviceFramesProcessedCapture; + void* pRunningMappedDeviceBufferCapture = ma_offset_ptr(pMappedDeviceBufferCapture, mappedDeviceFramesProcessedCapture * bpfDeviceCapture); + + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningMappedDeviceBufferCapture, &deviceCapturedFramesToProcess, inputFramesInClientFormat, &clientCapturedFramesToProcess); + if (result != MA_SUCCESS) { + break; + } + + outputFramesInClientFormatCount = (ma_uint32)clientCapturedFramesToProcess; + mappedDeviceFramesProcessedCapture += (ma_uint32)deviceCapturedFramesToProcess; + + ma_device__handle_data_callback(pDevice, outputFramesInClientFormat, inputFramesInClientFormat, (ma_uint32)clientCapturedFramesToProcess); + + /* At this point we have input and output data in client format. All we need to do now is convert it to the output device format. This may take a few passes. */ + for (;;) { + ma_uint32 framesWrittenThisIteration; + DWORD physicalPlayCursorInBytes; + DWORD physicalWriteCursorInBytes; + DWORD availableBytesPlayback; + DWORD silentPaddingInBytes = 0; /* <-- Must be initialized to 0. */ + + /* We need the physical play and write cursors. */ + if (FAILED(ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes))) { + break; + } + + if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) { + physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback; + } + prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes; + + /* If there's any bytes available for writing we can do that now. The space between the virtual cursor position and play cursor. */ + if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) { + /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */ + if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) { + availableBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback; + availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */ + } else { + /* This is an error. */ + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[DirectSound] (Duplex/Playback): Play cursor has moved in front of the write cursor (same loop iteration). physicalPlayCursorInBytes=%ld, virtualWriteCursorInBytes=%ld.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback); + availableBytesPlayback = 0; + } + } else { + /* Different loop iterations. The available bytes only goes from the virtual write cursor to the physical play cursor. 
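The two branches can be summarised by a small helper (illustrative only; compute_available_playback_bytes is hypothetical). The loop flags flip each time the corresponding cursor wraps, which is what distinguishes a full buffer from an empty one when both cursors sit on the same byte offset.

static DWORD compute_available_playback_bytes(DWORD playCursorInBytes, ma_bool32 playLoopFlag, DWORD writeCursorInBytes, ma_bool32 writeLoopFlag, DWORD bufferSizeInBytes)
{
    if (playLoopFlag == writeLoopFlag) {
        /* Same lap: writable space wraps from the write cursor around to the play cursor. */
        if (playCursorInBytes <= writeCursorInBytes) {
            return (bufferSizeInBytes - writeCursorInBytes) + playCursorInBytes;
        }
    } else {
        /* Write cursor is one lap ahead: writable space is only the gap up to the play cursor. */
        if (playCursorInBytes >= writeCursorInBytes) {
            return playCursorInBytes - writeCursorInBytes;
        }
    }
    return 0; /* Inconsistent cursor ordering; the real code logs a warning and treats it as no space. */
}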
*/ + if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) { + availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback; + } else { + /* This is an error. */ + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[DirectSound] (Duplex/Playback): Write cursor has moved behind the play cursor (different loop iterations). physicalPlayCursorInBytes=%ld, virtualWriteCursorInBytes=%ld.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback); + availableBytesPlayback = 0; + } + } + + /* If there's no room available for writing we need to wait for more. */ + if (availableBytesPlayback == 0) { + /* If we haven't started the device yet, this will never get beyond 0. In this case we need to get the device started. */ + if (!isPlaybackDeviceStarted) { + hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING); + if (FAILED(hr)) { + ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed."); + return ma_result_from_HRESULT(hr); + } + isPlaybackDeviceStarted = MA_TRUE; + } else { + ma_sleep(waitTimeInMilliseconds); + continue; + } + } + + + /* Getting here means there room available somewhere. We limit this to either the end of the buffer or the physical play cursor, whichever is closest. */ + lockOffsetInBytesPlayback = virtualWriteCursorInBytesPlayback; + if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) { + /* Same loop iteration. Go up to the end of the buffer. */ + lockSizeInBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback; + } else { + /* Different loop iterations. Go up to the physical play cursor. */ + lockSizeInBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback; + } + + hr = ma_IDirectSoundBuffer_Lock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, lockOffsetInBytesPlayback, lockSizeInBytesPlayback, &pMappedDeviceBufferPlayback, &mappedSizeInBytesPlayback, NULL, NULL, 0); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from playback device in preparation for writing to the device."); + result = ma_result_from_HRESULT(hr); + break; + } + + /* + Experiment: If the playback buffer is being starved, pad it with some silence to get it back in sync. This will cause a glitch, but it may prevent + endless glitching due to it constantly running out of data. + */ + if (isPlaybackDeviceStarted) { + DWORD bytesQueuedForPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - availableBytesPlayback; + if (bytesQueuedForPlayback < (pDevice->playback.internalPeriodSizeInFrames*bpfDevicePlayback)) { + silentPaddingInBytes = (pDevice->playback.internalPeriodSizeInFrames*2*bpfDevicePlayback) - bytesQueuedForPlayback; + if (silentPaddingInBytes > lockSizeInBytesPlayback) { + silentPaddingInBytes = lockSizeInBytesPlayback; + } + + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[DirectSound] (Duplex/Playback) Playback buffer starved. availableBytesPlayback=%ld, silentPaddingInBytes=%ld\n", availableBytesPlayback, silentPaddingInBytes); + } + } + + /* At this point we have a buffer for output. 
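A worked example of the starvation padding computed just above, using the earlier 1440-frame / 3-period / 8-bytes-per-frame numbers (illustrative; the helper is hypothetical): the buffer holds 34560 bytes, so if 30000 bytes are currently writable then only 4560 bytes remain queued, which is less than one period (11520 bytes); the padding becomes (11520 * 2) - 4560 = 18480 bytes of silence, capped to the locked region.

static DWORD compute_silent_padding_in_bytes(DWORD periodSizeInBytes, DWORD bufferSizeInBytes, DWORD availableBytes, DWORD lockSizeInBytes)
{
    DWORD bytesQueued = bufferSizeInBytes - availableBytes;
    DWORD padding = 0;

    if (bytesQueued < periodSizeInBytes) {              /* Less than one period still queued: starving. */
        padding = (periodSizeInBytes*2) - bytesQueued;  /* Top back up to two periods' worth. */
        if (padding > lockSizeInBytes) {
            padding = lockSizeInBytes;                  /* Can't pad more than what was locked. */
        }
    }

    return padding;
}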
*/ + if (silentPaddingInBytes > 0) { + MA_ZERO_MEMORY(pMappedDeviceBufferPlayback, silentPaddingInBytes); + framesWrittenThisIteration = silentPaddingInBytes/bpfDevicePlayback; + } else { + ma_uint64 convertedFrameCountIn = (outputFramesInClientFormatCount - outputFramesInClientFormatConsumed); + ma_uint64 convertedFrameCountOut = mappedSizeInBytesPlayback/bpfDevicePlayback; + void* pConvertedFramesIn = ma_offset_ptr(outputFramesInClientFormat, outputFramesInClientFormatConsumed * bpfDevicePlayback); + void* pConvertedFramesOut = pMappedDeviceBufferPlayback; + + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, pConvertedFramesIn, &convertedFrameCountIn, pConvertedFramesOut, &convertedFrameCountOut); + if (result != MA_SUCCESS) { + break; + } + + outputFramesInClientFormatConsumed += (ma_uint32)convertedFrameCountOut; + framesWrittenThisIteration = (ma_uint32)convertedFrameCountOut; + } + + + hr = ma_IDirectSoundBuffer_Unlock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, pMappedDeviceBufferPlayback, framesWrittenThisIteration*bpfDevicePlayback, NULL, 0); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from playback device after writing to the device."); + result = ma_result_from_HRESULT(hr); + break; + } + + virtualWriteCursorInBytesPlayback += framesWrittenThisIteration*bpfDevicePlayback; + if ((virtualWriteCursorInBytesPlayback/bpfDevicePlayback) == pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods) { + virtualWriteCursorInBytesPlayback = 0; + virtualWriteCursorLoopFlagPlayback = !virtualWriteCursorLoopFlagPlayback; + } + + /* + We may need to start the device. We want two full periods to be written before starting the playback device. Having an extra period adds + a bit of a buffer to prevent the playback buffer from getting starved. + */ + framesWrittenToPlaybackDevice += framesWrittenThisIteration; + if (!isPlaybackDeviceStarted && framesWrittenToPlaybackDevice >= (pDevice->playback.internalPeriodSizeInFrames*2)) { + hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING); + if (FAILED(hr)) { + ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed."); + return ma_result_from_HRESULT(hr); + } + isPlaybackDeviceStarted = MA_TRUE; + } + + if (framesWrittenThisIteration < mappedSizeInBytesPlayback/bpfDevicePlayback) { + break; /* We're finished with the output data.*/ + } + } + + if (clientCapturedFramesToProcess == 0) { + break; /* We just consumed every input sample. */ + } + } + + + /* At this point we're done with the mapped portion of the capture buffer. 
*/ + hr = ma_IDirectSoundCaptureBuffer_Unlock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, pMappedDeviceBufferCapture, mappedSizeInBytesCapture, NULL, 0); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from capture device after reading from the device."); + return ma_result_from_HRESULT(hr); + } + prevReadCursorInBytesCapture = (lockOffsetInBytesCapture + mappedSizeInBytesCapture); + } break; + + + + case ma_device_type_capture: + { + DWORD physicalCaptureCursorInBytes; + DWORD physicalReadCursorInBytes; + hr = ma_IDirectSoundCaptureBuffer_GetCurrentPosition((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, &physicalCaptureCursorInBytes, &physicalReadCursorInBytes); + if (FAILED(hr)) { + return MA_ERROR; + } + + /* If the previous capture position is the same as the current position we need to wait a bit longer. */ + if (prevReadCursorInBytesCapture == physicalReadCursorInBytes) { + ma_sleep(waitTimeInMilliseconds); + continue; + } + + /* Getting here means we have capture data available. */ + if (prevReadCursorInBytesCapture < physicalReadCursorInBytes) { + /* The capture position has not looped. This is the simple case. */ + lockOffsetInBytesCapture = prevReadCursorInBytesCapture; + lockSizeInBytesCapture = (physicalReadCursorInBytes - prevReadCursorInBytesCapture); + } else { + /* + The capture position has looped. This is the more complex case. Map to the end of the buffer. If this does not return anything, + do it again from the start. + */ + if (prevReadCursorInBytesCapture < pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) { + /* Lock up to the end of the buffer. */ + lockOffsetInBytesCapture = prevReadCursorInBytesCapture; + lockSizeInBytesCapture = (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) - prevReadCursorInBytesCapture; + } else { + /* Lock starting from the start of the buffer. */ + lockOffsetInBytesCapture = 0; + lockSizeInBytesCapture = physicalReadCursorInBytes; + } + } + + if (lockSizeInBytesCapture < pDevice->capture.internalPeriodSizeInFrames) { + ma_sleep(waitTimeInMilliseconds); + continue; /* Nothing is available in the capture buffer. 
*/ + } + + hr = ma_IDirectSoundCaptureBuffer_Lock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, lockOffsetInBytesCapture, lockSizeInBytesCapture, &pMappedDeviceBufferCapture, &mappedSizeInBytesCapture, NULL, NULL, 0); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from capture device in preparation for writing to the device."); + result = ma_result_from_HRESULT(hr); + } + + if (lockSizeInBytesCapture != mappedSizeInBytesCapture) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[DirectSound] (Capture) lockSizeInBytesCapture=%ld != mappedSizeInBytesCapture=%ld\n", lockSizeInBytesCapture, mappedSizeInBytesCapture); + } + + ma_device__send_frames_to_client(pDevice, mappedSizeInBytesCapture/bpfDeviceCapture, pMappedDeviceBufferCapture); + + hr = ma_IDirectSoundCaptureBuffer_Unlock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, pMappedDeviceBufferCapture, mappedSizeInBytesCapture, NULL, 0); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from capture device after reading from the device."); + return ma_result_from_HRESULT(hr); + } + prevReadCursorInBytesCapture = lockOffsetInBytesCapture + mappedSizeInBytesCapture; + + if (prevReadCursorInBytesCapture == (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture)) { + prevReadCursorInBytesCapture = 0; + } + } break; + + + + case ma_device_type_playback: + { + DWORD availableBytesPlayback; + DWORD physicalPlayCursorInBytes; + DWORD physicalWriteCursorInBytes; + hr = ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes); + if (FAILED(hr)) { + break; + } + + if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) { + physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback; + } + prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes; + + /* If there's any bytes available for writing we can do that now. The space between the virtual cursor position and play cursor. */ + if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) { + /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */ + if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) { + availableBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback; + availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */ + } else { + /* This is an error. */ + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[DirectSound] (Playback): Play cursor has moved in front of the write cursor (same loop iterations). physicalPlayCursorInBytes=%ld, virtualWriteCursorInBytes=%ld.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback); + availableBytesPlayback = 0; + } + } else { + /* Different loop iterations. The available bytes only goes from the virtual write cursor to the physical play cursor. */ + if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) { + availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback; + } else { + /* This is an error. 
*/ + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[DirectSound] (Playback): Write cursor has moved behind the play cursor (different loop iterations). physicalPlayCursorInBytes=%ld, virtualWriteCursorInBytes=%ld.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback); + availableBytesPlayback = 0; + } + } + + /* If there's no room available for writing we need to wait for more. */ + if (availableBytesPlayback < pDevice->playback.internalPeriodSizeInFrames) { + /* If we haven't started the device yet, this will never get beyond 0. In this case we need to get the device started. */ + if (availableBytesPlayback == 0 && !isPlaybackDeviceStarted) { + hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed."); + return ma_result_from_HRESULT(hr); + } + isPlaybackDeviceStarted = MA_TRUE; + } else { + ma_sleep(waitTimeInMilliseconds); + continue; + } + } + + /* Getting here means there room available somewhere. We limit this to either the end of the buffer or the physical play cursor, whichever is closest. */ + lockOffsetInBytesPlayback = virtualWriteCursorInBytesPlayback; + if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) { + /* Same loop iteration. Go up to the end of the buffer. */ + lockSizeInBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback; + } else { + /* Different loop iterations. Go up to the physical play cursor. */ + lockSizeInBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback; + } + + hr = ma_IDirectSoundBuffer_Lock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, lockOffsetInBytesPlayback, lockSizeInBytesPlayback, &pMappedDeviceBufferPlayback, &mappedSizeInBytesPlayback, NULL, NULL, 0); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from playback device in preparation for writing to the device."); + result = ma_result_from_HRESULT(hr); + break; + } + + /* At this point we have a buffer for output. */ + ma_device__read_frames_from_client(pDevice, (mappedSizeInBytesPlayback/bpfDevicePlayback), pMappedDeviceBufferPlayback); + + hr = ma_IDirectSoundBuffer_Unlock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, pMappedDeviceBufferPlayback, mappedSizeInBytesPlayback, NULL, 0); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from playback device after writing to the device."); + result = ma_result_from_HRESULT(hr); + break; + } + + virtualWriteCursorInBytesPlayback += mappedSizeInBytesPlayback; + if (virtualWriteCursorInBytesPlayback == pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) { + virtualWriteCursorInBytesPlayback = 0; + virtualWriteCursorLoopFlagPlayback = !virtualWriteCursorLoopFlagPlayback; + } + + /* + We may need to start the device. We want two full periods to be written before starting the playback device. Having an extra period adds + a bit of a buffer to prevent the playback buffer from getting starved. 
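+
+                    (In this playback-only path the check below starts the device once at least one full period has been
+                    written; the duplex path earlier in this function waits for two periods' worth before starting.)
+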
+ */ + framesWrittenToPlaybackDevice += mappedSizeInBytesPlayback/bpfDevicePlayback; + if (!isPlaybackDeviceStarted && framesWrittenToPlaybackDevice >= pDevice->playback.internalPeriodSizeInFrames) { + hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed."); + return ma_result_from_HRESULT(hr); + } + isPlaybackDeviceStarted = MA_TRUE; + } + } break; + + + default: return MA_INVALID_ARGS; /* Invalid device type. */ + } + + if (result != MA_SUCCESS) { + return result; + } + } + + /* Getting here means the device is being stopped. */ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + hr = ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCaptureBuffer_Stop() failed."); + return ma_result_from_HRESULT(hr); + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + /* The playback device should be drained before stopping. All we do is wait until the available bytes is equal to the size of the buffer. */ + if (isPlaybackDeviceStarted) { + for (;;) { + DWORD availableBytesPlayback = 0; + DWORD physicalPlayCursorInBytes; + DWORD physicalWriteCursorInBytes; + hr = ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes); + if (FAILED(hr)) { + break; + } + + if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) { + physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback; + } + prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes; + + if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) { + /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */ + if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) { + availableBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback; + availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */ + } else { + break; + } + } else { + /* Different loop iterations. The available bytes only goes from the virtual write cursor to the physical play cursor. 
*/ + if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) { + availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback; + } else { + break; + } + } + + if (availableBytesPlayback >= (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback)) { + break; + } + + ma_sleep(waitTimeInMilliseconds); + } + } + + hr = ma_IDirectSoundBuffer_Stop((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Stop() failed."); + return ma_result_from_HRESULT(hr); + } + + ma_IDirectSoundBuffer_SetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_uninit__dsound(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_dsound); + + ma_dlclose(ma_context_get_log(pContext), pContext->dsound.hDSoundDLL); + + return MA_SUCCESS; +} + +static ma_result ma_context_init__dsound(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + MA_ASSERT(pContext != NULL); + + (void)pConfig; + + pContext->dsound.hDSoundDLL = ma_dlopen(ma_context_get_log(pContext), "dsound.dll"); + if (pContext->dsound.hDSoundDLL == NULL) { + return MA_API_NOT_FOUND; + } + + pContext->dsound.DirectSoundCreate = ma_dlsym(ma_context_get_log(pContext), pContext->dsound.hDSoundDLL, "DirectSoundCreate"); + pContext->dsound.DirectSoundEnumerateA = ma_dlsym(ma_context_get_log(pContext), pContext->dsound.hDSoundDLL, "DirectSoundEnumerateA"); + pContext->dsound.DirectSoundCaptureCreate = ma_dlsym(ma_context_get_log(pContext), pContext->dsound.hDSoundDLL, "DirectSoundCaptureCreate"); + pContext->dsound.DirectSoundCaptureEnumerateA = ma_dlsym(ma_context_get_log(pContext), pContext->dsound.hDSoundDLL, "DirectSoundCaptureEnumerateA"); + + /* + We need to support all functions or nothing. DirectSound with Windows 95 seems to not work too + well in my testing. For example, it's missing DirectSoundCaptureEnumerateA(). This is a convenient + place to just disable the DirectSound backend for Windows 95. + */ + if (pContext->dsound.DirectSoundCreate == NULL || + pContext->dsound.DirectSoundEnumerateA == NULL || + pContext->dsound.DirectSoundCaptureCreate == NULL || + pContext->dsound.DirectSoundCaptureEnumerateA == NULL) { + return MA_API_NOT_FOUND; + } + + pCallbacks->onContextInit = ma_context_init__dsound; + pCallbacks->onContextUninit = ma_context_uninit__dsound; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__dsound; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__dsound; + pCallbacks->onDeviceInit = ma_device_init__dsound; + pCallbacks->onDeviceUninit = ma_device_uninit__dsound; + pCallbacks->onDeviceStart = NULL; /* Not used. Started in onDeviceDataLoop. */ + pCallbacks->onDeviceStop = NULL; /* Not used. Stopped in onDeviceDataLoop. */ + pCallbacks->onDeviceRead = NULL; /* Not used. Data is read directly in onDeviceDataLoop. */ + pCallbacks->onDeviceWrite = NULL; /* Not used. Data is written directly in onDeviceDataLoop. 
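+
+       Because this backend supplies onDeviceDataLoop, miniaudio hands the whole playback/capture/duplex loop to
+       ma_device_data_loop__dsound() above instead of calling per-chunk onDeviceRead/onDeviceWrite callbacks. Nothing
+       changes from the public API's point of view; as a minimal sketch (error handling omitted), a caller can still
+       force this backend with:
+
+           ma_backend backends[] = { ma_backend_dsound };
+           ma_context context;
+           ma_context_init(backends, 1, NULL, &context);   then ma_device_init() and ma_device_start() as usual.
+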
*/ + pCallbacks->onDeviceDataLoop = ma_device_data_loop__dsound; + + return MA_SUCCESS; +} +#endif + + + +/****************************************************************************** + +WinMM Backend + +******************************************************************************/ +#ifdef MA_HAS_WINMM + +/* +Some build configurations will exclude the WinMM API. An example is when WIN32_LEAN_AND_MEAN +is defined. We need to define the types and functions we need manually. +*/ +#define MA_MMSYSERR_NOERROR 0 +#define MA_MMSYSERR_ERROR 1 +#define MA_MMSYSERR_BADDEVICEID 2 +#define MA_MMSYSERR_INVALHANDLE 5 +#define MA_MMSYSERR_NOMEM 7 +#define MA_MMSYSERR_INVALFLAG 10 +#define MA_MMSYSERR_INVALPARAM 11 +#define MA_MMSYSERR_HANDLEBUSY 12 + +#define MA_CALLBACK_EVENT 0x00050000 +#define MA_WAVE_ALLOWSYNC 0x0002 + +#define MA_WHDR_DONE 0x00000001 +#define MA_WHDR_PREPARED 0x00000002 +#define MA_WHDR_BEGINLOOP 0x00000004 +#define MA_WHDR_ENDLOOP 0x00000008 +#define MA_WHDR_INQUEUE 0x00000010 + +#define MA_MAXPNAMELEN 32 + +typedef void* MA_HWAVEIN; +typedef void* MA_HWAVEOUT; +typedef UINT MA_MMRESULT; +typedef UINT MA_MMVERSION; + +typedef struct +{ + WORD wMid; + WORD wPid; + MA_MMVERSION vDriverVersion; + CHAR szPname[MA_MAXPNAMELEN]; + DWORD dwFormats; + WORD wChannels; + WORD wReserved1; +} MA_WAVEINCAPSA; + +typedef struct +{ + WORD wMid; + WORD wPid; + MA_MMVERSION vDriverVersion; + CHAR szPname[MA_MAXPNAMELEN]; + DWORD dwFormats; + WORD wChannels; + WORD wReserved1; + DWORD dwSupport; +} MA_WAVEOUTCAPSA; + +typedef struct tagWAVEHDR +{ + char* lpData; + DWORD dwBufferLength; + DWORD dwBytesRecorded; + DWORD_PTR dwUser; + DWORD dwFlags; + DWORD dwLoops; + struct tagWAVEHDR* lpNext; + DWORD_PTR reserved; +} MA_WAVEHDR; + +typedef struct +{ + WORD wMid; + WORD wPid; + MA_MMVERSION vDriverVersion; + CHAR szPname[MA_MAXPNAMELEN]; + DWORD dwFormats; + WORD wChannels; + WORD wReserved1; + DWORD dwSupport; + GUID ManufacturerGuid; + GUID ProductGuid; + GUID NameGuid; +} MA_WAVEOUTCAPS2A; + +typedef struct +{ + WORD wMid; + WORD wPid; + MA_MMVERSION vDriverVersion; + CHAR szPname[MA_MAXPNAMELEN]; + DWORD dwFormats; + WORD wChannels; + WORD wReserved1; + GUID ManufacturerGuid; + GUID ProductGuid; + GUID NameGuid; +} MA_WAVEINCAPS2A; + +typedef UINT (WINAPI * MA_PFN_waveOutGetNumDevs)(void); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveOutGetDevCapsA)(ma_uintptr uDeviceID, MA_WAVEOUTCAPSA* pwoc, UINT cbwoc); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveOutOpen)(MA_HWAVEOUT* phwo, UINT uDeviceID, const MA_WAVEFORMATEX* pwfx, DWORD_PTR dwCallback, DWORD_PTR dwInstance, DWORD fdwOpen); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveOutClose)(MA_HWAVEOUT hwo); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveOutPrepareHeader)(MA_HWAVEOUT hwo, MA_WAVEHDR* pwh, UINT cbwh); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveOutUnprepareHeader)(MA_HWAVEOUT hwo, MA_WAVEHDR* pwh, UINT cbwh); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveOutWrite)(MA_HWAVEOUT hwo, MA_WAVEHDR* pwh, UINT cbwh); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveOutReset)(MA_HWAVEOUT hwo); +typedef UINT (WINAPI * MA_PFN_waveInGetNumDevs)(void); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveInGetDevCapsA)(ma_uintptr uDeviceID, MA_WAVEINCAPSA* pwic, UINT cbwic); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveInOpen)(MA_HWAVEIN* phwi, UINT uDeviceID, const MA_WAVEFORMATEX* pwfx, DWORD_PTR dwCallback, DWORD_PTR dwInstance, DWORD fdwOpen); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveInClose)(MA_HWAVEIN hwi); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveInPrepareHeader)(MA_HWAVEIN hwi, 
MA_WAVEHDR* pwh, UINT cbwh); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveInUnprepareHeader)(MA_HWAVEIN hwi, MA_WAVEHDR* pwh, UINT cbwh); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveInAddBuffer)(MA_HWAVEIN hwi, MA_WAVEHDR* pwh, UINT cbwh); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveInStart)(MA_HWAVEIN hwi); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveInReset)(MA_HWAVEIN hwi); + +static ma_result ma_result_from_MMRESULT(MA_MMRESULT resultMM) +{ + switch (resultMM) + { + case MA_MMSYSERR_NOERROR: return MA_SUCCESS; + case MA_MMSYSERR_BADDEVICEID: return MA_INVALID_ARGS; + case MA_MMSYSERR_INVALHANDLE: return MA_INVALID_ARGS; + case MA_MMSYSERR_NOMEM: return MA_OUT_OF_MEMORY; + case MA_MMSYSERR_INVALFLAG: return MA_INVALID_ARGS; + case MA_MMSYSERR_INVALPARAM: return MA_INVALID_ARGS; + case MA_MMSYSERR_HANDLEBUSY: return MA_BUSY; + case MA_MMSYSERR_ERROR: return MA_ERROR; + default: return MA_ERROR; + } +} + +static char* ma_find_last_character(char* str, char ch) +{ + char* last; + + if (str == NULL) { + return NULL; + } + + last = NULL; + while (*str != '\0') { + if (*str == ch) { + last = str; + } + + str += 1; + } + + return last; +} + +static ma_uint32 ma_get_period_size_in_bytes(ma_uint32 periodSizeInFrames, ma_format format, ma_uint32 channels) +{ + return periodSizeInFrames * ma_get_bytes_per_frame(format, channels); +} + + +/* +Our own "WAVECAPS" structure that contains generic information shared between WAVEOUTCAPS2 and WAVEINCAPS2 so +we can do things generically and typesafely. Names are being kept the same for consistency. +*/ +typedef struct +{ + CHAR szPname[MA_MAXPNAMELEN]; + DWORD dwFormats; + WORD wChannels; + GUID NameGuid; +} MA_WAVECAPSA; + +static ma_result ma_get_best_info_from_formats_flags__winmm(DWORD dwFormats, WORD channels, WORD* pBitsPerSample, DWORD* pSampleRate) +{ + WORD bitsPerSample = 0; + DWORD sampleRate = 0; + + if (pBitsPerSample) { + *pBitsPerSample = 0; + } + if (pSampleRate) { + *pSampleRate = 0; + } + + if (channels == 1) { + bitsPerSample = 16; + if ((dwFormats & WAVE_FORMAT_48M16) != 0) { + sampleRate = 48000; + } else if ((dwFormats & WAVE_FORMAT_44M16) != 0) { + sampleRate = 44100; + } else if ((dwFormats & WAVE_FORMAT_2M16) != 0) { + sampleRate = 22050; + } else if ((dwFormats & WAVE_FORMAT_1M16) != 0) { + sampleRate = 11025; + } else if ((dwFormats & WAVE_FORMAT_96M16) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 8; + if ((dwFormats & WAVE_FORMAT_48M08) != 0) { + sampleRate = 48000; + } else if ((dwFormats & WAVE_FORMAT_44M08) != 0) { + sampleRate = 44100; + } else if ((dwFormats & WAVE_FORMAT_2M08) != 0) { + sampleRate = 22050; + } else if ((dwFormats & WAVE_FORMAT_1M08) != 0) { + sampleRate = 11025; + } else if ((dwFormats & WAVE_FORMAT_96M08) != 0) { + sampleRate = 96000; + } else { + return MA_FORMAT_NOT_SUPPORTED; + } + } + } else { + bitsPerSample = 16; + if ((dwFormats & WAVE_FORMAT_48S16) != 0) { + sampleRate = 48000; + } else if ((dwFormats & WAVE_FORMAT_44S16) != 0) { + sampleRate = 44100; + } else if ((dwFormats & WAVE_FORMAT_2S16) != 0) { + sampleRate = 22050; + } else if ((dwFormats & WAVE_FORMAT_1S16) != 0) { + sampleRate = 11025; + } else if ((dwFormats & WAVE_FORMAT_96S16) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 8; + if ((dwFormats & WAVE_FORMAT_48S08) != 0) { + sampleRate = 48000; + } else if ((dwFormats & WAVE_FORMAT_44S08) != 0) { + sampleRate = 44100; + } else if ((dwFormats & WAVE_FORMAT_2S08) != 0) { + sampleRate = 22050; + } else if ((dwFormats & WAVE_FORMAT_1S08) != 0) { + sampleRate = 11025; + } 
else if ((dwFormats & WAVE_FORMAT_96S08) != 0) { + sampleRate = 96000; + } else { + return MA_FORMAT_NOT_SUPPORTED; + } + } + } + + if (pBitsPerSample) { + *pBitsPerSample = bitsPerSample; + } + if (pSampleRate) { + *pSampleRate = sampleRate; + } + + return MA_SUCCESS; +} + +static ma_result ma_formats_flags_to_WAVEFORMATEX__winmm(DWORD dwFormats, WORD channels, MA_WAVEFORMATEX* pWF) +{ + ma_result result; + + MA_ASSERT(pWF != NULL); + + MA_ZERO_OBJECT(pWF); + pWF->cbSize = sizeof(*pWF); + pWF->wFormatTag = WAVE_FORMAT_PCM; + pWF->nChannels = (WORD)channels; + if (pWF->nChannels > 2) { + pWF->nChannels = 2; + } + + result = ma_get_best_info_from_formats_flags__winmm(dwFormats, channels, &pWF->wBitsPerSample, &pWF->nSamplesPerSec); + if (result != MA_SUCCESS) { + return result; + } + + pWF->nBlockAlign = (WORD)(pWF->nChannels * pWF->wBitsPerSample / 8); + pWF->nAvgBytesPerSec = pWF->nBlockAlign * pWF->nSamplesPerSec; + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info_from_WAVECAPS(ma_context* pContext, MA_WAVECAPSA* pCaps, ma_device_info* pDeviceInfo) +{ + WORD bitsPerSample; + DWORD sampleRate; + ma_result result; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pCaps != NULL); + MA_ASSERT(pDeviceInfo != NULL); + + /* + Name / Description + + Unfortunately the name specified in WAVE(OUT/IN)CAPS2 is limited to 31 characters. This results in an unprofessional looking + situation where the names of the devices are truncated. To help work around this, we need to look at the name GUID and try + looking in the registry for the full name. If we can't find it there, we need to just fall back to the default name. + */ + + /* Set the default to begin with. */ + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), pCaps->szPname, (size_t)-1); + + /* + Now try the registry. There's a few things to consider here: + - The name GUID can be null, in which we case we just need to stick to the original 31 characters. + - If the name GUID is not present in the registry we'll also need to stick to the original 31 characters. + - I like consistency, so I want the returned device names to be consistent with those returned by WASAPI and DirectSound. The + problem, however is that WASAPI and DirectSound use " ()" format (such as "Speakers (High Definition Audio)"), + but WinMM does not specificy the component name. From my admittedly limited testing, I've notice the component name seems to + usually fit within the 31 characters of the fixed sized buffer, so what I'm going to do is parse that string for the component + name, and then concatenate the name from the registry. 
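+
+      As a purely hypothetical example of the result: if WAVEOUTCAPS2 reports the truncated name
+      "Speakers (High Definition Aud" and the registry value for the name GUID is "High Definition Audio Device", the
+      code below keeps everything up to and including the last "(" and appends the registry string, producing
+      "Speakers (High Definition Audio Device)".
+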
+ */ + if (!ma_is_guid_null(&pCaps->NameGuid)) { + WCHAR guidStrW[256]; + if (((MA_PFN_StringFromGUID2)pContext->win32.StringFromGUID2)(&pCaps->NameGuid, guidStrW, ma_countof(guidStrW)) > 0) { + char guidStr[256]; + char keyStr[1024]; + HKEY hKey; + + WideCharToMultiByte(CP_UTF8, 0, guidStrW, -1, guidStr, sizeof(guidStr), 0, FALSE); + + ma_strcpy_s(keyStr, sizeof(keyStr), "SYSTEM\\CurrentControlSet\\Control\\MediaCategories\\"); + ma_strcat_s(keyStr, sizeof(keyStr), guidStr); + + if (((MA_PFN_RegOpenKeyExA)pContext->win32.RegOpenKeyExA)(HKEY_LOCAL_MACHINE, keyStr, 0, KEY_READ, &hKey) == ERROR_SUCCESS) { + BYTE nameFromReg[512]; + DWORD nameFromRegSize = sizeof(nameFromReg); + LONG resultWin32 = ((MA_PFN_RegQueryValueExA)pContext->win32.RegQueryValueExA)(hKey, "Name", 0, NULL, (BYTE*)nameFromReg, (DWORD*)&nameFromRegSize); + ((MA_PFN_RegCloseKey)pContext->win32.RegCloseKey)(hKey); + + if (resultWin32 == ERROR_SUCCESS) { + /* We have the value from the registry, so now we need to construct the name string. */ + char name[1024]; + if (ma_strcpy_s(name, sizeof(name), pDeviceInfo->name) == 0) { + char* nameBeg = ma_find_last_character(name, '('); + if (nameBeg != NULL) { + size_t leadingLen = (nameBeg - name); + ma_strncpy_s(nameBeg + 1, sizeof(name) - leadingLen, (const char*)nameFromReg, (size_t)-1); + + /* The closing ")", if it can fit. */ + if (leadingLen + nameFromRegSize < sizeof(name)-1) { + ma_strcat_s(name, sizeof(name), ")"); + } + + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), name, (size_t)-1); + } + } + } + } + } + } + + + result = ma_get_best_info_from_formats_flags__winmm(pCaps->dwFormats, pCaps->wChannels, &bitsPerSample, &sampleRate); + if (result != MA_SUCCESS) { + return result; + } + + if (bitsPerSample == 8) { + pDeviceInfo->nativeDataFormats[0].format = ma_format_u8; + } else if (bitsPerSample == 16) { + pDeviceInfo->nativeDataFormats[0].format = ma_format_s16; + } else if (bitsPerSample == 24) { + pDeviceInfo->nativeDataFormats[0].format = ma_format_s24; + } else if (bitsPerSample == 32) { + pDeviceInfo->nativeDataFormats[0].format = ma_format_s32; + } else { + return MA_FORMAT_NOT_SUPPORTED; + } + pDeviceInfo->nativeDataFormats[0].channels = pCaps->wChannels; + pDeviceInfo->nativeDataFormats[0].sampleRate = sampleRate; + pDeviceInfo->nativeDataFormats[0].flags = 0; + pDeviceInfo->nativeDataFormatCount = 1; + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info_from_WAVEOUTCAPS2(ma_context* pContext, MA_WAVEOUTCAPS2A* pCaps, ma_device_info* pDeviceInfo) +{ + MA_WAVECAPSA caps; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pCaps != NULL); + MA_ASSERT(pDeviceInfo != NULL); + + MA_COPY_MEMORY(caps.szPname, pCaps->szPname, sizeof(caps.szPname)); + caps.dwFormats = pCaps->dwFormats; + caps.wChannels = pCaps->wChannels; + caps.NameGuid = pCaps->NameGuid; + return ma_context_get_device_info_from_WAVECAPS(pContext, &caps, pDeviceInfo); +} + +static ma_result ma_context_get_device_info_from_WAVEINCAPS2(ma_context* pContext, MA_WAVEINCAPS2A* pCaps, ma_device_info* pDeviceInfo) +{ + MA_WAVECAPSA caps; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pCaps != NULL); + MA_ASSERT(pDeviceInfo != NULL); + + MA_COPY_MEMORY(caps.szPname, pCaps->szPname, sizeof(caps.szPname)); + caps.dwFormats = pCaps->dwFormats; + caps.wChannels = pCaps->wChannels; + caps.NameGuid = pCaps->NameGuid; + return ma_context_get_device_info_from_WAVECAPS(pContext, &caps, pDeviceInfo); +} + + +static ma_result ma_context_enumerate_devices__winmm(ma_context* pContext, 
ma_enum_devices_callback_proc callback, void* pUserData) +{ + UINT playbackDeviceCount; + UINT captureDeviceCount; + UINT iPlaybackDevice; + UINT iCaptureDevice; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* Playback. */ + playbackDeviceCount = ((MA_PFN_waveOutGetNumDevs)pContext->winmm.waveOutGetNumDevs)(); + for (iPlaybackDevice = 0; iPlaybackDevice < playbackDeviceCount; ++iPlaybackDevice) { + MA_MMRESULT result; + MA_WAVEOUTCAPS2A caps; + + MA_ZERO_OBJECT(&caps); + + result = ((MA_PFN_waveOutGetDevCapsA)pContext->winmm.waveOutGetDevCapsA)(iPlaybackDevice, (MA_WAVEOUTCAPSA*)&caps, sizeof(caps)); + if (result == MA_MMSYSERR_NOERROR) { + ma_device_info deviceInfo; + + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.winmm = iPlaybackDevice; + + /* The first enumerated device is the default device. */ + if (iPlaybackDevice == 0) { + deviceInfo.isDefault = MA_TRUE; + } + + if (ma_context_get_device_info_from_WAVEOUTCAPS2(pContext, &caps, &deviceInfo) == MA_SUCCESS) { + ma_bool32 cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + if (cbResult == MA_FALSE) { + return MA_SUCCESS; /* Enumeration was stopped. */ + } + } + } + } + + /* Capture. */ + captureDeviceCount = ((MA_PFN_waveInGetNumDevs)pContext->winmm.waveInGetNumDevs)(); + for (iCaptureDevice = 0; iCaptureDevice < captureDeviceCount; ++iCaptureDevice) { + MA_MMRESULT result; + MA_WAVEINCAPS2A caps; + + MA_ZERO_OBJECT(&caps); + + result = ((MA_PFN_waveInGetDevCapsA)pContext->winmm.waveInGetDevCapsA)(iCaptureDevice, (MA_WAVEINCAPSA*)&caps, sizeof(caps)); + if (result == MA_MMSYSERR_NOERROR) { + ma_device_info deviceInfo; + + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.winmm = iCaptureDevice; + + /* The first enumerated device is the default device. */ + if (iCaptureDevice == 0) { + deviceInfo.isDefault = MA_TRUE; + } + + if (ma_context_get_device_info_from_WAVEINCAPS2(pContext, &caps, &deviceInfo) == MA_SUCCESS) { + ma_bool32 cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + if (cbResult == MA_FALSE) { + return MA_SUCCESS; /* Enumeration was stopped. */ + } + } + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__winmm(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + UINT winMMDeviceID; + + MA_ASSERT(pContext != NULL); + + winMMDeviceID = 0; + if (pDeviceID != NULL) { + winMMDeviceID = (UINT)pDeviceID->winmm; + } + + pDeviceInfo->id.winmm = winMMDeviceID; + + /* The first ID is the default device. 
*/ + if (winMMDeviceID == 0) { + pDeviceInfo->isDefault = MA_TRUE; + } + + if (deviceType == ma_device_type_playback) { + MA_MMRESULT result; + MA_WAVEOUTCAPS2A caps; + + MA_ZERO_OBJECT(&caps); + + result = ((MA_PFN_waveOutGetDevCapsA)pContext->winmm.waveOutGetDevCapsA)(winMMDeviceID, (MA_WAVEOUTCAPSA*)&caps, sizeof(caps)); + if (result == MA_MMSYSERR_NOERROR) { + return ma_context_get_device_info_from_WAVEOUTCAPS2(pContext, &caps, pDeviceInfo); + } + } else { + MA_MMRESULT result; + MA_WAVEINCAPS2A caps; + + MA_ZERO_OBJECT(&caps); + + result = ((MA_PFN_waveInGetDevCapsA)pContext->winmm.waveInGetDevCapsA)(winMMDeviceID, (MA_WAVEINCAPSA*)&caps, sizeof(caps)); + if (result == MA_MMSYSERR_NOERROR) { + return ma_context_get_device_info_from_WAVEINCAPS2(pContext, &caps, pDeviceInfo); + } + } + + return MA_NO_DEVICE; +} + + +static ma_result ma_device_uninit__winmm(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ((MA_PFN_waveInClose)pDevice->pContext->winmm.waveInClose)((MA_HWAVEIN)pDevice->winmm.hDeviceCapture); + CloseHandle((HANDLE)pDevice->winmm.hEventCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ((MA_PFN_waveOutReset)pDevice->pContext->winmm.waveOutReset)((MA_HWAVEOUT)pDevice->winmm.hDevicePlayback); + ((MA_PFN_waveOutClose)pDevice->pContext->winmm.waveOutClose)((MA_HWAVEOUT)pDevice->winmm.hDevicePlayback); + CloseHandle((HANDLE)pDevice->winmm.hEventPlayback); + } + + ma_free(pDevice->winmm._pHeapData, &pDevice->pContext->allocationCallbacks); + + MA_ZERO_OBJECT(&pDevice->winmm); /* Safety. */ + + return MA_SUCCESS; +} + +static ma_uint32 ma_calculate_period_size_in_frames_from_descriptor__winmm(const ma_device_descriptor* pDescriptor, ma_uint32 nativeSampleRate, ma_performance_profile performanceProfile) +{ + /* WinMM has a minimum period size of 40ms. */ + ma_uint32 minPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(40, nativeSampleRate); + ma_uint32 periodSizeInFrames; + + periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, nativeSampleRate, performanceProfile); + if (periodSizeInFrames < minPeriodSizeInFrames) { + periodSizeInFrames = minPeriodSizeInFrames; + } + + return periodSizeInFrames; +} + +static ma_result ma_device_init__winmm(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + const char* errorMsg = ""; + ma_result errorCode = MA_ERROR; + ma_result result = MA_SUCCESS; + ma_uint32 heapSize; + UINT winMMDeviceIDPlayback = 0; + UINT winMMDeviceIDCapture = 0; + + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->winmm); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* No exlusive mode with WinMM. 
*/ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + if (pDescriptorPlayback->pDeviceID != NULL) { + winMMDeviceIDPlayback = (UINT)pDescriptorPlayback->pDeviceID->winmm; + } + if (pDescriptorCapture->pDeviceID != NULL) { + winMMDeviceIDCapture = (UINT)pDescriptorCapture->pDeviceID->winmm; + } + + /* The capture device needs to be initialized first. */ + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + MA_WAVEINCAPSA caps; + MA_WAVEFORMATEX wf; + MA_MMRESULT resultMM; + + /* We use an event to know when a new fragment needs to be enqueued. */ + pDevice->winmm.hEventCapture = (ma_handle)CreateEventA(NULL, TRUE, TRUE, NULL); + if (pDevice->winmm.hEventCapture == NULL) { + errorMsg = "[WinMM] Failed to create event for fragment enqueing for the capture device.", errorCode = ma_result_from_GetLastError(GetLastError()); + goto on_error; + } + + /* The format should be based on the device's actual format. */ + if (((MA_PFN_waveInGetDevCapsA)pDevice->pContext->winmm.waveInGetDevCapsA)(winMMDeviceIDCapture, &caps, sizeof(caps)) != MA_MMSYSERR_NOERROR) { + errorMsg = "[WinMM] Failed to retrieve internal device caps.", errorCode = MA_FORMAT_NOT_SUPPORTED; + goto on_error; + } + + result = ma_formats_flags_to_WAVEFORMATEX__winmm(caps.dwFormats, caps.wChannels, &wf); + if (result != MA_SUCCESS) { + errorMsg = "[WinMM] Could not find appropriate format for internal device.", errorCode = result; + goto on_error; + } + + resultMM = ((MA_PFN_waveInOpen)pDevice->pContext->winmm.waveInOpen)((MA_HWAVEIN*)&pDevice->winmm.hDeviceCapture, winMMDeviceIDCapture, &wf, (DWORD_PTR)pDevice->winmm.hEventCapture, (DWORD_PTR)pDevice, MA_CALLBACK_EVENT | MA_WAVE_ALLOWSYNC); + if (resultMM != MA_MMSYSERR_NOERROR) { + errorMsg = "[WinMM] Failed to open capture device.", errorCode = MA_FAILED_TO_OPEN_BACKEND_DEVICE; + goto on_error; + } + + pDescriptorCapture->format = ma_format_from_WAVEFORMATEX(&wf); + pDescriptorCapture->channels = wf.nChannels; + pDescriptorCapture->sampleRate = wf.nSamplesPerSec; + ma_channel_map_init_standard(ma_standard_channel_map_microsoft, pDescriptorCapture->channelMap, ma_countof(pDescriptorCapture->channelMap), pDescriptorCapture->channels); + pDescriptorCapture->periodCount = pDescriptorCapture->periodCount; + pDescriptorCapture->periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__winmm(pDescriptorCapture, pDescriptorCapture->sampleRate, pConfig->performanceProfile); + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + MA_WAVEOUTCAPSA caps; + MA_WAVEFORMATEX wf; + MA_MMRESULT resultMM; + + /* We use an event to know when a new fragment needs to be enqueued. */ + pDevice->winmm.hEventPlayback = (ma_handle)CreateEventA(NULL, TRUE, TRUE, NULL); + if (pDevice->winmm.hEventPlayback == NULL) { + errorMsg = "[WinMM] Failed to create event for fragment enqueing for the playback device.", errorCode = ma_result_from_GetLastError(GetLastError()); + goto on_error; + } + + /* The format should be based on the device's actual format. 
*/ + if (((MA_PFN_waveOutGetDevCapsA)pDevice->pContext->winmm.waveOutGetDevCapsA)(winMMDeviceIDPlayback, &caps, sizeof(caps)) != MA_MMSYSERR_NOERROR) { + errorMsg = "[WinMM] Failed to retrieve internal device caps.", errorCode = MA_FORMAT_NOT_SUPPORTED; + goto on_error; + } + + result = ma_formats_flags_to_WAVEFORMATEX__winmm(caps.dwFormats, caps.wChannels, &wf); + if (result != MA_SUCCESS) { + errorMsg = "[WinMM] Could not find appropriate format for internal device.", errorCode = result; + goto on_error; + } + + resultMM = ((MA_PFN_waveOutOpen)pDevice->pContext->winmm.waveOutOpen)((MA_HWAVEOUT*)&pDevice->winmm.hDevicePlayback, winMMDeviceIDPlayback, &wf, (DWORD_PTR)pDevice->winmm.hEventPlayback, (DWORD_PTR)pDevice, MA_CALLBACK_EVENT | MA_WAVE_ALLOWSYNC); + if (resultMM != MA_MMSYSERR_NOERROR) { + errorMsg = "[WinMM] Failed to open playback device.", errorCode = MA_FAILED_TO_OPEN_BACKEND_DEVICE; + goto on_error; + } + + pDescriptorPlayback->format = ma_format_from_WAVEFORMATEX(&wf); + pDescriptorPlayback->channels = wf.nChannels; + pDescriptorPlayback->sampleRate = wf.nSamplesPerSec; + ma_channel_map_init_standard(ma_standard_channel_map_microsoft, pDescriptorPlayback->channelMap, ma_countof(pDescriptorPlayback->channelMap), pDescriptorPlayback->channels); + pDescriptorPlayback->periodCount = pDescriptorPlayback->periodCount; + pDescriptorPlayback->periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__winmm(pDescriptorPlayback, pDescriptorPlayback->sampleRate, pConfig->performanceProfile); + } + + /* + The heap allocated data is allocated like so: + + [Capture WAVEHDRs][Playback WAVEHDRs][Capture Intermediary Buffer][Playback Intermediary Buffer] + */ + heapSize = 0; + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + heapSize += sizeof(MA_WAVEHDR)*pDescriptorCapture->periodCount + (pDescriptorCapture->periodSizeInFrames * pDescriptorCapture->periodCount * ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels)); + } + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + heapSize += sizeof(MA_WAVEHDR)*pDescriptorPlayback->periodCount + (pDescriptorPlayback->periodSizeInFrames * pDescriptorPlayback->periodCount * ma_get_bytes_per_frame(pDescriptorPlayback->format, pDescriptorPlayback->channels)); + } + + pDevice->winmm._pHeapData = (ma_uint8*)ma_calloc(heapSize, &pDevice->pContext->allocationCallbacks); + if (pDevice->winmm._pHeapData == NULL) { + errorMsg = "[WinMM] Failed to allocate memory for the intermediary buffer.", errorCode = MA_OUT_OF_MEMORY; + goto on_error; + } + + MA_ZERO_MEMORY(pDevice->winmm._pHeapData, heapSize); + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_uint32 iPeriod; + + if (pConfig->deviceType == ma_device_type_capture) { + pDevice->winmm.pWAVEHDRCapture = pDevice->winmm._pHeapData; + pDevice->winmm.pIntermediaryBufferCapture = pDevice->winmm._pHeapData + (sizeof(MA_WAVEHDR)*(pDescriptorCapture->periodCount)); + } else { + pDevice->winmm.pWAVEHDRCapture = pDevice->winmm._pHeapData; + pDevice->winmm.pIntermediaryBufferCapture = pDevice->winmm._pHeapData + (sizeof(MA_WAVEHDR)*(pDescriptorCapture->periodCount + pDescriptorPlayback->periodCount)); + } + + /* Prepare headers. 
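+
+           (For a hypothetical duplex device with 3 capture and 3 playback periods of 3840 bytes each, and a
+           sizeof(MA_WAVEHDR) of 48 bytes as on a typical 64-bit build, the pointer setup above works out to:
+
+               offset     0: capture WAVEHDRs             (3 x 48 = 144 bytes)
+               offset   144: playback WAVEHDRs            (3 x 48 = 144 bytes)
+               offset   288: capture intermediary buffer  (3 x 3840 = 11520 bytes)
+               offset 11808: playback intermediary buffer (3 x 3840 = 11520 bytes).)
+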
*/ + for (iPeriod = 0; iPeriod < pDescriptorCapture->periodCount; ++iPeriod) { + ma_uint32 periodSizeInBytes = ma_get_period_size_in_bytes(pDescriptorCapture->periodSizeInFrames, pDescriptorCapture->format, pDescriptorCapture->channels); + + ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].lpData = (char*)(pDevice->winmm.pIntermediaryBufferCapture + (periodSizeInBytes*iPeriod)); + ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwBufferLength = periodSizeInBytes; + ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwFlags = 0L; + ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwLoops = 0L; + ((MA_PFN_waveInPrepareHeader)pDevice->pContext->winmm.waveInPrepareHeader)((MA_HWAVEIN)pDevice->winmm.hDeviceCapture, &((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod], sizeof(MA_WAVEHDR)); + + /* + The user data of the MA_WAVEHDR structure is a single flag the controls whether or not it is ready for writing. Consider it to be named "isLocked". A value of 0 means + it's unlocked and available for writing. A value of 1 means it's locked. + */ + ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwUser = 0; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_uint32 iPeriod; + + if (pConfig->deviceType == ma_device_type_playback) { + pDevice->winmm.pWAVEHDRPlayback = pDevice->winmm._pHeapData; + pDevice->winmm.pIntermediaryBufferPlayback = pDevice->winmm._pHeapData + (sizeof(MA_WAVEHDR)*pDescriptorPlayback->periodCount); + } else { + pDevice->winmm.pWAVEHDRPlayback = pDevice->winmm._pHeapData + (sizeof(MA_WAVEHDR)*(pDescriptorCapture->periodCount)); + pDevice->winmm.pIntermediaryBufferPlayback = pDevice->winmm._pHeapData + (sizeof(MA_WAVEHDR)*(pDescriptorCapture->periodCount + pDescriptorPlayback->periodCount)) + (pDescriptorCapture->periodSizeInFrames*pDescriptorCapture->periodCount*ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels)); + } + + /* Prepare headers. */ + for (iPeriod = 0; iPeriod < pDescriptorPlayback->periodCount; ++iPeriod) { + ma_uint32 periodSizeInBytes = ma_get_period_size_in_bytes(pDescriptorPlayback->periodSizeInFrames, pDescriptorPlayback->format, pDescriptorPlayback->channels); + + ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].lpData = (char*)(pDevice->winmm.pIntermediaryBufferPlayback + (periodSizeInBytes*iPeriod)); + ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwBufferLength = periodSizeInBytes; + ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwFlags = 0L; + ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwLoops = 0L; + ((MA_PFN_waveOutPrepareHeader)pDevice->pContext->winmm.waveOutPrepareHeader)((MA_HWAVEOUT)pDevice->winmm.hDevicePlayback, &((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod], sizeof(MA_WAVEHDR)); + + /* + The user data of the MA_WAVEHDR structure is a single flag the controls whether or not it is ready for writing. Consider it to be named "isLocked". A value of 0 means + it's unlocked and available for writing. A value of 1 means it's locked. 
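+
+            A rough lifecycle for each playback header, as driven by ma_device_write__winmm() below:
+
+                dwUser == 0                          the application owns it and may copy frames into lpData
+                dwUser == 1, MA_WHDR_DONE cleared    it has been handed to waveOutWrite() and is owned by the driver
+                MA_WHDR_DONE set, event signalled    the driver is finished with it; dwUser is reset to 0 and it is reused
+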
+ */ + ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwUser = 0; + } + } + + return MA_SUCCESS; + +on_error: + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + if (pDevice->winmm.pWAVEHDRCapture != NULL) { + ma_uint32 iPeriod; + for (iPeriod = 0; iPeriod < pDescriptorCapture->periodCount; ++iPeriod) { + ((MA_PFN_waveInUnprepareHeader)pDevice->pContext->winmm.waveInUnprepareHeader)((MA_HWAVEIN)pDevice->winmm.hDeviceCapture, &((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod], sizeof(MA_WAVEHDR)); + } + } + + ((MA_PFN_waveInClose)pDevice->pContext->winmm.waveInClose)((MA_HWAVEIN)pDevice->winmm.hDeviceCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + if (pDevice->winmm.pWAVEHDRCapture != NULL) { + ma_uint32 iPeriod; + for (iPeriod = 0; iPeriod < pDescriptorPlayback->periodCount; ++iPeriod) { + ((MA_PFN_waveOutUnprepareHeader)pDevice->pContext->winmm.waveOutUnprepareHeader)((MA_HWAVEOUT)pDevice->winmm.hDevicePlayback, &((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod], sizeof(MA_WAVEHDR)); + } + } + + ((MA_PFN_waveOutClose)pDevice->pContext->winmm.waveOutClose)((MA_HWAVEOUT)pDevice->winmm.hDevicePlayback); + } + + ma_free(pDevice->winmm._pHeapData, &pDevice->pContext->allocationCallbacks); + + if (errorMsg != NULL && errorMsg[0] != '\0') { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "%s", errorMsg); + } + + return errorCode; +} + +static ma_result ma_device_start__winmm(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + MA_MMRESULT resultMM; + MA_WAVEHDR* pWAVEHDR; + ma_uint32 iPeriod; + + pWAVEHDR = (MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture; + + /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */ + ResetEvent((HANDLE)pDevice->winmm.hEventCapture); + + /* To start the device we attach all of the buffers and then start it. As the buffers are filled with data we will get notifications. */ + for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) { + resultMM = ((MA_PFN_waveInAddBuffer)pDevice->pContext->winmm.waveInAddBuffer)((MA_HWAVEIN)pDevice->winmm.hDeviceCapture, &((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod], sizeof(MA_WAVEHDR)); + if (resultMM != MA_MMSYSERR_NOERROR) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WinMM] Failed to attach input buffers to capture device in preparation for capture."); + return ma_result_from_MMRESULT(resultMM); + } + + /* Make sure all of the buffers start out locked. We don't want to access them until the backend tells us we can. */ + pWAVEHDR[iPeriod].dwUser = 1; /* 1 = locked. */ + } + + /* Capture devices need to be explicitly started, unlike playback devices. */ + resultMM = ((MA_PFN_waveInStart)pDevice->pContext->winmm.waveInStart)((MA_HWAVEIN)pDevice->winmm.hDeviceCapture); + if (resultMM != MA_MMSYSERR_NOERROR) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WinMM] Failed to start backend device."); + return ma_result_from_MMRESULT(resultMM); + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + /* Don't need to do anything for playback. It'll be started automatically in ma_device_start__winmm(). 
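+
+           (Concretely, playback begins when ma_device_write__winmm() below issues the first waveOutWrite() call; see
+           the "The device will be started here" note in that function.)
+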
*/ + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__winmm(ma_device* pDevice) +{ + MA_MMRESULT resultMM; + + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + if (pDevice->winmm.hDeviceCapture == NULL) { + return MA_INVALID_ARGS; + } + + resultMM = ((MA_PFN_waveInReset)pDevice->pContext->winmm.waveInReset)((MA_HWAVEIN)pDevice->winmm.hDeviceCapture); + if (resultMM != MA_MMSYSERR_NOERROR) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[WinMM] WARNING: Failed to reset capture device."); + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_uint32 iPeriod; + MA_WAVEHDR* pWAVEHDR; + + if (pDevice->winmm.hDevicePlayback == NULL) { + return MA_INVALID_ARGS; + } + + /* We need to drain the device. To do this we just loop over each header and if it's locked just wait for the event. */ + pWAVEHDR = (MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback; + for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; iPeriod += 1) { + if (pWAVEHDR[iPeriod].dwUser == 1) { /* 1 = locked. */ + if (WaitForSingleObject((HANDLE)pDevice->winmm.hEventPlayback, INFINITE) != WAIT_OBJECT_0) { + break; /* An error occurred so just abandon ship and stop the device without draining. */ + } + + pWAVEHDR[iPeriod].dwUser = 0; + } + } + + resultMM = ((MA_PFN_waveOutReset)pDevice->pContext->winmm.waveOutReset)((MA_HWAVEOUT)pDevice->winmm.hDevicePlayback); + if (resultMM != MA_MMSYSERR_NOERROR) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[WinMM] WARNING: Failed to reset playback device."); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_write__winmm(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +{ + ma_result result = MA_SUCCESS; + MA_MMRESULT resultMM; + ma_uint32 totalFramesWritten; + MA_WAVEHDR* pWAVEHDR; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pPCMFrames != NULL); + + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + pWAVEHDR = (MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback; + + /* Keep processing as much data as possible. */ + totalFramesWritten = 0; + while (totalFramesWritten < frameCount) { + /* If the current header has some space available we need to write part of it. */ + if (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser == 0) { /* 0 = unlocked. */ + /* + This header has room in it. We copy as much of it as we can. If we end up fully consuming the buffer we need to + write it out and move on to the next iteration. + */ + ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 framesRemainingInHeader = (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwBufferLength/bpf) - pDevice->winmm.headerFramesConsumedPlayback; + + ma_uint32 framesToCopy = ma_min(framesRemainingInHeader, (frameCount - totalFramesWritten)); + const void* pSrc = ma_offset_ptr(pPCMFrames, totalFramesWritten*bpf); + void* pDst = ma_offset_ptr(pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].lpData, pDevice->winmm.headerFramesConsumedPlayback*bpf); + MA_COPY_MEMORY(pDst, pSrc, framesToCopy*bpf); + + pDevice->winmm.headerFramesConsumedPlayback += framesToCopy; + totalFramesWritten += framesToCopy; + + /* If we've consumed the buffer entirely we need to write it out to the device. 
*/ + if (pDevice->winmm.headerFramesConsumedPlayback == (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwBufferLength/bpf)) { + pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser = 1; /* 1 = locked. */ + pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwFlags &= ~MA_WHDR_DONE; /* <-- Need to make sure the WHDR_DONE flag is unset. */ + + /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */ + ResetEvent((HANDLE)pDevice->winmm.hEventPlayback); + + /* The device will be started here. */ + resultMM = ((MA_PFN_waveOutWrite)pDevice->pContext->winmm.waveOutWrite)((MA_HWAVEOUT)pDevice->winmm.hDevicePlayback, &pWAVEHDR[pDevice->winmm.iNextHeaderPlayback], sizeof(MA_WAVEHDR)); + if (resultMM != MA_MMSYSERR_NOERROR) { + result = ma_result_from_MMRESULT(resultMM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WinMM] waveOutWrite() failed."); + break; + } + + /* Make sure we move to the next header. */ + pDevice->winmm.iNextHeaderPlayback = (pDevice->winmm.iNextHeaderPlayback + 1) % pDevice->playback.internalPeriods; + pDevice->winmm.headerFramesConsumedPlayback = 0; + } + + /* If at this point we have consumed the entire input buffer we can return. */ + MA_ASSERT(totalFramesWritten <= frameCount); + if (totalFramesWritten == frameCount) { + break; + } + + /* Getting here means there's more to process. */ + continue; + } + + /* Getting here means there isn't enough room in the buffer and we need to wait for one to become available. */ + if (WaitForSingleObject((HANDLE)pDevice->winmm.hEventPlayback, INFINITE) != WAIT_OBJECT_0) { + result = MA_ERROR; + break; + } + + /* Something happened. If the next buffer has been marked as done we need to reset a bit of state. */ + if ((pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwFlags & MA_WHDR_DONE) != 0) { + pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser = 0; /* 0 = unlocked (make it available for writing). */ + pDevice->winmm.headerFramesConsumedPlayback = 0; + } + + /* If the device has been stopped we need to break. */ + if (ma_device_get_state(pDevice) != ma_device_state_started) { + break; + } + } + + if (pFramesWritten != NULL) { + *pFramesWritten = totalFramesWritten; + } + + return result; +} + +static ma_result ma_device_read__winmm(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + ma_result result = MA_SUCCESS; + MA_MMRESULT resultMM; + ma_uint32 totalFramesRead; + MA_WAVEHDR* pWAVEHDR; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pPCMFrames != NULL); + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + pWAVEHDR = (MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture; + + /* Keep processing as much data as possible. */ + totalFramesRead = 0; + while (totalFramesRead < frameCount) { + /* If the current header has some space available we need to write part of it. */ + if (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser == 0) { /* 0 = unlocked. */ + /* The buffer is available for reading. If we fully consume it we need to add it back to the buffer. 
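+
+               This mirrors the playback path: the driver fills lpData, sets MA_WHDR_DONE and signals hEventCapture; the
+               code below copies frames out of the header and, once the whole buffer has been consumed, re-queues it with
+               waveInAddBuffer() and advances iNextHeaderCapture.
+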
*/ + ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 framesRemainingInHeader = (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwBufferLength/bpf) - pDevice->winmm.headerFramesConsumedCapture; + + ma_uint32 framesToCopy = ma_min(framesRemainingInHeader, (frameCount - totalFramesRead)); + const void* pSrc = ma_offset_ptr(pWAVEHDR[pDevice->winmm.iNextHeaderCapture].lpData, pDevice->winmm.headerFramesConsumedCapture*bpf); + void* pDst = ma_offset_ptr(pPCMFrames, totalFramesRead*bpf); + MA_COPY_MEMORY(pDst, pSrc, framesToCopy*bpf); + + pDevice->winmm.headerFramesConsumedCapture += framesToCopy; + totalFramesRead += framesToCopy; + + /* If we've consumed the buffer entirely we need to add it back to the device. */ + if (pDevice->winmm.headerFramesConsumedCapture == (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwBufferLength/bpf)) { + pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser = 1; /* 1 = locked. */ + pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwFlags &= ~MA_WHDR_DONE; /* <-- Need to make sure the WHDR_DONE flag is unset. */ + + /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */ + ResetEvent((HANDLE)pDevice->winmm.hEventCapture); + + /* The device will be started here. */ + resultMM = ((MA_PFN_waveInAddBuffer)pDevice->pContext->winmm.waveInAddBuffer)((MA_HWAVEIN)pDevice->winmm.hDeviceCapture, &((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[pDevice->winmm.iNextHeaderCapture], sizeof(MA_WAVEHDR)); + if (resultMM != MA_MMSYSERR_NOERROR) { + result = ma_result_from_MMRESULT(resultMM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WinMM] waveInAddBuffer() failed."); + break; + } + + /* Make sure we move to the next header. */ + pDevice->winmm.iNextHeaderCapture = (pDevice->winmm.iNextHeaderCapture + 1) % pDevice->capture.internalPeriods; + pDevice->winmm.headerFramesConsumedCapture = 0; + } + + /* If at this point we have filled the entire input buffer we can return. */ + MA_ASSERT(totalFramesRead <= frameCount); + if (totalFramesRead == frameCount) { + break; + } + + /* Getting here means there's more to process. */ + continue; + } + + /* Getting here means there isn't enough any data left to send to the client which means we need to wait for more. */ + if (WaitForSingleObject((HANDLE)pDevice->winmm.hEventCapture, INFINITE) != WAIT_OBJECT_0) { + result = MA_ERROR; + break; + } + + /* Something happened. If the next buffer has been marked as done we need to reset a bit of state. */ + if ((pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwFlags & MA_WHDR_DONE) != 0) { + pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser = 0; /* 0 = unlocked (make it available for reading). */ + pDevice->winmm.headerFramesConsumedCapture = 0; + } + + /* If the device has been stopped we need to break. 
*/ + if (ma_device_get_state(pDevice) != ma_device_state_started) { + break; + } + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesRead; + } + + return result; +} + +static ma_result ma_context_uninit__winmm(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_winmm); + + ma_dlclose(ma_context_get_log(pContext), pContext->winmm.hWinMM); + return MA_SUCCESS; +} + +static ma_result ma_context_init__winmm(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + MA_ASSERT(pContext != NULL); + + (void)pConfig; + + pContext->winmm.hWinMM = ma_dlopen(ma_context_get_log(pContext), "winmm.dll"); + if (pContext->winmm.hWinMM == NULL) { + return MA_NO_BACKEND; + } + + pContext->winmm.waveOutGetNumDevs = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveOutGetNumDevs"); + pContext->winmm.waveOutGetDevCapsA = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveOutGetDevCapsA"); + pContext->winmm.waveOutOpen = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveOutOpen"); + pContext->winmm.waveOutClose = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveOutClose"); + pContext->winmm.waveOutPrepareHeader = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveOutPrepareHeader"); + pContext->winmm.waveOutUnprepareHeader = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveOutUnprepareHeader"); + pContext->winmm.waveOutWrite = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveOutWrite"); + pContext->winmm.waveOutReset = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveOutReset"); + pContext->winmm.waveInGetNumDevs = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInGetNumDevs"); + pContext->winmm.waveInGetDevCapsA = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInGetDevCapsA"); + pContext->winmm.waveInOpen = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInOpen"); + pContext->winmm.waveInClose = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInClose"); + pContext->winmm.waveInPrepareHeader = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInPrepareHeader"); + pContext->winmm.waveInUnprepareHeader = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInUnprepareHeader"); + pContext->winmm.waveInAddBuffer = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInAddBuffer"); + pContext->winmm.waveInStart = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInStart"); + pContext->winmm.waveInReset = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInReset"); + + pCallbacks->onContextInit = ma_context_init__winmm; + pCallbacks->onContextUninit = ma_context_uninit__winmm; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__winmm; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__winmm; + pCallbacks->onDeviceInit = ma_device_init__winmm; + pCallbacks->onDeviceUninit = ma_device_uninit__winmm; + pCallbacks->onDeviceStart = ma_device_start__winmm; + pCallbacks->onDeviceStop = ma_device_stop__winmm; + pCallbacks->onDeviceRead = ma_device_read__winmm; + pCallbacks->onDeviceWrite = ma_device_write__winmm; + pCallbacks->onDeviceDataLoop = NULL; /* This is a blocking read-write API, so this can be NULL since miniaudio will manage the audio thread for us. 
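+
+       As a rough usage sketch (illustrative only; it simply uses the public context API assumed
+       to be declared earlier in this file) for forcing this backend:
+
+           ma_backend backends[] = { ma_backend_winmm };
+           ma_context context;
+           if (ma_context_init(backends, 1, NULL, &context) != MA_SUCCESS) {
+               ... handle the error ...
+           }
+
+       Devices created from a context initialized this way are serviced by miniaudio's own audio
+       thread, which calls into the onDeviceRead/onDeviceWrite callbacks registered above.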
*/ + + return MA_SUCCESS; +} +#endif + + + + +/****************************************************************************** + +ALSA Backend + +******************************************************************************/ +#ifdef MA_HAS_ALSA + +#include <poll.h> /* poll(), struct pollfd */ +#include <sys/eventfd.h> /* eventfd() */ + +#ifdef MA_NO_RUNTIME_LINKING + +/* asoundlib.h marks some functions with "inline" which isn't always supported. Need to emulate it. */ +#if !defined(__cplusplus) +#if defined(__STRICT_ANSI__) +#if !defined(inline) +#define inline __inline__ __attribute__((always_inline)) +#define MA_INLINE_DEFINED +#endif +#endif +#endif +#include <alsa/asoundlib.h> +#if defined(MA_INLINE_DEFINED) +#undef inline +#undef MA_INLINE_DEFINED +#endif + +typedef snd_pcm_uframes_t ma_snd_pcm_uframes_t; +typedef snd_pcm_sframes_t ma_snd_pcm_sframes_t; +typedef snd_pcm_stream_t ma_snd_pcm_stream_t; +typedef snd_pcm_format_t ma_snd_pcm_format_t; +typedef snd_pcm_access_t ma_snd_pcm_access_t; +typedef snd_pcm_t ma_snd_pcm_t; +typedef snd_pcm_hw_params_t ma_snd_pcm_hw_params_t; +typedef snd_pcm_sw_params_t ma_snd_pcm_sw_params_t; +typedef snd_pcm_format_mask_t ma_snd_pcm_format_mask_t; +typedef snd_pcm_info_t ma_snd_pcm_info_t; +typedef snd_pcm_channel_area_t ma_snd_pcm_channel_area_t; +typedef snd_pcm_chmap_t ma_snd_pcm_chmap_t; +typedef snd_pcm_state_t ma_snd_pcm_state_t; + +/* snd_pcm_stream_t */ +#define MA_SND_PCM_STREAM_PLAYBACK SND_PCM_STREAM_PLAYBACK +#define MA_SND_PCM_STREAM_CAPTURE SND_PCM_STREAM_CAPTURE + +/* snd_pcm_format_t */ +#define MA_SND_PCM_FORMAT_UNKNOWN SND_PCM_FORMAT_UNKNOWN +#define MA_SND_PCM_FORMAT_U8 SND_PCM_FORMAT_U8 +#define MA_SND_PCM_FORMAT_S16_LE SND_PCM_FORMAT_S16_LE +#define MA_SND_PCM_FORMAT_S16_BE SND_PCM_FORMAT_S16_BE +#define MA_SND_PCM_FORMAT_S24_LE SND_PCM_FORMAT_S24_LE +#define MA_SND_PCM_FORMAT_S24_BE SND_PCM_FORMAT_S24_BE +#define MA_SND_PCM_FORMAT_S32_LE SND_PCM_FORMAT_S32_LE +#define MA_SND_PCM_FORMAT_S32_BE SND_PCM_FORMAT_S32_BE +#define MA_SND_PCM_FORMAT_FLOAT_LE SND_PCM_FORMAT_FLOAT_LE +#define MA_SND_PCM_FORMAT_FLOAT_BE SND_PCM_FORMAT_FLOAT_BE +#define MA_SND_PCM_FORMAT_FLOAT64_LE SND_PCM_FORMAT_FLOAT64_LE +#define MA_SND_PCM_FORMAT_FLOAT64_BE SND_PCM_FORMAT_FLOAT64_BE +#define MA_SND_PCM_FORMAT_MU_LAW SND_PCM_FORMAT_MU_LAW +#define MA_SND_PCM_FORMAT_A_LAW SND_PCM_FORMAT_A_LAW +#define MA_SND_PCM_FORMAT_S24_3LE SND_PCM_FORMAT_S24_3LE +#define MA_SND_PCM_FORMAT_S24_3BE SND_PCM_FORMAT_S24_3BE + +/* ma_snd_pcm_access_t */ +#define MA_SND_PCM_ACCESS_MMAP_INTERLEAVED SND_PCM_ACCESS_MMAP_INTERLEAVED +#define MA_SND_PCM_ACCESS_MMAP_NONINTERLEAVED SND_PCM_ACCESS_MMAP_NONINTERLEAVED +#define MA_SND_PCM_ACCESS_MMAP_COMPLEX SND_PCM_ACCESS_MMAP_COMPLEX +#define MA_SND_PCM_ACCESS_RW_INTERLEAVED SND_PCM_ACCESS_RW_INTERLEAVED +#define MA_SND_PCM_ACCESS_RW_NONINTERLEAVED SND_PCM_ACCESS_RW_NONINTERLEAVED + +/* Channel positions.
*/ +#define MA_SND_CHMAP_UNKNOWN SND_CHMAP_UNKNOWN +#define MA_SND_CHMAP_NA SND_CHMAP_NA +#define MA_SND_CHMAP_MONO SND_CHMAP_MONO +#define MA_SND_CHMAP_FL SND_CHMAP_FL +#define MA_SND_CHMAP_FR SND_CHMAP_FR +#define MA_SND_CHMAP_RL SND_CHMAP_RL +#define MA_SND_CHMAP_RR SND_CHMAP_RR +#define MA_SND_CHMAP_FC SND_CHMAP_FC +#define MA_SND_CHMAP_LFE SND_CHMAP_LFE +#define MA_SND_CHMAP_SL SND_CHMAP_SL +#define MA_SND_CHMAP_SR SND_CHMAP_SR +#define MA_SND_CHMAP_RC SND_CHMAP_RC +#define MA_SND_CHMAP_FLC SND_CHMAP_FLC +#define MA_SND_CHMAP_FRC SND_CHMAP_FRC +#define MA_SND_CHMAP_RLC SND_CHMAP_RLC +#define MA_SND_CHMAP_RRC SND_CHMAP_RRC +#define MA_SND_CHMAP_FLW SND_CHMAP_FLW +#define MA_SND_CHMAP_FRW SND_CHMAP_FRW +#define MA_SND_CHMAP_FLH SND_CHMAP_FLH +#define MA_SND_CHMAP_FCH SND_CHMAP_FCH +#define MA_SND_CHMAP_FRH SND_CHMAP_FRH +#define MA_SND_CHMAP_TC SND_CHMAP_TC +#define MA_SND_CHMAP_TFL SND_CHMAP_TFL +#define MA_SND_CHMAP_TFR SND_CHMAP_TFR +#define MA_SND_CHMAP_TFC SND_CHMAP_TFC +#define MA_SND_CHMAP_TRL SND_CHMAP_TRL +#define MA_SND_CHMAP_TRR SND_CHMAP_TRR +#define MA_SND_CHMAP_TRC SND_CHMAP_TRC +#define MA_SND_CHMAP_TFLC SND_CHMAP_TFLC +#define MA_SND_CHMAP_TFRC SND_CHMAP_TFRC +#define MA_SND_CHMAP_TSL SND_CHMAP_TSL +#define MA_SND_CHMAP_TSR SND_CHMAP_TSR +#define MA_SND_CHMAP_LLFE SND_CHMAP_LLFE +#define MA_SND_CHMAP_RLFE SND_CHMAP_RLFE +#define MA_SND_CHMAP_BC SND_CHMAP_BC +#define MA_SND_CHMAP_BLC SND_CHMAP_BLC +#define MA_SND_CHMAP_BRC SND_CHMAP_BRC + +/* Open mode flags. */ +#define MA_SND_PCM_NO_AUTO_RESAMPLE SND_PCM_NO_AUTO_RESAMPLE +#define MA_SND_PCM_NO_AUTO_CHANNELS SND_PCM_NO_AUTO_CHANNELS +#define MA_SND_PCM_NO_AUTO_FORMAT SND_PCM_NO_AUTO_FORMAT +#else +#include /* For EPIPE, etc. */ +typedef unsigned long ma_snd_pcm_uframes_t; +typedef long ma_snd_pcm_sframes_t; +typedef int ma_snd_pcm_stream_t; +typedef int ma_snd_pcm_format_t; +typedef int ma_snd_pcm_access_t; +typedef int ma_snd_pcm_state_t; +typedef struct ma_snd_pcm_t ma_snd_pcm_t; +typedef struct ma_snd_pcm_hw_params_t ma_snd_pcm_hw_params_t; +typedef struct ma_snd_pcm_sw_params_t ma_snd_pcm_sw_params_t; +typedef struct ma_snd_pcm_format_mask_t ma_snd_pcm_format_mask_t; +typedef struct ma_snd_pcm_info_t ma_snd_pcm_info_t; +typedef struct +{ + void* addr; + unsigned int first; + unsigned int step; +} ma_snd_pcm_channel_area_t; +typedef struct +{ + unsigned int channels; + unsigned int pos[1]; +} ma_snd_pcm_chmap_t; + +/* snd_pcm_state_t */ +#define MA_SND_PCM_STATE_OPEN 0 +#define MA_SND_PCM_STATE_SETUP 1 +#define MA_SND_PCM_STATE_PREPARED 2 +#define MA_SND_PCM_STATE_RUNNING 3 +#define MA_SND_PCM_STATE_XRUN 4 +#define MA_SND_PCM_STATE_DRAINING 5 +#define MA_SND_PCM_STATE_PAUSED 6 +#define MA_SND_PCM_STATE_SUSPENDED 7 +#define MA_SND_PCM_STATE_DISCONNECTED 8 + +/* snd_pcm_stream_t */ +#define MA_SND_PCM_STREAM_PLAYBACK 0 +#define MA_SND_PCM_STREAM_CAPTURE 1 + +/* snd_pcm_format_t */ +#define MA_SND_PCM_FORMAT_UNKNOWN -1 +#define MA_SND_PCM_FORMAT_U8 1 +#define MA_SND_PCM_FORMAT_S16_LE 2 +#define MA_SND_PCM_FORMAT_S16_BE 3 +#define MA_SND_PCM_FORMAT_S24_LE 6 +#define MA_SND_PCM_FORMAT_S24_BE 7 +#define MA_SND_PCM_FORMAT_S32_LE 10 +#define MA_SND_PCM_FORMAT_S32_BE 11 +#define MA_SND_PCM_FORMAT_FLOAT_LE 14 +#define MA_SND_PCM_FORMAT_FLOAT_BE 15 +#define MA_SND_PCM_FORMAT_FLOAT64_LE 16 +#define MA_SND_PCM_FORMAT_FLOAT64_BE 17 +#define MA_SND_PCM_FORMAT_MU_LAW 20 +#define MA_SND_PCM_FORMAT_A_LAW 21 +#define MA_SND_PCM_FORMAT_S24_3LE 32 +#define MA_SND_PCM_FORMAT_S24_3BE 33 + +/* snd_pcm_access_t */ +#define 
MA_SND_PCM_ACCESS_MMAP_INTERLEAVED 0 +#define MA_SND_PCM_ACCESS_MMAP_NONINTERLEAVED 1 +#define MA_SND_PCM_ACCESS_MMAP_COMPLEX 2 +#define MA_SND_PCM_ACCESS_RW_INTERLEAVED 3 +#define MA_SND_PCM_ACCESS_RW_NONINTERLEAVED 4 + +/* Channel positions. */ +#define MA_SND_CHMAP_UNKNOWN 0 +#define MA_SND_CHMAP_NA 1 +#define MA_SND_CHMAP_MONO 2 +#define MA_SND_CHMAP_FL 3 +#define MA_SND_CHMAP_FR 4 +#define MA_SND_CHMAP_RL 5 +#define MA_SND_CHMAP_RR 6 +#define MA_SND_CHMAP_FC 7 +#define MA_SND_CHMAP_LFE 8 +#define MA_SND_CHMAP_SL 9 +#define MA_SND_CHMAP_SR 10 +#define MA_SND_CHMAP_RC 11 +#define MA_SND_CHMAP_FLC 12 +#define MA_SND_CHMAP_FRC 13 +#define MA_SND_CHMAP_RLC 14 +#define MA_SND_CHMAP_RRC 15 +#define MA_SND_CHMAP_FLW 16 +#define MA_SND_CHMAP_FRW 17 +#define MA_SND_CHMAP_FLH 18 +#define MA_SND_CHMAP_FCH 19 +#define MA_SND_CHMAP_FRH 20 +#define MA_SND_CHMAP_TC 21 +#define MA_SND_CHMAP_TFL 22 +#define MA_SND_CHMAP_TFR 23 +#define MA_SND_CHMAP_TFC 24 +#define MA_SND_CHMAP_TRL 25 +#define MA_SND_CHMAP_TRR 26 +#define MA_SND_CHMAP_TRC 27 +#define MA_SND_CHMAP_TFLC 28 +#define MA_SND_CHMAP_TFRC 29 +#define MA_SND_CHMAP_TSL 30 +#define MA_SND_CHMAP_TSR 31 +#define MA_SND_CHMAP_LLFE 32 +#define MA_SND_CHMAP_RLFE 33 +#define MA_SND_CHMAP_BC 34 +#define MA_SND_CHMAP_BLC 35 +#define MA_SND_CHMAP_BRC 36 + +/* Open mode flags. */ +#define MA_SND_PCM_NO_AUTO_RESAMPLE 0x00010000 +#define MA_SND_PCM_NO_AUTO_CHANNELS 0x00020000 +#define MA_SND_PCM_NO_AUTO_FORMAT 0x00040000 +#endif + +typedef int (* ma_snd_pcm_open_proc) (ma_snd_pcm_t **pcm, const char *name, ma_snd_pcm_stream_t stream, int mode); +typedef int (* ma_snd_pcm_close_proc) (ma_snd_pcm_t *pcm); +typedef size_t (* ma_snd_pcm_hw_params_sizeof_proc) (void); +typedef int (* ma_snd_pcm_hw_params_any_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params); +typedef int (* ma_snd_pcm_hw_params_set_format_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t val); +typedef int (* ma_snd_pcm_hw_params_set_format_first_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t *format); +typedef void (* ma_snd_pcm_hw_params_get_format_mask_proc) (ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_mask_t *mask); +typedef int (* ma_snd_pcm_hw_params_set_channels_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val); +typedef int (* ma_snd_pcm_hw_params_set_channels_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val); +typedef int (* ma_snd_pcm_hw_params_set_channels_minmax_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *minimum, unsigned int *maximum); +typedef int (* ma_snd_pcm_hw_params_set_rate_resample_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val); +typedef int (* ma_snd_pcm_hw_params_set_rate_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val, int dir); +typedef int (* ma_snd_pcm_hw_params_set_rate_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir); +typedef int (* ma_snd_pcm_hw_params_set_buffer_size_near_proc)(ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_uframes_t *val); +typedef int (* ma_snd_pcm_hw_params_set_periods_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir); +typedef int (* ma_snd_pcm_hw_params_set_access_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_access_t _access); +typedef int (* ma_snd_pcm_hw_params_get_format_proc) (const ma_snd_pcm_hw_params_t 
*params, ma_snd_pcm_format_t *format); +typedef int (* ma_snd_pcm_hw_params_get_channels_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val); +typedef int (* ma_snd_pcm_hw_params_get_channels_min_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val); +typedef int (* ma_snd_pcm_hw_params_get_channels_max_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val); +typedef int (* ma_snd_pcm_hw_params_get_rate_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir); +typedef int (* ma_snd_pcm_hw_params_get_rate_min_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir); +typedef int (* ma_snd_pcm_hw_params_get_rate_max_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir); +typedef int (* ma_snd_pcm_hw_params_get_buffer_size_proc) (const ma_snd_pcm_hw_params_t *params, ma_snd_pcm_uframes_t *val); +typedef int (* ma_snd_pcm_hw_params_get_periods_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir); +typedef int (* ma_snd_pcm_hw_params_get_access_proc) (const ma_snd_pcm_hw_params_t *params, ma_snd_pcm_access_t *_access); +typedef int (* ma_snd_pcm_hw_params_test_format_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t val); +typedef int (* ma_snd_pcm_hw_params_test_channels_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val); +typedef int (* ma_snd_pcm_hw_params_test_rate_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val, int dir); +typedef int (* ma_snd_pcm_hw_params_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params); +typedef size_t (* ma_snd_pcm_sw_params_sizeof_proc) (void); +typedef int (* ma_snd_pcm_sw_params_current_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params); +typedef int (* ma_snd_pcm_sw_params_get_boundary_proc) (const ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t* val); +typedef int (* ma_snd_pcm_sw_params_set_avail_min_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val); +typedef int (* ma_snd_pcm_sw_params_set_start_threshold_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val); +typedef int (* ma_snd_pcm_sw_params_set_stop_threshold_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val); +typedef int (* ma_snd_pcm_sw_params_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params); +typedef size_t (* ma_snd_pcm_format_mask_sizeof_proc) (void); +typedef int (* ma_snd_pcm_format_mask_test_proc) (const ma_snd_pcm_format_mask_t *mask, ma_snd_pcm_format_t val); +typedef ma_snd_pcm_chmap_t * (* ma_snd_pcm_get_chmap_proc) (ma_snd_pcm_t *pcm); +typedef ma_snd_pcm_state_t (* ma_snd_pcm_state_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_prepare_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_start_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_drop_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_drain_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_reset_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_device_name_hint_proc) (int card, const char *iface, void ***hints); +typedef char * (* ma_snd_device_name_get_hint_proc) (const void *hint, const char *id); +typedef int (* ma_snd_card_get_index_proc) (const char *name); +typedef int (* ma_snd_device_name_free_hint_proc) (void **hints); +typedef int (* ma_snd_pcm_mmap_begin_proc) (ma_snd_pcm_t *pcm, const ma_snd_pcm_channel_area_t **areas, ma_snd_pcm_uframes_t *offset, ma_snd_pcm_uframes_t *frames); +typedef 
ma_snd_pcm_sframes_t (* ma_snd_pcm_mmap_commit_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_uframes_t offset, ma_snd_pcm_uframes_t frames); +typedef int (* ma_snd_pcm_recover_proc) (ma_snd_pcm_t *pcm, int err, int silent); +typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_readi_proc) (ma_snd_pcm_t *pcm, void *buffer, ma_snd_pcm_uframes_t size); +typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_writei_proc) (ma_snd_pcm_t *pcm, const void *buffer, ma_snd_pcm_uframes_t size); +typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_avail_proc) (ma_snd_pcm_t *pcm); +typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_avail_update_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_wait_proc) (ma_snd_pcm_t *pcm, int timeout); +typedef int (* ma_snd_pcm_nonblock_proc) (ma_snd_pcm_t *pcm, int nonblock); +typedef int (* ma_snd_pcm_info_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_info_t* info); +typedef size_t (* ma_snd_pcm_info_sizeof_proc) (void); +typedef const char* (* ma_snd_pcm_info_get_name_proc) (const ma_snd_pcm_info_t* info); +typedef int (* ma_snd_pcm_poll_descriptors_proc) (ma_snd_pcm_t *pcm, struct pollfd *pfds, unsigned int space); +typedef int (* ma_snd_pcm_poll_descriptors_count_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_poll_descriptors_revents_proc) (ma_snd_pcm_t *pcm, struct pollfd *pfds, unsigned int nfds, unsigned short *revents); +typedef int (* ma_snd_config_update_free_global_proc) (void); + +/* This array specifies each of the common devices that can be used for both playback and capture. */ +static const char* g_maCommonDeviceNamesALSA[] = { + "default", + "null", + "pulse", + "jack" +}; + +/* This array allows us to blacklist specific playback devices. */ +static const char* g_maBlacklistedPlaybackDeviceNamesALSA[] = { + "" +}; + +/* This array allows us to blacklist specific capture devices. */ +static const char* g_maBlacklistedCaptureDeviceNamesALSA[] = { + "" +}; + + +static ma_snd_pcm_format_t ma_convert_ma_format_to_alsa_format(ma_format format) +{ + ma_snd_pcm_format_t ALSAFormats[] = { + MA_SND_PCM_FORMAT_UNKNOWN, /* ma_format_unknown */ + MA_SND_PCM_FORMAT_U8, /* ma_format_u8 */ + MA_SND_PCM_FORMAT_S16_LE, /* ma_format_s16 */ + MA_SND_PCM_FORMAT_S24_3LE, /* ma_format_s24 */ + MA_SND_PCM_FORMAT_S32_LE, /* ma_format_s32 */ + MA_SND_PCM_FORMAT_FLOAT_LE /* ma_format_f32 */ + }; + + if (ma_is_big_endian()) { + ALSAFormats[0] = MA_SND_PCM_FORMAT_UNKNOWN; + ALSAFormats[1] = MA_SND_PCM_FORMAT_U8; + ALSAFormats[2] = MA_SND_PCM_FORMAT_S16_BE; + ALSAFormats[3] = MA_SND_PCM_FORMAT_S24_3BE; + ALSAFormats[4] = MA_SND_PCM_FORMAT_S32_BE; + ALSAFormats[5] = MA_SND_PCM_FORMAT_FLOAT_BE; + } + + return ALSAFormats[format]; +} + +static ma_format ma_format_from_alsa(ma_snd_pcm_format_t formatALSA) +{ + if (ma_is_little_endian()) { + switch (formatALSA) { + case MA_SND_PCM_FORMAT_S16_LE: return ma_format_s16; + case MA_SND_PCM_FORMAT_S24_3LE: return ma_format_s24; + case MA_SND_PCM_FORMAT_S32_LE: return ma_format_s32; + case MA_SND_PCM_FORMAT_FLOAT_LE: return ma_format_f32; + default: break; + } + } else { + switch (formatALSA) { + case MA_SND_PCM_FORMAT_S16_BE: return ma_format_s16; + case MA_SND_PCM_FORMAT_S24_3BE: return ma_format_s24; + case MA_SND_PCM_FORMAT_S32_BE: return ma_format_s32; + case MA_SND_PCM_FORMAT_FLOAT_BE: return ma_format_f32; + default: break; + } + } + + /* Endian agnostic. 
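+
+       For example, on a little-endian build the switch above maps MA_SND_PCM_FORMAT_S16_LE to
+       ma_format_s16, whereas MA_SND_PCM_FORMAT_S16_BE falls through and is reported as
+       ma_format_unknown. MA_SND_PCM_FORMAT_U8 is handled below because it has no endianness.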
*/ + switch (formatALSA) { + case MA_SND_PCM_FORMAT_U8: return ma_format_u8; + default: return ma_format_unknown; + } +} + +static ma_channel ma_convert_alsa_channel_position_to_ma_channel(unsigned int alsaChannelPos) +{ + switch (alsaChannelPos) + { + case MA_SND_CHMAP_MONO: return MA_CHANNEL_MONO; + case MA_SND_CHMAP_FL: return MA_CHANNEL_FRONT_LEFT; + case MA_SND_CHMAP_FR: return MA_CHANNEL_FRONT_RIGHT; + case MA_SND_CHMAP_RL: return MA_CHANNEL_BACK_LEFT; + case MA_SND_CHMAP_RR: return MA_CHANNEL_BACK_RIGHT; + case MA_SND_CHMAP_FC: return MA_CHANNEL_FRONT_CENTER; + case MA_SND_CHMAP_LFE: return MA_CHANNEL_LFE; + case MA_SND_CHMAP_SL: return MA_CHANNEL_SIDE_LEFT; + case MA_SND_CHMAP_SR: return MA_CHANNEL_SIDE_RIGHT; + case MA_SND_CHMAP_RC: return MA_CHANNEL_BACK_CENTER; + case MA_SND_CHMAP_FLC: return MA_CHANNEL_FRONT_LEFT_CENTER; + case MA_SND_CHMAP_FRC: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case MA_SND_CHMAP_RLC: return 0; + case MA_SND_CHMAP_RRC: return 0; + case MA_SND_CHMAP_FLW: return 0; + case MA_SND_CHMAP_FRW: return 0; + case MA_SND_CHMAP_FLH: return 0; + case MA_SND_CHMAP_FCH: return 0; + case MA_SND_CHMAP_FRH: return 0; + case MA_SND_CHMAP_TC: return MA_CHANNEL_TOP_CENTER; + case MA_SND_CHMAP_TFL: return MA_CHANNEL_TOP_FRONT_LEFT; + case MA_SND_CHMAP_TFR: return MA_CHANNEL_TOP_FRONT_RIGHT; + case MA_SND_CHMAP_TFC: return MA_CHANNEL_TOP_FRONT_CENTER; + case MA_SND_CHMAP_TRL: return MA_CHANNEL_TOP_BACK_LEFT; + case MA_SND_CHMAP_TRR: return MA_CHANNEL_TOP_BACK_RIGHT; + case MA_SND_CHMAP_TRC: return MA_CHANNEL_TOP_BACK_CENTER; + default: break; + } + + return 0; +} + +static ma_bool32 ma_is_common_device_name__alsa(const char* name) +{ + size_t iName; + for (iName = 0; iName < ma_countof(g_maCommonDeviceNamesALSA); ++iName) { + if (ma_strcmp(name, g_maCommonDeviceNamesALSA[iName]) == 0) { + return MA_TRUE; + } + } + + return MA_FALSE; +} + + +static ma_bool32 ma_is_playback_device_blacklisted__alsa(const char* name) +{ + size_t iName; + for (iName = 0; iName < ma_countof(g_maBlacklistedPlaybackDeviceNamesALSA); ++iName) { + if (ma_strcmp(name, g_maBlacklistedPlaybackDeviceNamesALSA[iName]) == 0) { + return MA_TRUE; + } + } + + return MA_FALSE; +} + +static ma_bool32 ma_is_capture_device_blacklisted__alsa(const char* name) +{ + size_t iName; + for (iName = 0; iName < ma_countof(g_maBlacklistedCaptureDeviceNamesALSA); ++iName) { + if (ma_strcmp(name, g_maBlacklistedCaptureDeviceNamesALSA[iName]) == 0) { + return MA_TRUE; + } + } + + return MA_FALSE; +} + +static ma_bool32 ma_is_device_blacklisted__alsa(ma_device_type deviceType, const char* name) +{ + if (deviceType == ma_device_type_playback) { + return ma_is_playback_device_blacklisted__alsa(name); + } else { + return ma_is_capture_device_blacklisted__alsa(name); + } +} + + +static const char* ma_find_char(const char* str, char c, int* index) +{ + int i = 0; + for (;;) { + if (str[i] == '\0') { + if (index) *index = -1; + return NULL; + } + + if (str[i] == c) { + if (index) *index = i; + return str + i; + } + + i += 1; + } + + /* Should never get here, but treat it as though the character was not found to make me feel better inside. */ + if (index) *index = -1; + return NULL; +} + +static ma_bool32 ma_is_device_name_in_hw_format__alsa(const char* hwid) +{ + /* This function is just checking whether or not hwid is in "hw:%d,%d" format. 
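+
+       For example: "hw:0,0" and "hw:1,3" pass the checks below, while "hw:CARD=I82801AAICH,DEV=0",
+       "default" and ":0,0" (missing the "hw" prefix) do not.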
*/ + + int commaPos; + const char* dev; + int i; + + if (hwid == NULL) { + return MA_FALSE; + } + + if (hwid[0] != 'h' || hwid[1] != 'w' || hwid[2] != ':') { + return MA_FALSE; + } + + hwid += 3; + + dev = ma_find_char(hwid, ',', &commaPos); + if (dev == NULL) { + return MA_FALSE; + } else { + dev += 1; /* Skip past the ",". */ + } + + /* Check if the part between the ":" and the "," contains only numbers. If not, return false. */ + for (i = 0; i < commaPos; ++i) { + if (hwid[i] < '0' || hwid[i] > '9') { + return MA_FALSE; + } + } + + /* Check if everything after the "," is numeric. If not, return false. */ + i = 0; + while (dev[i] != '\0') { + if (dev[i] < '0' || dev[i] > '9') { + return MA_FALSE; + } + i += 1; + } + + return MA_TRUE; +} + +static int ma_convert_device_name_to_hw_format__alsa(ma_context* pContext, char* dst, size_t dstSize, const char* src) /* Returns 0 on success, non-0 on error. */ +{ + /* src should look something like this: "hw:CARD=I82801AAICH,DEV=0" */ + + int colonPos; + int commaPos; + char card[256]; + const char* dev; + int cardIndex; + + if (dst == NULL) { + return -1; + } + if (dstSize < 7) { + return -1; /* Absolute minimum size of the output buffer is 7 bytes. */ + } + + *dst = '\0'; /* Safety. */ + if (src == NULL) { + return -1; + } + + /* If the input name is already in "hw:%d,%d" format, just return that verbatim. */ + if (ma_is_device_name_in_hw_format__alsa(src)) { + return ma_strcpy_s(dst, dstSize, src); + } + + src = ma_find_char(src, ':', &colonPos); + if (src == NULL) { + return -1; /* Couldn't find a colon */ + } + + dev = ma_find_char(src, ',', &commaPos); + if (dev == NULL) { + dev = "0"; + ma_strncpy_s(card, sizeof(card), src+6, (size_t)-1); /* +6 = ":CARD=" */ + } else { + dev = dev + 5; /* +5 = ",DEV=" */ + ma_strncpy_s(card, sizeof(card), src+6, commaPos-6); /* +6 = ":CARD=" */ + } + + cardIndex = ((ma_snd_card_get_index_proc)pContext->alsa.snd_card_get_index)(card); + if (cardIndex < 0) { + return -2; /* Failed to retrieve the card index. */ + } + + + /* Construction. */ + dst[0] = 'h'; dst[1] = 'w'; dst[2] = ':'; + if (ma_itoa_s(cardIndex, dst+3, dstSize-3, 10) != 0) { + return -3; + } + if (ma_strcat_s(dst, dstSize, ",") != 0) { + return -3; + } + if (ma_strcat_s(dst, dstSize, dev) != 0) { + return -3; + } + + return 0; +} + +static ma_bool32 ma_does_id_exist_in_list__alsa(ma_device_id* pUniqueIDs, ma_uint32 count, const char* pHWID) +{ + ma_uint32 i; + + MA_ASSERT(pHWID != NULL); + + for (i = 0; i < count; ++i) { + if (ma_strcmp(pUniqueIDs[i].alsa, pHWID) == 0) { + return MA_TRUE; + } + } + + return MA_FALSE; +} + + +static ma_result ma_context_open_pcm__alsa(ma_context* pContext, ma_share_mode shareMode, ma_device_type deviceType, const ma_device_id* pDeviceID, int openMode, ma_snd_pcm_t** ppPCM) +{ + ma_snd_pcm_t* pPCM; + ma_snd_pcm_stream_t stream; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppPCM != NULL); + + *ppPCM = NULL; + pPCM = NULL; + + stream = (deviceType == ma_device_type_playback) ? MA_SND_PCM_STREAM_PLAYBACK : MA_SND_PCM_STREAM_CAPTURE; + + if (pDeviceID == NULL) { + ma_bool32 isDeviceOpen; + size_t i; + + /* + We're opening the default device. I don't know if trying anything other than "default" is necessary, but it makes + me feel better to try as hard as we can get to get _something_ working. 
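+
+       For a shared-mode playback device the list built below works out to trying, in order:
+       "default", "dmix", "dmix:0", "dmix:0,0", "hw", "hw:0", "hw:0,0". Capture swaps the dmix
+       entries for dsnoop, and exclusive mode goes straight from "default" to the "hw" entries.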
+ */ + const char* defaultDeviceNames[] = { + "default", + NULL, + NULL, + NULL, + NULL, + NULL, + NULL + }; + + if (shareMode == ma_share_mode_exclusive) { + defaultDeviceNames[1] = "hw"; + defaultDeviceNames[2] = "hw:0"; + defaultDeviceNames[3] = "hw:0,0"; + } else { + if (deviceType == ma_device_type_playback) { + defaultDeviceNames[1] = "dmix"; + defaultDeviceNames[2] = "dmix:0"; + defaultDeviceNames[3] = "dmix:0,0"; + } else { + defaultDeviceNames[1] = "dsnoop"; + defaultDeviceNames[2] = "dsnoop:0"; + defaultDeviceNames[3] = "dsnoop:0,0"; + } + defaultDeviceNames[4] = "hw"; + defaultDeviceNames[5] = "hw:0"; + defaultDeviceNames[6] = "hw:0,0"; + } + + isDeviceOpen = MA_FALSE; + for (i = 0; i < ma_countof(defaultDeviceNames); ++i) { + if (defaultDeviceNames[i] != NULL && defaultDeviceNames[i][0] != '\0') { + if (((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, defaultDeviceNames[i], stream, openMode) == 0) { + isDeviceOpen = MA_TRUE; + break; + } + } + } + + if (!isDeviceOpen) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_open() failed when trying to open an appropriate default device."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + } else { + /* + We're trying to open a specific device. There's a few things to consider here: + + miniaudio recongnizes a special format of device id that excludes the "hw", "dmix", etc. prefix. It looks like this: ":0,0", ":0,1", etc. When + an ID of this format is specified, it indicates to miniaudio that it can try different combinations of plugins ("hw", "dmix", etc.) until it + finds an appropriate one that works. This comes in very handy when trying to open a device in shared mode ("dmix"), vs exclusive mode ("hw"). + */ + + /* May end up needing to make small adjustments to the ID, so make a copy. */ + ma_device_id deviceID = *pDeviceID; + int resultALSA = -ENODEV; + + if (deviceID.alsa[0] != ':') { + /* The ID is not in ":0,0" format. Use the ID exactly as-is. */ + resultALSA = ((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, deviceID.alsa, stream, openMode); + } else { + char hwid[256]; + + /* The ID is in ":0,0" format. Try different plugins depending on the shared mode. */ + if (deviceID.alsa[1] == '\0') { + deviceID.alsa[0] = '\0'; /* An ID of ":" should be converted to "". */ + } + + if (shareMode == ma_share_mode_shared) { + if (deviceType == ma_device_type_playback) { + ma_strcpy_s(hwid, sizeof(hwid), "dmix"); + } else { + ma_strcpy_s(hwid, sizeof(hwid), "dsnoop"); + } + + if (ma_strcat_s(hwid, sizeof(hwid), deviceID.alsa) == 0) { + resultALSA = ((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, hwid, stream, openMode); + } + } + + /* If at this point we still don't have an open device it means we're either preferencing exclusive mode or opening with "dmix"/"dsnoop" failed. 
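+
+           As an illustration of the special ID format (hypothetical ID): for a device ID of
+           ":1,0" on a shared-mode playback device, the branch above first attempts to open
+           "dmix:1,0"; if that fails, the fallback below retries with "hw:1,0".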
*/ + if (resultALSA != 0) { + ma_strcpy_s(hwid, sizeof(hwid), "hw"); + if (ma_strcat_s(hwid, sizeof(hwid), deviceID.alsa) == 0) { + resultALSA = ((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, hwid, stream, openMode); + } + } + } + + if (resultALSA < 0) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_open() failed."); + return ma_result_from_errno(-resultALSA); + } + } + + *ppPCM = pPCM; + return MA_SUCCESS; +} + + +static ma_result ma_context_enumerate_devices__alsa(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + int resultALSA; + ma_bool32 cbResult = MA_TRUE; + char** ppDeviceHints; + ma_device_id* pUniqueIDs = NULL; + ma_uint32 uniqueIDCount = 0; + char** ppNextDeviceHint; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + ma_mutex_lock(&pContext->alsa.internalDeviceEnumLock); + + resultALSA = ((ma_snd_device_name_hint_proc)pContext->alsa.snd_device_name_hint)(-1, "pcm", (void***)&ppDeviceHints); + if (resultALSA < 0) { + ma_mutex_unlock(&pContext->alsa.internalDeviceEnumLock); + return ma_result_from_errno(-resultALSA); + } + + ppNextDeviceHint = ppDeviceHints; + while (*ppNextDeviceHint != NULL) { + char* NAME = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "NAME"); + char* DESC = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "DESC"); + char* IOID = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "IOID"); + ma_device_type deviceType = ma_device_type_playback; + ma_bool32 stopEnumeration = MA_FALSE; + char hwid[sizeof(pUniqueIDs->alsa)]; + ma_device_info deviceInfo; + + if ((IOID == NULL || ma_strcmp(IOID, "Output") == 0)) { + deviceType = ma_device_type_playback; + } + if ((IOID != NULL && ma_strcmp(IOID, "Input" ) == 0)) { + deviceType = ma_device_type_capture; + } + + if (NAME != NULL) { + if (pContext->alsa.useVerboseDeviceEnumeration) { + /* Verbose mode. Use the name exactly as-is. */ + ma_strncpy_s(hwid, sizeof(hwid), NAME, (size_t)-1); + } else { + /* Simplified mode. Use ":%d,%d" format. */ + if (ma_convert_device_name_to_hw_format__alsa(pContext, hwid, sizeof(hwid), NAME) == 0) { + /* + At this point, hwid looks like "hw:0,0". In simplified enumeration mode, we actually want to strip off the + plugin name so it looks like ":0,0". The reason for this is that this special format is detected at device + initialization time and is used as an indicator to try and use the most appropriate plugin depending on the + device type and sharing mode. + */ + char* dst = hwid; + char* src = hwid+2; + while ((*dst++ = *src++)); + } else { + /* Conversion to "hw:%d,%d" failed. Just use the name as-is. */ + ma_strncpy_s(hwid, sizeof(hwid), NAME, (size_t)-1); + } + + if (ma_does_id_exist_in_list__alsa(pUniqueIDs, uniqueIDCount, hwid)) { + goto next_device; /* The device has already been enumerated. Move on to the next one. */ + } else { + /* The device has not yet been enumerated. Make sure it's added to our list so that it's not enumerated again. */ + size_t newCapacity = sizeof(*pUniqueIDs) * (uniqueIDCount + 1); + ma_device_id* pNewUniqueIDs = (ma_device_id*)ma_realloc(pUniqueIDs, newCapacity, &pContext->allocationCallbacks); + if (pNewUniqueIDs == NULL) { + goto next_device; /* Failed to allocate memory. 
*/ + } + + pUniqueIDs = pNewUniqueIDs; + MA_COPY_MEMORY(pUniqueIDs[uniqueIDCount].alsa, hwid, sizeof(hwid)); + uniqueIDCount += 1; + } + } + } else { + MA_ZERO_MEMORY(hwid, sizeof(hwid)); + } + + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.id.alsa, sizeof(deviceInfo.id.alsa), hwid, (size_t)-1); + + /* + There's no good way to determine whether or not a device is the default on Linux. We're just going to do something simple and + just use the name of "default" as the indicator. + */ + if (ma_strcmp(deviceInfo.id.alsa, "default") == 0) { + deviceInfo.isDefault = MA_TRUE; + } + + + /* + DESC is the friendly name. We treat this slightly differently depending on whether or not we are using verbose + device enumeration. In verbose mode we want to take the entire description so that the end-user can distinguish + between the subdevices of each card/dev pair. In simplified mode, however, we only want the first part of the + description. + + The value in DESC seems to be split into two lines, with the first line being the name of the device and the + second line being a description of the device. I don't like having the description be across two lines because + it makes formatting ugly and annoying. I'm therefore deciding to put it all on a single line with the second line + being put into parentheses. In simplified mode I'm just stripping the second line entirely. + */ + if (DESC != NULL) { + int lfPos; + const char* line2 = ma_find_char(DESC, '\n', &lfPos); + if (line2 != NULL) { + line2 += 1; /* Skip past the new-line character. */ + + if (pContext->alsa.useVerboseDeviceEnumeration) { + /* Verbose mode. Put the second line in brackets. */ + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, lfPos); + ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), " ("); + ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), line2); + ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), ")"); + } else { + /* Simplified mode. Strip the second line entirely. */ + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, lfPos); + } + } else { + /* There's no second line. Just copy the whole description. */ + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, (size_t)-1); + } + } + + if (!ma_is_device_blacklisted__alsa(deviceType, NAME)) { + cbResult = callback(pContext, deviceType, &deviceInfo, pUserData); + } + + /* + Some devices are both playback and capture, but they are only enumerated by ALSA once. We need to fire the callback + again for the other device type in this case. We do this for known devices and where the IOID hint is NULL, which + means both Input and Output. + */ + if (cbResult) { + if (ma_is_common_device_name__alsa(NAME) || IOID == NULL) { + if (deviceType == ma_device_type_playback) { + if (!ma_is_capture_device_blacklisted__alsa(NAME)) { + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + } else { + if (!ma_is_playback_device_blacklisted__alsa(NAME)) { + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + } + } + } + + if (cbResult == MA_FALSE) { + stopEnumeration = MA_TRUE; + } + + next_device: + free(NAME); + free(DESC); + free(IOID); + ppNextDeviceHint += 1; + + /* We need to stop enumeration if the callback returned false. 
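+
+       A minimal sketch of how this enumeration is typically driven from the outside (illustrative
+       only; it assumes the public ma_context_enumerate_devices() API declared elsewhere in this
+       file and an already-initialized context):
+
+           static ma_bool32 on_device(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData)
+           {
+               (void)pContext; (void)pUserData;
+               printf("%s: %s\n", (deviceType == ma_device_type_playback) ? "Playback" : "Capture", pInfo->name);
+               return MA_TRUE;   <-- keep enumerating; returning MA_FALSE stops the loop here
+           }
+
+           ma_context_enumerate_devices(&context, on_device, NULL);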
*/ + if (stopEnumeration) { + break; + } + } + + ma_free(pUniqueIDs, &pContext->allocationCallbacks); + ((ma_snd_device_name_free_hint_proc)pContext->alsa.snd_device_name_free_hint)((void**)ppDeviceHints); + + ma_mutex_unlock(&pContext->alsa.internalDeviceEnumLock); + + return MA_SUCCESS; +} + + +typedef struct +{ + ma_device_type deviceType; + const ma_device_id* pDeviceID; + ma_share_mode shareMode; + ma_device_info* pDeviceInfo; + ma_bool32 foundDevice; +} ma_context_get_device_info_enum_callback_data__alsa; + +static ma_bool32 ma_context_get_device_info_enum_callback__alsa(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pDeviceInfo, void* pUserData) +{ + ma_context_get_device_info_enum_callback_data__alsa* pData = (ma_context_get_device_info_enum_callback_data__alsa*)pUserData; + MA_ASSERT(pData != NULL); + + (void)pContext; + + if (pData->pDeviceID == NULL && ma_strcmp(pDeviceInfo->id.alsa, "default") == 0) { + ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pDeviceInfo->name, (size_t)-1); + pData->foundDevice = MA_TRUE; + } else { + if (pData->deviceType == deviceType && (pData->pDeviceID != NULL && ma_strcmp(pData->pDeviceID->alsa, pDeviceInfo->id.alsa) == 0)) { + ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pDeviceInfo->name, (size_t)-1); + pData->foundDevice = MA_TRUE; + } + } + + /* Keep enumerating until we have found the device. */ + return !pData->foundDevice; +} + +static void ma_context_test_rate_and_add_native_data_format__alsa(ma_context* pContext, ma_snd_pcm_t* pPCM, ma_snd_pcm_hw_params_t* pHWParams, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 flags, ma_device_info* pDeviceInfo) +{ + MA_ASSERT(pPCM != NULL); + MA_ASSERT(pHWParams != NULL); + MA_ASSERT(pDeviceInfo != NULL); + + if (pDeviceInfo->nativeDataFormatCount < ma_countof(pDeviceInfo->nativeDataFormats) && ((ma_snd_pcm_hw_params_test_rate_proc)pContext->alsa.snd_pcm_hw_params_test_rate)(pPCM, pHWParams, sampleRate, 0) == 0) { + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format = format; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels = channels; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = sampleRate; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags = flags; + pDeviceInfo->nativeDataFormatCount += 1; + } +} + +static void ma_context_iterate_rates_and_add_native_data_format__alsa(ma_context* pContext, ma_snd_pcm_t* pPCM, ma_snd_pcm_hw_params_t* pHWParams, ma_format format, ma_uint32 channels, ma_uint32 flags, ma_device_info* pDeviceInfo) +{ + ma_uint32 iSampleRate; + unsigned int minSampleRate; + unsigned int maxSampleRate; + int sampleRateDir; /* Not used. Just passed into snd_pcm_hw_params_get_rate_min/max(). */ + + /* There could be a range. */ + ((ma_snd_pcm_hw_params_get_rate_min_proc)pContext->alsa.snd_pcm_hw_params_get_rate_min)(pHWParams, &minSampleRate, &sampleRateDir); + ((ma_snd_pcm_hw_params_get_rate_max_proc)pContext->alsa.snd_pcm_hw_params_get_rate_max)(pHWParams, &maxSampleRate, &sampleRateDir); + + /* Make sure our sample rates are clamped to sane values. Stupid devices like "pulse" will reports rates like "1" which is ridiculus. 
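+
+       For example (hypothetical numbers): a device reporting a rate range of [1, 10000000] is
+       clamped below to [ma_standard_sample_rate_min, ma_standard_sample_rate_max] before the
+       standard rates are tested against it, and the clamped min/max themselves are added at the
+       end if they aren't already standard rates.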
*/ + minSampleRate = ma_clamp(minSampleRate, (unsigned int)ma_standard_sample_rate_min, (unsigned int)ma_standard_sample_rate_max); + maxSampleRate = ma_clamp(maxSampleRate, (unsigned int)ma_standard_sample_rate_min, (unsigned int)ma_standard_sample_rate_max); + + for (iSampleRate = 0; iSampleRate < ma_countof(g_maStandardSampleRatePriorities); iSampleRate += 1) { + ma_uint32 standardSampleRate = g_maStandardSampleRatePriorities[iSampleRate]; + + if (standardSampleRate >= minSampleRate && standardSampleRate <= maxSampleRate) { + ma_context_test_rate_and_add_native_data_format__alsa(pContext, pPCM, pHWParams, format, channels, standardSampleRate, flags, pDeviceInfo); + } + } + + /* Now make sure our min and max rates are included just in case they aren't in the range of our standard rates. */ + if (!ma_is_standard_sample_rate(minSampleRate)) { + ma_context_test_rate_and_add_native_data_format__alsa(pContext, pPCM, pHWParams, format, channels, minSampleRate, flags, pDeviceInfo); + } + + if (!ma_is_standard_sample_rate(maxSampleRate) && maxSampleRate != minSampleRate) { + ma_context_test_rate_and_add_native_data_format__alsa(pContext, pPCM, pHWParams, format, channels, maxSampleRate, flags, pDeviceInfo); + } +} + +static ma_result ma_context_get_device_info__alsa(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + ma_context_get_device_info_enum_callback_data__alsa data; + ma_result result; + int resultALSA; + ma_snd_pcm_t* pPCM; + ma_snd_pcm_hw_params_t* pHWParams; + ma_uint32 iFormat; + ma_uint32 iChannel; + + MA_ASSERT(pContext != NULL); + + /* We just enumerate to find basic information about the device. */ + data.deviceType = deviceType; + data.pDeviceID = pDeviceID; + data.pDeviceInfo = pDeviceInfo; + data.foundDevice = MA_FALSE; + result = ma_context_enumerate_devices__alsa(pContext, ma_context_get_device_info_enum_callback__alsa, &data); + if (result != MA_SUCCESS) { + return result; + } + + if (!data.foundDevice) { + return MA_NO_DEVICE; + } + + if (ma_strcmp(pDeviceInfo->id.alsa, "default") == 0) { + pDeviceInfo->isDefault = MA_TRUE; + } + + /* For detailed info we need to open the device. */ + result = ma_context_open_pcm__alsa(pContext, ma_share_mode_shared, deviceType, pDeviceID, 0, &pPCM); + if (result != MA_SUCCESS) { + return result; + } + + /* We need to initialize a HW parameters object in order to know what formats are supported. */ + pHWParams = (ma_snd_pcm_hw_params_t*)ma_calloc(((ma_snd_pcm_hw_params_sizeof_proc)pContext->alsa.snd_pcm_hw_params_sizeof)(), &pContext->allocationCallbacks); + if (pHWParams == NULL) { + ((ma_snd_pcm_close_proc)pContext->alsa.snd_pcm_close)(pPCM); + return MA_OUT_OF_MEMORY; + } + + resultALSA = ((ma_snd_pcm_hw_params_any_proc)pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams); + if (resultALSA < 0) { + ma_free(pHWParams, &pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pContext->alsa.snd_pcm_close)(pPCM); + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize hardware parameters. snd_pcm_hw_params_any() failed."); + return ma_result_from_errno(-resultALSA); + } + + /* + Some ALSA devices can support many permutations of formats, channels and rates. We only support + a fixed number of permutations which means we need to employ some strategies to ensure the best + combinations are returned. 
An example is the "pulse" device which can do it's own data conversion + in software and as a result can support any combination of format, channels and rate. + + We want to ensure the the first data formats are the best. We have a list of favored sample + formats and sample rates, so these will be the basis of our iteration. + */ + + /* Formats. We just iterate over our standard formats and test them, making sure we reset the configuration space each iteration. */ + for (iFormat = 0; iFormat < ma_countof(g_maFormatPriorities); iFormat += 1) { + ma_format format = g_maFormatPriorities[iFormat]; + + /* + For each format we need to make sure we reset the configuration space so we don't return + channel counts and rates that aren't compatible with a format. + */ + ((ma_snd_pcm_hw_params_any_proc)pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams); + + /* Test the format first. If this fails it means the format is not supported and we can skip it. */ + if (((ma_snd_pcm_hw_params_test_format_proc)pContext->alsa.snd_pcm_hw_params_test_format)(pPCM, pHWParams, ma_convert_ma_format_to_alsa_format(format)) == 0) { + /* The format is supported. */ + unsigned int minChannels; + unsigned int maxChannels; + + /* + The configuration space needs to be restricted to this format so we can get an accurate + picture of which sample rates and channel counts are support with this format. + */ + ((ma_snd_pcm_hw_params_set_format_proc)pContext->alsa.snd_pcm_hw_params_set_format)(pPCM, pHWParams, ma_convert_ma_format_to_alsa_format(format)); + + /* Now we need to check for supported channels. */ + ((ma_snd_pcm_hw_params_get_channels_min_proc)pContext->alsa.snd_pcm_hw_params_get_channels_min)(pHWParams, &minChannels); + ((ma_snd_pcm_hw_params_get_channels_max_proc)pContext->alsa.snd_pcm_hw_params_get_channels_max)(pHWParams, &maxChannels); + + if (minChannels > MA_MAX_CHANNELS) { + continue; /* Too many channels. */ + } + if (maxChannels < MA_MIN_CHANNELS) { + continue; /* Not enough channels. */ + } + + /* + Make sure the channel count is clamped. This is mainly intended for the max channels + because some devices can report an unbound maximum. + */ + minChannels = ma_clamp(minChannels, MA_MIN_CHANNELS, MA_MAX_CHANNELS); + maxChannels = ma_clamp(maxChannels, MA_MIN_CHANNELS, MA_MAX_CHANNELS); + + if (minChannels == MA_MIN_CHANNELS && maxChannels == MA_MAX_CHANNELS) { + /* The device supports all channels. Don't iterate over every single one. Instead just set the channels to 0 which means all channels are supported. */ + ma_context_iterate_rates_and_add_native_data_format__alsa(pContext, pPCM, pHWParams, format, 0, 0, pDeviceInfo); /* Intentionally setting the channel count to 0 as that means all channels are supported. */ + } else { + /* The device only supports a specific set of channels. We need to iterate over all of them. */ + for (iChannel = minChannels; iChannel <= maxChannels; iChannel += 1) { + /* Test the channel before applying it to the configuration space. */ + unsigned int channels = iChannel; + + /* Make sure our channel range is reset before testing again or else we'll always fail the test. */ + ((ma_snd_pcm_hw_params_any_proc)pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams); + ((ma_snd_pcm_hw_params_set_format_proc)pContext->alsa.snd_pcm_hw_params_set_format)(pPCM, pHWParams, ma_convert_ma_format_to_alsa_format(format)); + + if (((ma_snd_pcm_hw_params_test_channels_proc)pContext->alsa.snd_pcm_hw_params_test_channels)(pPCM, pHWParams, channels) == 0) { + /* The channel count is supported. 
*/ + + /* The configuration space now needs to be restricted to the channel count before extracting the sample rate. */ + ((ma_snd_pcm_hw_params_set_channels_proc)pContext->alsa.snd_pcm_hw_params_set_channels)(pPCM, pHWParams, channels); + + /* Only after the configuration space has been restricted to the specific channel count should we iterate over our sample rates. */ + ma_context_iterate_rates_and_add_native_data_format__alsa(pContext, pPCM, pHWParams, format, channels, 0, pDeviceInfo); + } else { + /* The channel count is not supported. Skip. */ + } + } + } + } else { + /* The format is not supported. Skip. */ + } + } + + ma_free(pHWParams, &pContext->allocationCallbacks); + + ((ma_snd_pcm_close_proc)pContext->alsa.snd_pcm_close)(pPCM); + return MA_SUCCESS; +} + +static ma_result ma_device_uninit__alsa(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if ((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture) { + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture); + close(pDevice->alsa.wakeupfdCapture); + ma_free(pDevice->alsa.pPollDescriptorsCapture, &pDevice->pContext->allocationCallbacks); + } + + if ((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback) { + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback); + close(pDevice->alsa.wakeupfdPlayback); + ma_free(pDevice->alsa.pPollDescriptorsPlayback, &pDevice->pContext->allocationCallbacks); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_init_by_type__alsa(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptor, ma_device_type deviceType) +{ + ma_result result; + int resultALSA; + ma_snd_pcm_t* pPCM; + ma_bool32 isUsingMMap; + ma_snd_pcm_format_t formatALSA; + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_channel internalChannelMap[MA_MAX_CHANNELS]; + ma_uint32 internalPeriodSizeInFrames; + ma_uint32 internalPeriods; + int openMode; + ma_snd_pcm_hw_params_t* pHWParams; + ma_snd_pcm_sw_params_t* pSWParams; + ma_snd_pcm_uframes_t bufferBoundary; + int pollDescriptorCount; + struct pollfd* pPollDescriptors; + int wakeupfd; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); /* This function should only be called for playback _or_ capture, never duplex. */ + MA_ASSERT(pDevice != NULL); + + formatALSA = ma_convert_ma_format_to_alsa_format(pDescriptor->format); + + openMode = 0; + if (pConfig->alsa.noAutoResample) { + openMode |= MA_SND_PCM_NO_AUTO_RESAMPLE; + } + if (pConfig->alsa.noAutoChannels) { + openMode |= MA_SND_PCM_NO_AUTO_CHANNELS; + } + if (pConfig->alsa.noAutoFormat) { + openMode |= MA_SND_PCM_NO_AUTO_FORMAT; + } + + result = ma_context_open_pcm__alsa(pDevice->pContext, pDescriptor->shareMode, deviceType, pDescriptor->pDeviceID, openMode, &pPCM); + if (result != MA_SUCCESS) { + return result; + } + + + /* Hardware parameters. 
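+
+       The sequence that follows: snd_pcm_hw_params_any() grabs the full configuration space,
+       then the access mode, format, channel count, sample rate, period count and buffer size
+       are narrowed one after another, and finally snd_pcm_hw_params() applies the result.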
*/ + pHWParams = (ma_snd_pcm_hw_params_t*)ma_calloc(((ma_snd_pcm_hw_params_sizeof_proc)pDevice->pContext->alsa.snd_pcm_hw_params_sizeof)(), &pDevice->pContext->allocationCallbacks); + if (pHWParams == NULL) { + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to allocate memory for hardware parameters."); + return MA_OUT_OF_MEMORY; + } + + resultALSA = ((ma_snd_pcm_hw_params_any_proc)pDevice->pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams); + if (resultALSA < 0) { + ma_free(pHWParams, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize hardware parameters. snd_pcm_hw_params_any() failed."); + return ma_result_from_errno(-resultALSA); + } + + /* MMAP Mode. Try using interleaved MMAP access. If this fails, fall back to standard readi/writei. */ + isUsingMMap = MA_FALSE; +#if 0 /* NOTE: MMAP mode temporarily disabled. */ + if (deviceType != ma_device_type_capture) { /* <-- Disabling MMAP mode for capture devices because I apparently do not have a device that supports it which means I can't test it... Contributions welcome. */ + if (!pConfig->alsa.noMMap) { + if (((ma_snd_pcm_hw_params_set_access_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_access)(pPCM, pHWParams, MA_SND_PCM_ACCESS_MMAP_INTERLEAVED) == 0) { + pDevice->alsa.isUsingMMap = MA_TRUE; + } + } + } +#endif + + if (!isUsingMMap) { + resultALSA = ((ma_snd_pcm_hw_params_set_access_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_access)(pPCM, pHWParams, MA_SND_PCM_ACCESS_RW_INTERLEAVED); + if (resultALSA < 0) { + ma_free(pHWParams, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set access mode to neither SND_PCM_ACCESS_MMAP_INTERLEAVED nor SND_PCM_ACCESS_RW_INTERLEAVED. snd_pcm_hw_params_set_access() failed."); + return ma_result_from_errno(-resultALSA); + } + } + + /* + Most important properties first. The documentation for OSS (yes, I know this is ALSA!) recommends format, channels, then sample rate. I can't + find any documentation for ALSA specifically, so I'm going to copy the recommendation for OSS. + */ + + /* Format. */ + { + /* + At this point we should have a list of supported formats, so now we need to find the best one. We first check if the requested format is + supported, and if so, use that one. If it's not supported, we just run though a list of formats and try to find the best one. + */ + if (formatALSA == MA_SND_PCM_FORMAT_UNKNOWN || ((ma_snd_pcm_hw_params_test_format_proc)pDevice->pContext->alsa.snd_pcm_hw_params_test_format)(pPCM, pHWParams, formatALSA) != 0) { + /* We're either requesting the native format or the specified format is not supported. 
*/ + size_t iFormat; + + formatALSA = MA_SND_PCM_FORMAT_UNKNOWN; + for (iFormat = 0; iFormat < ma_countof(g_maFormatPriorities); ++iFormat) { + if (((ma_snd_pcm_hw_params_test_format_proc)pDevice->pContext->alsa.snd_pcm_hw_params_test_format)(pPCM, pHWParams, ma_convert_ma_format_to_alsa_format(g_maFormatPriorities[iFormat])) == 0) { + formatALSA = ma_convert_ma_format_to_alsa_format(g_maFormatPriorities[iFormat]); + break; + } + } + + if (formatALSA == MA_SND_PCM_FORMAT_UNKNOWN) { + ma_free(pHWParams, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Format not supported. The device does not support any miniaudio formats."); + return MA_FORMAT_NOT_SUPPORTED; + } + } + + resultALSA = ((ma_snd_pcm_hw_params_set_format_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_format)(pPCM, pHWParams, formatALSA); + if (resultALSA < 0) { + ma_free(pHWParams, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Format not supported. snd_pcm_hw_params_set_format() failed."); + return ma_result_from_errno(-resultALSA); + } + + internalFormat = ma_format_from_alsa(formatALSA); + if (internalFormat == ma_format_unknown) { + ma_free(pHWParams, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] The chosen format is not supported by miniaudio."); + return MA_FORMAT_NOT_SUPPORTED; + } + } + + /* Channels. */ + { + unsigned int channels = pDescriptor->channels; + if (channels == 0) { + channels = MA_DEFAULT_CHANNELS; + } + + resultALSA = ((ma_snd_pcm_hw_params_set_channels_near_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_channels_near)(pPCM, pHWParams, &channels); + if (resultALSA < 0) { + ma_free(pHWParams, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set channel count. snd_pcm_hw_params_set_channels_near() failed."); + return ma_result_from_errno(-resultALSA); + } + + internalChannels = (ma_uint32)channels; + } + + /* Sample Rate */ + { + unsigned int sampleRate; + + /* + It appears there's either a bug in ALSA, a bug in some drivers, or I'm doing something silly; but having resampling enabled causes + problems with some device configurations when used in conjunction with MMAP access mode. To fix this problem we need to disable + resampling. + + To reproduce this problem, open the "plug:dmix" device, and set the sample rate to 44100. Internally, it looks like dmix uses a + sample rate of 48000. The hardware parameters will get set correctly with no errors, but it looks like the 44100 -> 48000 resampling + doesn't work properly - but only with MMAP access mode. You will notice skipping/crackling in the audio, and it'll run at a slightly + faster rate. + + miniaudio has built-in support for sample rate conversion (albeit low quality at the moment), so disabling resampling should be fine + for us. The only problem is that it won't be taking advantage of any kind of hardware-accelerated resampling and it won't be very + good quality until I get a chance to improve the quality of miniaudio's software sample rate conversion. 
+ + I don't currently know if the dmix plugin is the only one with this error. Indeed, this is the only one I've been able to reproduce + this error with. In the future, we may want to restrict the disabling of resampling to only known bad plugins. + */ + ((ma_snd_pcm_hw_params_set_rate_resample_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_rate_resample)(pPCM, pHWParams, 0); + + sampleRate = pDescriptor->sampleRate; + if (sampleRate == 0) { + sampleRate = MA_DEFAULT_SAMPLE_RATE; + } + + resultALSA = ((ma_snd_pcm_hw_params_set_rate_near_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_rate_near)(pPCM, pHWParams, &sampleRate, 0); + if (resultALSA < 0) { + ma_free(pHWParams, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Sample rate not supported. snd_pcm_hw_params_set_rate_near() failed."); + return ma_result_from_errno(-resultALSA); + } + + internalSampleRate = (ma_uint32)sampleRate; + } + + /* Periods. */ + { + ma_uint32 periods = pDescriptor->periodCount; + + resultALSA = ((ma_snd_pcm_hw_params_set_periods_near_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_periods_near)(pPCM, pHWParams, &periods, NULL); + if (resultALSA < 0) { + ma_free(pHWParams, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set period count. snd_pcm_hw_params_set_periods_near() failed."); + return ma_result_from_errno(-resultALSA); + } + + internalPeriods = periods; + } + + /* Buffer Size */ + { + ma_snd_pcm_uframes_t actualBufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, internalSampleRate, pConfig->performanceProfile) * internalPeriods; + + resultALSA = ((ma_snd_pcm_hw_params_set_buffer_size_near_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_buffer_size_near)(pPCM, pHWParams, &actualBufferSizeInFrames); + if (resultALSA < 0) { + ma_free(pHWParams, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set buffer size for device. snd_pcm_hw_params_set_buffer_size() failed."); + return ma_result_from_errno(-resultALSA); + } + + internalPeriodSizeInFrames = actualBufferSizeInFrames / internalPeriods; + } + + /* Apply hardware parameters. */ + resultALSA = ((ma_snd_pcm_hw_params_proc)pDevice->pContext->alsa.snd_pcm_hw_params)(pPCM, pHWParams); + if (resultALSA < 0) { + ma_free(pHWParams, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set hardware parameters. snd_pcm_hw_params() failed."); + return ma_result_from_errno(-resultALSA); + } + + ma_free(pHWParams, &pDevice->pContext->allocationCallbacks); + pHWParams = NULL; + + + /* Software parameters. 
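+
+       Three things get configured below: avail_min (the period size rounded down to a power of
+       two), and, for playback devices in writei mode, a start threshold of two periods plus a
+       stop threshold at the buffer boundary so an xrun wraps around rather than stopping the
+       stream.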
*/ + pSWParams = (ma_snd_pcm_sw_params_t*)ma_calloc(((ma_snd_pcm_sw_params_sizeof_proc)pDevice->pContext->alsa.snd_pcm_sw_params_sizeof)(), &pDevice->pContext->allocationCallbacks); + if (pSWParams == NULL) { + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to allocate memory for software parameters."); + return MA_OUT_OF_MEMORY; + } + + resultALSA = ((ma_snd_pcm_sw_params_current_proc)pDevice->pContext->alsa.snd_pcm_sw_params_current)(pPCM, pSWParams); + if (resultALSA < 0) { + ma_free(pSWParams, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize software parameters. snd_pcm_sw_params_current() failed."); + return ma_result_from_errno(-resultALSA); + } + + resultALSA = ((ma_snd_pcm_sw_params_set_avail_min_proc)pDevice->pContext->alsa.snd_pcm_sw_params_set_avail_min)(pPCM, pSWParams, ma_prev_power_of_2(internalPeriodSizeInFrames)); + if (resultALSA < 0) { + ma_free(pSWParams, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_sw_params_set_avail_min() failed."); + return ma_result_from_errno(-resultALSA); + } + + resultALSA = ((ma_snd_pcm_sw_params_get_boundary_proc)pDevice->pContext->alsa.snd_pcm_sw_params_get_boundary)(pSWParams, &bufferBoundary); + if (resultALSA < 0) { + bufferBoundary = internalPeriodSizeInFrames * internalPeriods; + } + + if (deviceType == ma_device_type_playback && !isUsingMMap) { /* Only playback devices in writei/readi mode need a start threshold. */ + /* + Subtle detail here with the start threshold. When in playback-only mode (no full-duplex) we can set the start threshold to + the size of a period. But for full-duplex we need to set it such that it is at least two periods. + */ + resultALSA = ((ma_snd_pcm_sw_params_set_start_threshold_proc)pDevice->pContext->alsa.snd_pcm_sw_params_set_start_threshold)(pPCM, pSWParams, internalPeriodSizeInFrames*2); + if (resultALSA < 0) { + ma_free(pSWParams, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set start threshold for playback device. snd_pcm_sw_params_set_start_threshold() failed."); + return ma_result_from_errno(-resultALSA); + } + + resultALSA = ((ma_snd_pcm_sw_params_set_stop_threshold_proc)pDevice->pContext->alsa.snd_pcm_sw_params_set_stop_threshold)(pPCM, pSWParams, bufferBoundary); + if (resultALSA < 0) { /* Set to boundary to loop instead of stop in the event of an xrun. */ + ma_free(pSWParams, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set stop threshold for playback device. 
snd_pcm_sw_params_set_stop_threshold() failed."); + return ma_result_from_errno(-resultALSA); + } + } + + resultALSA = ((ma_snd_pcm_sw_params_proc)pDevice->pContext->alsa.snd_pcm_sw_params)(pPCM, pSWParams); + if (resultALSA < 0) { + ma_free(pSWParams, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set software parameters. snd_pcm_sw_params() failed."); + return ma_result_from_errno(-resultALSA); + } + + ma_free(pSWParams, &pDevice->pContext->allocationCallbacks); + pSWParams = NULL; + + + /* Grab the internal channel map. For now we're not going to bother trying to change the channel map and instead just do it ourselves. */ + { + ma_snd_pcm_chmap_t* pChmap = NULL; + if (pDevice->pContext->alsa.snd_pcm_get_chmap != NULL) { + pChmap = ((ma_snd_pcm_get_chmap_proc)pDevice->pContext->alsa.snd_pcm_get_chmap)(pPCM); + } + + if (pChmap != NULL) { + ma_uint32 iChannel; + + /* There are cases where the returned channel map can have a different channel count than was returned by snd_pcm_hw_params_set_channels_near(). */ + if (pChmap->channels >= internalChannels) { + /* Drop excess channels. */ + for (iChannel = 0; iChannel < internalChannels; ++iChannel) { + internalChannelMap[iChannel] = ma_convert_alsa_channel_position_to_ma_channel(pChmap->pos[iChannel]); + } + } else { + ma_uint32 i; + + /* + Excess channels use defaults. Do an initial fill with defaults, overwrite the first pChmap->channels, validate to ensure there are no duplicate + channels. If validation fails, fall back to defaults. + */ + ma_bool32 isValid = MA_TRUE; + + /* Fill with defaults. */ + ma_channel_map_init_standard(ma_standard_channel_map_alsa, internalChannelMap, ma_countof(internalChannelMap), internalChannels); + + /* Overwrite first pChmap->channels channels. */ + for (iChannel = 0; iChannel < pChmap->channels; ++iChannel) { + internalChannelMap[iChannel] = ma_convert_alsa_channel_position_to_ma_channel(pChmap->pos[iChannel]); + } + + /* Validate. */ + for (i = 0; i < internalChannels && isValid; ++i) { + ma_uint32 j; + for (j = i+1; j < internalChannels; ++j) { + if (internalChannelMap[i] == internalChannelMap[j]) { + isValid = MA_FALSE; + break; + } + } + } + + /* If our channel map is invalid, fall back to defaults. */ + if (!isValid) { + ma_channel_map_init_standard(ma_standard_channel_map_alsa, internalChannelMap, ma_countof(internalChannelMap), internalChannels); + } + } + + free(pChmap); + pChmap = NULL; + } else { + /* Could not retrieve the channel map. Fall back to a hard-coded assumption. */ + ma_channel_map_init_standard(ma_standard_channel_map_alsa, internalChannelMap, ma_countof(internalChannelMap), internalChannels); + } + } + + + /* + We need to retrieve the poll descriptors so we can use poll() to wait for data to become + available for reading or writing. There's no well defined maximum for this so we're just going + to allocate this on the heap. 
+ */ + pollDescriptorCount = ((ma_snd_pcm_poll_descriptors_count_proc)pDevice->pContext->alsa.snd_pcm_poll_descriptors_count)(pPCM); + if (pollDescriptorCount <= 0) { + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to retrieve poll descriptors count."); + return MA_ERROR; + } + + pPollDescriptors = (struct pollfd*)ma_malloc(sizeof(*pPollDescriptors) * (pollDescriptorCount + 1), &pDevice->pContext->allocationCallbacks); /* +1 because we want room for the wakeup descriptor. */ + if (pPollDescriptors == NULL) { + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to allocate memory for poll descriptors."); + return MA_OUT_OF_MEMORY; + } + + /* + We need an eventfd to wakeup from poll() and avoid a deadlock in situations where the driver + never returns from writei() and readi(). This has been observed with the "pulse" device. + */ + wakeupfd = eventfd(0, 0); + if (wakeupfd < 0) { + ma_free(pPollDescriptors, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to create eventfd for poll wakeup."); + return ma_result_from_errno(errno); + } + + /* We'll place the wakeup fd at the start of the buffer. */ + pPollDescriptors[0].fd = wakeupfd; + pPollDescriptors[0].events = POLLIN; /* We only care about waiting to read from the wakeup file descriptor. */ + pPollDescriptors[0].revents = 0; + + /* We can now extract the PCM poll descriptors which we place after the wakeup descriptor. */ + pollDescriptorCount = ((ma_snd_pcm_poll_descriptors_proc)pDevice->pContext->alsa.snd_pcm_poll_descriptors)(pPCM, pPollDescriptors + 1, pollDescriptorCount); /* +1 because we want to place these descriptors after the wakeup descriptor. */ + if (pollDescriptorCount <= 0) { + close(wakeupfd); + ma_free(pPollDescriptors, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to retrieve poll descriptors."); + return MA_ERROR; + } + + if (deviceType == ma_device_type_capture) { + pDevice->alsa.pollDescriptorCountCapture = pollDescriptorCount; + pDevice->alsa.pPollDescriptorsCapture = pPollDescriptors; + pDevice->alsa.wakeupfdCapture = wakeupfd; + } else { + pDevice->alsa.pollDescriptorCountPlayback = pollDescriptorCount; + pDevice->alsa.pPollDescriptorsPlayback = pPollDescriptors; + pDevice->alsa.wakeupfdPlayback = wakeupfd; + } + + + /* We're done. Prepare the device. 
*/ + resultALSA = ((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)(pPCM); + if (resultALSA < 0) { + close(wakeupfd); + ma_free(pPollDescriptors, &pDevice->pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to prepare device."); + return ma_result_from_errno(-resultALSA); + } + + + if (deviceType == ma_device_type_capture) { + pDevice->alsa.pPCMCapture = (ma_ptr)pPCM; + pDevice->alsa.isUsingMMapCapture = isUsingMMap; + } else { + pDevice->alsa.pPCMPlayback = (ma_ptr)pPCM; + pDevice->alsa.isUsingMMapPlayback = isUsingMMap; + } + + pDescriptor->format = internalFormat; + pDescriptor->channels = internalChannels; + pDescriptor->sampleRate = internalSampleRate; + ma_channel_map_copy(pDescriptor->channelMap, internalChannelMap, ma_min(internalChannels, MA_MAX_CHANNELS)); + pDescriptor->periodSizeInFrames = internalPeriodSizeInFrames; + pDescriptor->periodCount = internalPeriods; + + return MA_SUCCESS; +} + +static ma_result ma_device_init__alsa(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->alsa); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_by_type__alsa(pDevice, pConfig, pDescriptorCapture, ma_device_type_capture); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_by_type__alsa(pDevice, pConfig, pDescriptorPlayback, ma_device_type_playback); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_start__alsa(ma_device* pDevice) +{ + int resultALSA; + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + resultALSA = ((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture); + if (resultALSA < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start capture device."); + return ma_result_from_errno(-resultALSA); + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + /* Don't need to do anything for playback because it'll be started automatically when enough data has been written. */ + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__alsa(ma_device* pDevice) +{ + /* + The stop callback will get called on the worker thread after read/write__alsa() has returned. At this point there is + a small chance that our wakeupfd has not been cleared. We'll clear that out now if applicable. + */ + int resultPoll; + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Dropping capture device...\n"); + ((ma_snd_pcm_drop_proc)pDevice->pContext->alsa.snd_pcm_drop)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Dropping capture device successful.\n"); + + /* We need to prepare the device again, otherwise we won't be able to restart the device. 
*/ + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing capture device...\n"); + if (((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture) < 0) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing capture device failed.\n"); + } else { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing capture device successful.\n"); + } + + /* Clear the wakeupfd. */ + resultPoll = poll((struct pollfd*)pDevice->alsa.pPollDescriptorsCapture, 1, 0); + if (resultPoll > 0) { + ma_uint64 t; + read(((struct pollfd*)pDevice->alsa.pPollDescriptorsCapture)[0].fd, &t, sizeof(t)); + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Dropping playback device...\n"); + ((ma_snd_pcm_drop_proc)pDevice->pContext->alsa.snd_pcm_drop)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Dropping playback device successful.\n"); + + /* We need to prepare the device again, otherwise we won't be able to restart the device. */ + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing playback device...\n"); + if (((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback) < 0) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing playback device failed.\n"); + } else { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing playback device successful.\n"); + } + + /* Clear the wakeupfd. */ + resultPoll = poll((struct pollfd*)pDevice->alsa.pPollDescriptorsPlayback, 1, 0); + if (resultPoll > 0) { + ma_uint64 t; + read(((struct pollfd*)pDevice->alsa.pPollDescriptorsPlayback)[0].fd, &t, sizeof(t)); + } + + } + + return MA_SUCCESS; +} + +static ma_result ma_device_wait__alsa(ma_device* pDevice, ma_snd_pcm_t* pPCM, struct pollfd* pPollDescriptors, int pollDescriptorCount, short requiredEvent) +{ + for (;;) { + unsigned short revents; + int resultALSA; + int resultPoll = poll(pPollDescriptors, pollDescriptorCount, -1); + if (resultPoll < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] poll() failed.\n"); + return ma_result_from_errno(errno); + } + + /* + Before checking the ALSA poll descriptor flag we need to check if the wakeup descriptor + has had it's POLLIN flag set. If so, we need to actually read the data and then exit + function. The wakeup descriptor will be the first item in the descriptors buffer. + */ + if ((pPollDescriptors[0].revents & POLLIN) != 0) { + ma_uint64 t; + int resultRead = read(pPollDescriptors[0].fd, &t, sizeof(t)); /* <-- Important that we read here so that the next write() does not block. */ + if (resultRead < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] read() failed.\n"); + return ma_result_from_errno(errno); + } + + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] POLLIN set for wakeupfd\n"); + return MA_DEVICE_NOT_STARTED; + } + + /* + Getting here means that some data should be able to be read. We need to use ALSA to + translate the revents flags for us. + */ + resultALSA = ((ma_snd_pcm_poll_descriptors_revents_proc)pDevice->pContext->alsa.snd_pcm_poll_descriptors_revents)(pPCM, pPollDescriptors + 1, pollDescriptorCount - 1, &revents); /* +1, -1 to ignore the wakeup descriptor. 
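+ Note: pPollDescriptors[0] is the eventfd wakeup descriptor that was registered in ma_device_init_by_type__alsa(); the descriptors returned by snd_pcm_poll_descriptors() start at index 1, which is why the wakeup descriptor is excluded from both the pointer and the count passed here.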
*/ + if (resultALSA < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_poll_descriptors_revents() failed.\n"); + return ma_result_from_errno(-resultALSA); + } + + if ((revents & POLLERR) != 0) { + ma_snd_pcm_state_t state = ((ma_snd_pcm_state_proc)pDevice->pContext->alsa.snd_pcm_state)(pPCM); + if (state == MA_SND_PCM_STATE_XRUN) { + /* The PCM is in a xrun state. This will be recovered from at a higher level. We can disregard this. */ + } else { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[ALSA] POLLERR detected. status = %d\n", ((ma_snd_pcm_state_proc)pDevice->pContext->alsa.snd_pcm_state)(pPCM)); + } + } + + if ((revents & requiredEvent) == requiredEvent) { + break; /* We're done. Data available for reading or writing. */ + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_wait_read__alsa(ma_device* pDevice) +{ + return ma_device_wait__alsa(pDevice, (ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, (struct pollfd*)pDevice->alsa.pPollDescriptorsCapture, pDevice->alsa.pollDescriptorCountCapture + 1, POLLIN); /* +1 to account for the wakeup descriptor. */ +} + +static ma_result ma_device_wait_write__alsa(ma_device* pDevice) +{ + return ma_device_wait__alsa(pDevice, (ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, (struct pollfd*)pDevice->alsa.pPollDescriptorsPlayback, pDevice->alsa.pollDescriptorCountPlayback + 1, POLLOUT); /* +1 to account for the wakeup descriptor. */ +} + +static ma_result ma_device_read__alsa(ma_device* pDevice, void* pFramesOut, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + ma_snd_pcm_sframes_t resultALSA = 0; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pFramesOut != NULL); + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + while (ma_device_get_state(pDevice) == ma_device_state_started) { + ma_result result; + + /* The first thing to do is wait for data to become available for reading. This will return an error code if the device has been stopped. */ + result = ma_device_wait_read__alsa(pDevice); + if (result != MA_SUCCESS) { + return result; + } + + /* Getting here means we should have data available. */ + resultALSA = ((ma_snd_pcm_readi_proc)pDevice->pContext->alsa.snd_pcm_readi)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, pFramesOut, frameCount); + if (resultALSA >= 0) { + break; /* Success. */ + } else { + if (resultALSA == -EAGAIN) { + /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "EGAIN (read)\n");*/ + continue; /* Try again. */ + } else if (resultALSA == -EPIPE) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "EPIPE (read)\n"); + + /* Overrun. Recover and try again. If this fails we need to return an error. */ + resultALSA = ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, resultALSA, MA_TRUE); + if (resultALSA < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after overrun."); + return ma_result_from_errno((int)-resultALSA); + } + + resultALSA = ((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture); + if (resultALSA < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start device after underrun."); + return ma_result_from_errno((int)-resultALSA); + } + + continue; /* Try reading again. 
*/ + } + } + } + + if (pFramesRead != NULL) { + *pFramesRead = resultALSA; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_write__alsa(ma_device* pDevice, const void* pFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +{ + ma_snd_pcm_sframes_t resultALSA = 0; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pFrames != NULL); + + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + while (ma_device_get_state(pDevice) == ma_device_state_started) { + ma_result result; + + /* The first thing to do is wait for space to become available for writing. This will return an error code if the device has been stopped. */ + result = ma_device_wait_write__alsa(pDevice); + if (result != MA_SUCCESS) { + return result; + } + + resultALSA = ((ma_snd_pcm_writei_proc)pDevice->pContext->alsa.snd_pcm_writei)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, pFrames, frameCount); + if (resultALSA >= 0) { + break; /* Success. */ + } else { + if (resultALSA == -EAGAIN) { + /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "EGAIN (write)\n");*/ + continue; /* Try again. */ + } else if (resultALSA == -EPIPE) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "EPIPE (write)\n"); + + /* Underrun. Recover and try again. If this fails we need to return an error. */ + resultALSA = ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, resultALSA, MA_TRUE); /* MA_TRUE=silent (don't print anything on error). */ + if (resultALSA < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after underrun."); + return ma_result_from_errno((int)-resultALSA); + } + + /* + In my testing I have had a situation where writei() does not automatically restart the device even though I've set it + up as such in the software parameters. What will happen is writei() will block indefinitely even though the number of + frames is well beyond the auto-start threshold. To work around this I've needed to add an explicit start here. Not sure + if this is me just being stupid and not recovering the device properly, but this definitely feels like something isn't + quite right here. + */ + resultALSA = ((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback); + if (resultALSA < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start device after underrun."); + return ma_result_from_errno((int)-resultALSA); + } + + continue; /* Try writing again. */ + } + } + } + + if (pFramesWritten != NULL) { + *pFramesWritten = resultALSA; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_data_loop_wakeup__alsa(ma_device* pDevice) +{ + ma_uint64 t = 1; + int resultWrite = 0; + + MA_ASSERT(pDevice != NULL); + + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Waking up...\n"); + + /* Write to an eventfd to trigger a wakeup from poll() and abort any reading or writing. 
*/ + if (pDevice->alsa.pPollDescriptorsCapture != NULL) { + resultWrite = write(pDevice->alsa.wakeupfdCapture, &t, sizeof(t)); + } + if (pDevice->alsa.pPollDescriptorsPlayback != NULL) { + resultWrite = write(pDevice->alsa.wakeupfdPlayback, &t, sizeof(t)); + } + + if (resultWrite < 0) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] write() failed.\n"); + return ma_result_from_errno(errno); + } + + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Waking up completed successfully.\n"); + + return MA_SUCCESS; +} + +static ma_result ma_context_uninit__alsa(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_alsa); + + /* Clean up memory for memory leak checkers. */ + ((ma_snd_config_update_free_global_proc)pContext->alsa.snd_config_update_free_global)(); + +#ifndef MA_NO_RUNTIME_LINKING + ma_dlclose(ma_context_get_log(pContext), pContext->alsa.asoundSO); +#endif + + ma_mutex_uninit(&pContext->alsa.internalDeviceEnumLock); + + return MA_SUCCESS; +} + +static ma_result ma_context_init__alsa(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + ma_result result; +#ifndef MA_NO_RUNTIME_LINKING + const char* libasoundNames[] = { + "libasound.so.2", + "libasound.so" + }; + size_t i; + + for (i = 0; i < ma_countof(libasoundNames); ++i) { + pContext->alsa.asoundSO = ma_dlopen(ma_context_get_log(pContext), libasoundNames[i]); + if (pContext->alsa.asoundSO != NULL) { + break; + } + } + + if (pContext->alsa.asoundSO == NULL) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[ALSA] Failed to open shared object.\n"); + return MA_NO_BACKEND; + } + + pContext->alsa.snd_pcm_open = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_open"); + pContext->alsa.snd_pcm_close = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_close"); + pContext->alsa.snd_pcm_hw_params_sizeof = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_sizeof"); + pContext->alsa.snd_pcm_hw_params_any = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_any"); + pContext->alsa.snd_pcm_hw_params_set_format = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_format"); + pContext->alsa.snd_pcm_hw_params_set_format_first = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_format_first"); + pContext->alsa.snd_pcm_hw_params_get_format_mask = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_format_mask"); + pContext->alsa.snd_pcm_hw_params_set_channels = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_channels"); + pContext->alsa.snd_pcm_hw_params_set_channels_near = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_channels_near"); + pContext->alsa.snd_pcm_hw_params_set_channels_minmax = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_channels_minmax"); + pContext->alsa.snd_pcm_hw_params_set_rate_resample = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_rate_resample"); + pContext->alsa.snd_pcm_hw_params_set_rate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_rate"); + 
pContext->alsa.snd_pcm_hw_params_set_rate_near = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_rate_near"); + pContext->alsa.snd_pcm_hw_params_set_buffer_size_near = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_buffer_size_near"); + pContext->alsa.snd_pcm_hw_params_set_periods_near = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_periods_near"); + pContext->alsa.snd_pcm_hw_params_set_access = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_access"); + pContext->alsa.snd_pcm_hw_params_get_format = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_format"); + pContext->alsa.snd_pcm_hw_params_get_channels = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels"); + pContext->alsa.snd_pcm_hw_params_get_channels_min = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels_min"); + pContext->alsa.snd_pcm_hw_params_get_channels_max = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels_max"); + pContext->alsa.snd_pcm_hw_params_get_rate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate"); + pContext->alsa.snd_pcm_hw_params_get_rate_min = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate_min"); + pContext->alsa.snd_pcm_hw_params_get_rate_max = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate_max"); + pContext->alsa.snd_pcm_hw_params_get_buffer_size = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_buffer_size"); + pContext->alsa.snd_pcm_hw_params_get_periods = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_periods"); + pContext->alsa.snd_pcm_hw_params_get_access = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_access"); + pContext->alsa.snd_pcm_hw_params_test_format = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_test_format"); + pContext->alsa.snd_pcm_hw_params_test_channels = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_test_channels"); + pContext->alsa.snd_pcm_hw_params_test_rate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_test_rate"); + pContext->alsa.snd_pcm_hw_params = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params"); + pContext->alsa.snd_pcm_sw_params_sizeof = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_sw_params_sizeof"); + pContext->alsa.snd_pcm_sw_params_current = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_sw_params_current"); + pContext->alsa.snd_pcm_sw_params_get_boundary = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_sw_params_get_boundary"); + pContext->alsa.snd_pcm_sw_params_set_avail_min = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_sw_params_set_avail_min"); + pContext->alsa.snd_pcm_sw_params_set_start_threshold = (ma_proc)ma_dlsym(ma_context_get_log(pContext), 
pContext->alsa.asoundSO, "snd_pcm_sw_params_set_start_threshold"); + pContext->alsa.snd_pcm_sw_params_set_stop_threshold = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_sw_params_set_stop_threshold"); + pContext->alsa.snd_pcm_sw_params = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_sw_params"); + pContext->alsa.snd_pcm_format_mask_sizeof = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_format_mask_sizeof"); + pContext->alsa.snd_pcm_format_mask_test = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_format_mask_test"); + pContext->alsa.snd_pcm_get_chmap = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_get_chmap"); + pContext->alsa.snd_pcm_state = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_state"); + pContext->alsa.snd_pcm_prepare = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_prepare"); + pContext->alsa.snd_pcm_start = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_start"); + pContext->alsa.snd_pcm_drop = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_drop"); + pContext->alsa.snd_pcm_drain = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_drain"); + pContext->alsa.snd_pcm_reset = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_reset"); + pContext->alsa.snd_device_name_hint = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_device_name_hint"); + pContext->alsa.snd_device_name_get_hint = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_device_name_get_hint"); + pContext->alsa.snd_card_get_index = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_card_get_index"); + pContext->alsa.snd_device_name_free_hint = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_device_name_free_hint"); + pContext->alsa.snd_pcm_mmap_begin = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_mmap_begin"); + pContext->alsa.snd_pcm_mmap_commit = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_mmap_commit"); + pContext->alsa.snd_pcm_recover = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_recover"); + pContext->alsa.snd_pcm_readi = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_readi"); + pContext->alsa.snd_pcm_writei = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_writei"); + pContext->alsa.snd_pcm_avail = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_avail"); + pContext->alsa.snd_pcm_avail_update = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_avail_update"); + pContext->alsa.snd_pcm_wait = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_wait"); + pContext->alsa.snd_pcm_nonblock = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_nonblock"); + pContext->alsa.snd_pcm_info = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_info"); + pContext->alsa.snd_pcm_info_sizeof = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_info_sizeof"); + 
pContext->alsa.snd_pcm_info_get_name = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_info_get_name"); + pContext->alsa.snd_pcm_poll_descriptors = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_poll_descriptors"); + pContext->alsa.snd_pcm_poll_descriptors_count = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_poll_descriptors_count"); + pContext->alsa.snd_pcm_poll_descriptors_revents = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_poll_descriptors_revents"); + pContext->alsa.snd_config_update_free_global = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_config_update_free_global"); +#else + /* The system below is just for type safety. */ + ma_snd_pcm_open_proc _snd_pcm_open = snd_pcm_open; + ma_snd_pcm_close_proc _snd_pcm_close = snd_pcm_close; + ma_snd_pcm_hw_params_sizeof_proc _snd_pcm_hw_params_sizeof = snd_pcm_hw_params_sizeof; + ma_snd_pcm_hw_params_any_proc _snd_pcm_hw_params_any = snd_pcm_hw_params_any; + ma_snd_pcm_hw_params_set_format_proc _snd_pcm_hw_params_set_format = snd_pcm_hw_params_set_format; + ma_snd_pcm_hw_params_set_format_first_proc _snd_pcm_hw_params_set_format_first = snd_pcm_hw_params_set_format_first; + ma_snd_pcm_hw_params_get_format_mask_proc _snd_pcm_hw_params_get_format_mask = snd_pcm_hw_params_get_format_mask; + ma_snd_pcm_hw_params_set_channels_proc _snd_pcm_hw_params_set_channels = snd_pcm_hw_params_set_channels; + ma_snd_pcm_hw_params_set_channels_near_proc _snd_pcm_hw_params_set_channels_near = snd_pcm_hw_params_set_channels_near; + ma_snd_pcm_hw_params_set_channels_minmax_proc _snd_pcm_hw_params_set_channels_minmax = snd_pcm_hw_params_set_channels_minmax; + ma_snd_pcm_hw_params_set_rate_resample_proc _snd_pcm_hw_params_set_rate_resample = snd_pcm_hw_params_set_rate_resample; + ma_snd_pcm_hw_params_set_rate_proc _snd_pcm_hw_params_set_rate = snd_pcm_hw_params_set_rate; + ma_snd_pcm_hw_params_set_rate_near_proc _snd_pcm_hw_params_set_rate_near = snd_pcm_hw_params_set_rate_near; + ma_snd_pcm_hw_params_set_buffer_size_near_proc _snd_pcm_hw_params_set_buffer_size_near = snd_pcm_hw_params_set_buffer_size_near; + ma_snd_pcm_hw_params_set_periods_near_proc _snd_pcm_hw_params_set_periods_near = snd_pcm_hw_params_set_periods_near; + ma_snd_pcm_hw_params_set_access_proc _snd_pcm_hw_params_set_access = snd_pcm_hw_params_set_access; + ma_snd_pcm_hw_params_get_format_proc _snd_pcm_hw_params_get_format = snd_pcm_hw_params_get_format; + ma_snd_pcm_hw_params_get_channels_proc _snd_pcm_hw_params_get_channels = snd_pcm_hw_params_get_channels; + ma_snd_pcm_hw_params_get_channels_min_proc _snd_pcm_hw_params_get_channels_min = snd_pcm_hw_params_get_channels_min; + ma_snd_pcm_hw_params_get_channels_max_proc _snd_pcm_hw_params_get_channels_max = snd_pcm_hw_params_get_channels_max; + ma_snd_pcm_hw_params_get_rate_proc _snd_pcm_hw_params_get_rate = snd_pcm_hw_params_get_rate; + ma_snd_pcm_hw_params_get_rate_min_proc _snd_pcm_hw_params_get_rate_min = snd_pcm_hw_params_get_rate_min; + ma_snd_pcm_hw_params_get_rate_max_proc _snd_pcm_hw_params_get_rate_max = snd_pcm_hw_params_get_rate_max; + ma_snd_pcm_hw_params_get_buffer_size_proc _snd_pcm_hw_params_get_buffer_size = snd_pcm_hw_params_get_buffer_size; + ma_snd_pcm_hw_params_get_periods_proc _snd_pcm_hw_params_get_periods = snd_pcm_hw_params_get_periods; + ma_snd_pcm_hw_params_get_access_proc _snd_pcm_hw_params_get_access = snd_pcm_hw_params_get_access; +
ma_snd_pcm_hw_params_test_format_proc _snd_pcm_hw_params_test_format = snd_pcm_hw_params_test_format; + ma_snd_pcm_hw_params_test_channels_proc _snd_pcm_hw_params_test_channels = snd_pcm_hw_params_test_channels; + ma_snd_pcm_hw_params_test_rate_proc _snd_pcm_hw_params_test_rate = snd_pcm_hw_params_test_rate; + ma_snd_pcm_hw_params_proc _snd_pcm_hw_params = snd_pcm_hw_params; + ma_snd_pcm_sw_params_sizeof_proc _snd_pcm_sw_params_sizeof = snd_pcm_sw_params_sizeof; + ma_snd_pcm_sw_params_current_proc _snd_pcm_sw_params_current = snd_pcm_sw_params_current; + ma_snd_pcm_sw_params_get_boundary_proc _snd_pcm_sw_params_get_boundary = snd_pcm_sw_params_get_boundary; + ma_snd_pcm_sw_params_set_avail_min_proc _snd_pcm_sw_params_set_avail_min = snd_pcm_sw_params_set_avail_min; + ma_snd_pcm_sw_params_set_start_threshold_proc _snd_pcm_sw_params_set_start_threshold = snd_pcm_sw_params_set_start_threshold; + ma_snd_pcm_sw_params_set_stop_threshold_proc _snd_pcm_sw_params_set_stop_threshold = snd_pcm_sw_params_set_stop_threshold; + ma_snd_pcm_sw_params_proc _snd_pcm_sw_params = snd_pcm_sw_params; + ma_snd_pcm_format_mask_sizeof_proc _snd_pcm_format_mask_sizeof = snd_pcm_format_mask_sizeof; + ma_snd_pcm_format_mask_test_proc _snd_pcm_format_mask_test = snd_pcm_format_mask_test; + ma_snd_pcm_get_chmap_proc _snd_pcm_get_chmap = snd_pcm_get_chmap; + ma_snd_pcm_state_proc _snd_pcm_state = snd_pcm_state; + ma_snd_pcm_prepare_proc _snd_pcm_prepare = snd_pcm_prepare; + ma_snd_pcm_start_proc _snd_pcm_start = snd_pcm_start; + ma_snd_pcm_drop_proc _snd_pcm_drop = snd_pcm_drop; + ma_snd_pcm_drain_proc _snd_pcm_drain = snd_pcm_drain; + ma_snd_pcm_reset_proc _snd_pcm_reset = snd_pcm_reset; + ma_snd_device_name_hint_proc _snd_device_name_hint = snd_device_name_hint; + ma_snd_device_name_get_hint_proc _snd_device_name_get_hint = snd_device_name_get_hint; + ma_snd_card_get_index_proc _snd_card_get_index = snd_card_get_index; + ma_snd_device_name_free_hint_proc _snd_device_name_free_hint = snd_device_name_free_hint; + ma_snd_pcm_mmap_begin_proc _snd_pcm_mmap_begin = snd_pcm_mmap_begin; + ma_snd_pcm_mmap_commit_proc _snd_pcm_mmap_commit = snd_pcm_mmap_commit; + ma_snd_pcm_recover_proc _snd_pcm_recover = snd_pcm_recover; + ma_snd_pcm_readi_proc _snd_pcm_readi = snd_pcm_readi; + ma_snd_pcm_writei_proc _snd_pcm_writei = snd_pcm_writei; + ma_snd_pcm_avail_proc _snd_pcm_avail = snd_pcm_avail; + ma_snd_pcm_avail_update_proc _snd_pcm_avail_update = snd_pcm_avail_update; + ma_snd_pcm_wait_proc _snd_pcm_wait = snd_pcm_wait; + ma_snd_pcm_nonblock_proc _snd_pcm_nonblock = snd_pcm_nonblock; + ma_snd_pcm_info_proc _snd_pcm_info = snd_pcm_info; + ma_snd_pcm_info_sizeof_proc _snd_pcm_info_sizeof = snd_pcm_info_sizeof; + ma_snd_pcm_info_get_name_proc _snd_pcm_info_get_name = snd_pcm_info_get_name; + ma_snd_pcm_poll_descriptors_proc _snd_pcm_poll_descriptors = snd_pcm_poll_descriptors; + ma_snd_pcm_poll_descriptors_count_proc _snd_pcm_poll_descriptors_count = snd_pcm_poll_descriptors_count; + ma_snd_pcm_poll_descriptors_revents_proc _snd_pcm_poll_descriptors_revents = snd_pcm_poll_descriptors_revents; + ma_snd_config_update_free_global_proc _snd_config_update_free_global = snd_config_update_free_global; + + pContext->alsa.snd_pcm_open = (ma_proc)_snd_pcm_open; + pContext->alsa.snd_pcm_close = (ma_proc)_snd_pcm_close; + pContext->alsa.snd_pcm_hw_params_sizeof = (ma_proc)_snd_pcm_hw_params_sizeof; + pContext->alsa.snd_pcm_hw_params_any = (ma_proc)_snd_pcm_hw_params_any; + pContext->alsa.snd_pcm_hw_params_set_format = (ma_proc)_snd_pcm_hw_params_set_format; +
pContext->alsa.snd_pcm_hw_params_set_format_first = (ma_proc)_snd_pcm_hw_params_set_format_first; + pContext->alsa.snd_pcm_hw_params_get_format_mask = (ma_proc)_snd_pcm_hw_params_get_format_mask; + pContext->alsa.snd_pcm_hw_params_set_channels = (ma_proc)_snd_pcm_hw_params_set_channels; + pContext->alsa.snd_pcm_hw_params_set_channels_near = (ma_proc)_snd_pcm_hw_params_set_channels_near; + pContext->alsa.snd_pcm_hw_params_set_channels_minmax = (ma_proc)_snd_pcm_hw_params_set_channels_minmax; + pContext->alsa.snd_pcm_hw_params_set_rate_resample = (ma_proc)_snd_pcm_hw_params_set_rate_resample; + pContext->alsa.snd_pcm_hw_params_set_rate = (ma_proc)_snd_pcm_hw_params_set_rate; + pContext->alsa.snd_pcm_hw_params_set_rate_near = (ma_proc)_snd_pcm_hw_params_set_rate_near; + pContext->alsa.snd_pcm_hw_params_set_buffer_size_near = (ma_proc)_snd_pcm_hw_params_set_buffer_size_near; + pContext->alsa.snd_pcm_hw_params_set_periods_near = (ma_proc)_snd_pcm_hw_params_set_periods_near; + pContext->alsa.snd_pcm_hw_params_set_access = (ma_proc)_snd_pcm_hw_params_set_access; + pContext->alsa.snd_pcm_hw_params_get_format = (ma_proc)_snd_pcm_hw_params_get_format; + pContext->alsa.snd_pcm_hw_params_get_channels = (ma_proc)_snd_pcm_hw_params_get_channels; + pContext->alsa.snd_pcm_hw_params_get_channels_min = (ma_proc)_snd_pcm_hw_params_get_channels_min; + pContext->alsa.snd_pcm_hw_params_get_channels_max = (ma_proc)_snd_pcm_hw_params_get_channels_max; + pContext->alsa.snd_pcm_hw_params_get_rate = (ma_proc)_snd_pcm_hw_params_get_rate; + pContext->alsa.snd_pcm_hw_params_get_rate_min = (ma_proc)_snd_pcm_hw_params_get_rate_min; + pContext->alsa.snd_pcm_hw_params_get_rate_max = (ma_proc)_snd_pcm_hw_params_get_rate_max; + pContext->alsa.snd_pcm_hw_params_get_buffer_size = (ma_proc)_snd_pcm_hw_params_get_buffer_size; + pContext->alsa.snd_pcm_hw_params_get_periods = (ma_proc)_snd_pcm_hw_params_get_periods; + pContext->alsa.snd_pcm_hw_params_get_access = (ma_proc)_snd_pcm_hw_params_get_access; + pContext->alsa.snd_pcm_hw_params_test_format = (ma_proc)_snd_pcm_hw_params_test_format; + pContext->alsa.snd_pcm_hw_params_test_channels = (ma_proc)_snd_pcm_hw_params_test_channels; + pContext->alsa.snd_pcm_hw_params_test_rate = (ma_proc)_snd_pcm_hw_params_test_rate; + pContext->alsa.snd_pcm_hw_params = (ma_proc)_snd_pcm_hw_params; + pContext->alsa.snd_pcm_sw_params_sizeof = (ma_proc)_snd_pcm_sw_params_sizeof; + pContext->alsa.snd_pcm_sw_params_current = (ma_proc)_snd_pcm_sw_params_current; + pContext->alsa.snd_pcm_sw_params_get_boundary = (ma_proc)_snd_pcm_sw_params_get_boundary; + pContext->alsa.snd_pcm_sw_params_set_avail_min = (ma_proc)_snd_pcm_sw_params_set_avail_min; + pContext->alsa.snd_pcm_sw_params_set_start_threshold = (ma_proc)_snd_pcm_sw_params_set_start_threshold; + pContext->alsa.snd_pcm_sw_params_set_stop_threshold = (ma_proc)_snd_pcm_sw_params_set_stop_threshold; + pContext->alsa.snd_pcm_sw_params = (ma_proc)_snd_pcm_sw_params; + pContext->alsa.snd_pcm_format_mask_sizeof = (ma_proc)_snd_pcm_format_mask_sizeof; + pContext->alsa.snd_pcm_format_mask_test = (ma_proc)_snd_pcm_format_mask_test; + pContext->alsa.snd_pcm_get_chmap = (ma_proc)_snd_pcm_get_chmap; + pContext->alsa.snd_pcm_state = (ma_proc)_snd_pcm_state; + pContext->alsa.snd_pcm_prepare = (ma_proc)_snd_pcm_prepare; + pContext->alsa.snd_pcm_start = (ma_proc)_snd_pcm_start; + pContext->alsa.snd_pcm_drop = (ma_proc)_snd_pcm_drop; + pContext->alsa.snd_pcm_drain = (ma_proc)_snd_pcm_drain; + pContext->alsa.snd_pcm_reset = (ma_proc)_snd_pcm_reset; + 
pContext->alsa.snd_device_name_hint = (ma_proc)_snd_device_name_hint; + pContext->alsa.snd_device_name_get_hint = (ma_proc)_snd_device_name_get_hint; + pContext->alsa.snd_card_get_index = (ma_proc)_snd_card_get_index; + pContext->alsa.snd_device_name_free_hint = (ma_proc)_snd_device_name_free_hint; + pContext->alsa.snd_pcm_mmap_begin = (ma_proc)_snd_pcm_mmap_begin; + pContext->alsa.snd_pcm_mmap_commit = (ma_proc)_snd_pcm_mmap_commit; + pContext->alsa.snd_pcm_recover = (ma_proc)_snd_pcm_recover; + pContext->alsa.snd_pcm_readi = (ma_proc)_snd_pcm_readi; + pContext->alsa.snd_pcm_writei = (ma_proc)_snd_pcm_writei; + pContext->alsa.snd_pcm_avail = (ma_proc)_snd_pcm_avail; + pContext->alsa.snd_pcm_avail_update = (ma_proc)_snd_pcm_avail_update; + pContext->alsa.snd_pcm_wait = (ma_proc)_snd_pcm_wait; + pContext->alsa.snd_pcm_nonblock = (ma_proc)_snd_pcm_nonblock; + pContext->alsa.snd_pcm_info = (ma_proc)_snd_pcm_info; + pContext->alsa.snd_pcm_info_sizeof = (ma_proc)_snd_pcm_info_sizeof; + pContext->alsa.snd_pcm_info_get_name = (ma_proc)_snd_pcm_info_get_name; + pContext->alsa.snd_pcm_poll_descriptors = (ma_proc)_snd_pcm_poll_descriptors; + pContext->alsa.snd_pcm_poll_descriptors_count = (ma_proc)_snd_pcm_poll_descriptors_count; + pContext->alsa.snd_pcm_poll_descriptors_revents = (ma_proc)_snd_pcm_poll_descriptors_revents; + pContext->alsa.snd_config_update_free_global = (ma_proc)_snd_config_update_free_global; +#endif + + pContext->alsa.useVerboseDeviceEnumeration = pConfig->alsa.useVerboseDeviceEnumeration; + + result = ma_mutex_init(&pContext->alsa.internalDeviceEnumLock); + if (result != MA_SUCCESS) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[ALSA] WARNING: Failed to initialize mutex for internal device enumeration."); + return result; + } + + pCallbacks->onContextInit = ma_context_init__alsa; + pCallbacks->onContextUninit = ma_context_uninit__alsa; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__alsa; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__alsa; + pCallbacks->onDeviceInit = ma_device_init__alsa; + pCallbacks->onDeviceUninit = ma_device_uninit__alsa; + pCallbacks->onDeviceStart = ma_device_start__alsa; + pCallbacks->onDeviceStop = ma_device_stop__alsa; + pCallbacks->onDeviceRead = ma_device_read__alsa; + pCallbacks->onDeviceWrite = ma_device_write__alsa; + pCallbacks->onDeviceDataLoop = NULL; + pCallbacks->onDeviceDataLoopWakeup = ma_device_data_loop_wakeup__alsa; + + return MA_SUCCESS; +} +#endif /* ALSA */ + + + +/****************************************************************************** + +PulseAudio Backend + +******************************************************************************/ +#ifdef MA_HAS_PULSEAUDIO +/* +The PulseAudio API, along with Apple's Core Audio, is the worst of the mainstream audio APIs. This is a brief description of what's going on +in the PulseAudio backend. I apologize if this gets a bit ranty for your liking - you might want to skip this discussion. + +PulseAudio has something they call the "Simple API", which unfortunately isn't suitable for miniaudio. I've not seen anywhere where it +allows you to enumerate over devices, nor does it seem to support the ability to stop and start streams. Looking at the documentation, it +appears as though the stream is constantly running and you prevent sound from being emitted or captured by simply not calling the read or +write functions. This is not a professional solution as it would be much better to *actually* stop the underlying stream.
Perhaps the +simple API has some smarts to do this automatically, but I'm not sure. Another limitation with the simple API is that it seems inefficient +when you want to have multiple streams to a single context. For these reasons, miniaudio is not using the simple API. + +Since we're not using the simple API, we're left with the asynchronous API as our only other option. And boy, is this where it starts to +get fun, and I don't mean that in a good way... + +The problems start with the very name of the API - "asynchronous". Yes, this is an asynchronous oriented API which means your commands +don't immediately take effect. You instead need to issue your commands, and then wait for them to complete. The waiting mechanism is +enabled through the use of a "main loop". In the asynchronous API you cannot get away from the main loop, and the main loop is where almost +all of PulseAudio's problems stem from. + +When you first initialize PulseAudio you need an object referred to as "main loop". You can implement this yourself by defining your own +vtable, but it's much easier to just use one of the built-in main loop implementations. There's two generic implementations called +pa_mainloop and pa_threaded_mainloop, and another implementation specific to GLib called pa_glib_mainloop. We're using pa_threaded_mainloop +because it simplifies management of the worker thread. The idea of the main loop object is pretty self explanatory - you're supposed to use +it to implement a worker thread which runs in a loop. The main loop is where operations are actually executed. + +To initialize the main loop, you just use `pa_threaded_mainloop_new()`. This is the first function you'll call. You can then get a pointer +to the vtable with `pa_threaded_mainloop_get_api()` (the main loop vtable is called `pa_mainloop_api`). Again, you can bypass the threaded +main loop object entirely and just implement `pa_mainloop_api` directly, but there's no need for it unless you're doing something extremely +specialized such as if you want to integrate it into your application's existing main loop infrastructure. + +(EDIT 2021-01-26: miniaudio is no longer using `pa_threaded_mainloop` due to this issue: https://github.com/mackron/miniaudio/issues/262. +It is now using `pa_mainloop` which turns out to be a simpler solution anyway. The rest of this rant still applies, however.) + +Once you have your main loop vtable (the `pa_mainloop_api` object) you can create the PulseAudio context. This is very similar to +miniaudio's context and they map to each other quite well. You have one context to many streams, which is basically the same as miniaudio's +one `ma_context` to many `ma_device`s. Here's where it starts to get annoying, however. When you first create the PulseAudio context, which +is done with `pa_context_new()`, it's not actually connected to anything. When you connect, you call `pa_context_connect()`. However, if +you remember, PulseAudio is an asynchronous API. That means you cannot just assume the context is connected after `pa_context_connect()` +has returned. You instead need to wait for it to connect. To do this, you need to either wait for a callback to get fired, which you can +set with `pa_context_set_state_callback()`, or you can continuously poll the context's state. Either way, you need to run this in a loop. +All objects from here out are created from the context, and, I believe, you can't be creating these objects until the context is connected. +This waiting loop is therefore unavoidable.
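+
+To make that concrete, the connect-and-wait dance looks roughly like the sketch below. This is purely illustrative - it uses the plain
+`pa_mainloop` mentioned in the EDIT above, the variable names and the "my-app" client name are arbitrary, and error handling and cleanup
+are omitted - so treat it as a sketch of the pattern rather than the code miniaudio actually uses:
+
+    pa_mainloop* pMainLoop = pa_mainloop_new();
+    pa_context* pPulseContext = pa_context_new(pa_mainloop_get_api(pMainLoop), "my-app");
+
+    pa_context_connect(pPulseContext, NULL, PA_CONTEXT_NOFLAGS, NULL);
+
+    for (;;) {
+        pa_context_state_t state = pa_context_get_state(pPulseContext);
+        if (state == PA_CONTEXT_READY) {
+            break;  // Connected. Only now is it safe to start creating streams and issuing operations.
+        }
+        if (state == PA_CONTEXT_FAILED || state == PA_CONTEXT_TERMINATED) {
+            break;  // Connection error.
+        }
+
+        pa_mainloop_iterate(pMainLoop, 1, NULL);    // Run the main loop until something happens.
+    }
+
+With `pa_threaded_mainloop` the idea is the same, except the loop runs on its own thread rather than being pumped inline with
+`pa_mainloop_iterate()`.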
In order for the waiting to ever complete, however, the main loop needs to be running. Before +attempting to connect the context, the main loop needs to be started with `pa_threaded_mainloop_start()`. + +The reason for this asynchronous design is to support cases where you're connecting to a remote server, say through a local network or an +internet connection. However, the *VAST* majority of cases don't involve this at all - they just connect to a local "server" running on the +host machine. The fact that this would be the default rather than making `pa_context_connect()` synchronous tends to boggle the mind. + +Once the context has been created and connected you can start creating a stream. A PulseAudio stream is analogous to miniaudio's device. +The initialization of a stream is fairly standard - you configure some attributes (analogous to miniaudio's device config) and then call +`pa_stream_new()` to actually create it. Here is where we start to get into "operations". When configuring the stream, you can get +information about the source (such as sample format, sample rate, etc.), however it's not synchronous. Instead, a `pa_operation` object +is returned from `pa_context_get_source_info_by_name()` (capture) or `pa_context_get_sink_info_by_name()` (playback). Then, you need to +run a loop (again!) to wait for the operation to complete which you can determine via a callback or polling, just like we did with the +context. Then, as an added bonus, you need to decrement the reference counter of the `pa_operation` object to ensure memory is cleaned up. +All of that just to retrieve basic information about a device! + +Once the basic information about the device has been retrieved, miniaudio can now create the stream with `pa_stream_new()`. Like the +context, this needs to be connected. But we need to be careful here, because we're now about to introduce one of the most horrific design +choices in PulseAudio. + +PulseAudio allows you to specify a callback that is fired when data can be written to or read from a stream. The language is important here +because PulseAudio takes it literally, specifically the "can be". You would think these callbacks would be appropriate as the place for +writing and reading data to and from the stream, and that would be right, except when it's not. When you initialize the stream, you can +set a flag that tells PulseAudio to not start the stream automatically. This is required because miniaudio does not auto-start devices +straight after initialization - you need to call `ma_device_start()` manually. The problem is that even when this flag is specified, +PulseAudio will immediately fire its write or read callback. This is *technically* correct (based on the wording in the documentation) +because indeed, data *can* be written at this point. The problem is that it's not *practical*. It makes sense that the write/read callback +would be where a program will want to write or read data to or from the stream, but when it's called before the application has even +requested that the stream be started, it's just not practical because the program probably isn't ready for any kind of data delivery at +that point (it may still need to load files or whatnot). Instead, this callback should only be fired when the application requests the +stream be started which is how it works with literally *every* other callback-based audio API. Since miniaudio forbids firing of the data callback until the device has been started (as it should be with *all* callback based APIs), logic needs to be added to ensure miniaudio +doesn't just blindly fire the application-defined data callback from within the PulseAudio callback before the stream has actually been +started. The device state is used for this - if the state is anything other than `ma_device_state_starting` or `ma_device_state_started`, the main data +callback is not fired. + +This, unfortunately, is not the end of the problems with the PulseAudio write callback. Any normal callback based audio API will +continuously fire the callback at regular intervals based on the size of the internal buffer. This will only ever be fired when the device +is running, and will be fired regardless of whether or not the user actually wrote anything to the device/stream. This is not the case in +PulseAudio. In PulseAudio, the data callback will *only* be called if you wrote something to it previously. That means, if you don't call +`pa_stream_write()`, the callback will not get fired. On the surface you wouldn't think this would matter because you should always be +writing data, and if you don't have anything to write, just write silence. That's fine until you want to drain the stream. You see, if +you're continuously writing data to the stream, the stream will never get drained! That means in order to drain the stream, you need to +*not* write data to it! But remember, when you don't write data to the stream, the callback won't get fired again! Why is draining +important? Because that's how we've defined stopping to work in miniaudio. In miniaudio, stopping the device requires it to be drained +before returning from ma_device_stop(). So we've stopped the device, which requires us to drain, but draining requires us to *not* write +data to the stream (or else it won't ever complete draining), but not writing to the stream means the callback won't get fired again! + +This becomes a problem when stopping and then restarting the device. When the device is stopped, it's drained, which requires us to *not* +write anything to the stream. But then, since we didn't write anything to it, the write callback will *never* get called again if we just +resume the stream naively. This means that starting the stream requires us to write data to the stream from outside the callback. This +disconnect is something PulseAudio has got seriously wrong - there should only ever be a single source of data delivery, that being the +callback. (I have tried using `pa_stream_flush()` to trigger the write callback to fire, but this just doesn't work for some reason.) + +Once you've created the stream, you need to connect it which involves the whole waiting procedure. This is the same process as the context, +only this time you'll poll for the state with `pa_stream_get_state()`. The starting and stopping of a stream is referred to as +"corking" in PulseAudio. The analogy is corking a barrel. To start the stream, you uncork it, to stop it you cork it. Personally I think +it's silly - why would you not just call it "starting" and "stopping" like any other normal audio API? Anyway, the act of corking is, you +guessed it, asynchronous. This means you'll need our waiting loop as usual. Again, why this asynchronous design is the default is +absolutely beyond me. Would it really be that hard to just make it run synchronously? + +Teardown is pretty simple (what?!).
Teardown is pretty simple (what?!). It's just a matter of calling the relevant `_unref()` function on each object, in the reverse order
+of initialization.
+
+That's about it from the PulseAudio side. A bit ranty, I know, but they really need to fix that main loop and callback system. They're
+embarrassingly impractical. The main loop thing is an easy fix - have synchronous versions of all APIs. If an application wants these to
+run asynchronously, it can execute them in a separate thread itself. The desire to run these asynchronously is such a niche
+requirement - it makes no sense to make it the default. The stream write callback needs to be changed, or an alternative provided, so that
+it is fired continuously regardless of whether or not `pa_stream_write()` has been called, and so that it takes a pointer to a buffer as a
+parameter which the program just writes to directly rather than having to call `pa_stream_writable_size()` and `pa_stream_write()`. These
+changes alone would take PulseAudio from one of the worst audio APIs to one of the best.
+*/
+
+
+/*
+It is assumed pulseaudio.h is available when linking at compile time. When linking at compile time, we use the declarations in the header
+to check for type safety. We cannot do this when linking at run time because the header might not be available.
+*/
+#ifdef MA_NO_RUNTIME_LINKING
+
+/* pulseaudio.h marks some functions with "inline" which isn't always supported. Need to emulate it. */
+#if !defined(__cplusplus)
+#if defined(__STRICT_ANSI__)
+#if !defined(inline)
+#define inline __inline__ __attribute__((always_inline))
+#define MA_INLINE_DEFINED
+#endif
+#endif
+#endif
+#include <pulse/pulseaudio.h>
+#if defined(MA_INLINE_DEFINED)
+#undef inline
+#undef MA_INLINE_DEFINED
+#endif
+
+#define MA_PA_OK PA_OK
+#define MA_PA_ERR_ACCESS PA_ERR_ACCESS
+#define MA_PA_ERR_INVALID PA_ERR_INVALID
+#define MA_PA_ERR_NOENTITY PA_ERR_NOENTITY
+#define MA_PA_ERR_NOTSUPPORTED PA_ERR_NOTSUPPORTED
+
+#define MA_PA_CHANNELS_MAX PA_CHANNELS_MAX
+#define MA_PA_RATE_MAX PA_RATE_MAX
+
+typedef pa_context_flags_t ma_pa_context_flags_t;
+#define MA_PA_CONTEXT_NOFLAGS PA_CONTEXT_NOFLAGS
+#define MA_PA_CONTEXT_NOAUTOSPAWN PA_CONTEXT_NOAUTOSPAWN
+#define MA_PA_CONTEXT_NOFAIL PA_CONTEXT_NOFAIL
+
+typedef pa_stream_flags_t ma_pa_stream_flags_t;
+#define MA_PA_STREAM_NOFLAGS PA_STREAM_NOFLAGS
+#define MA_PA_STREAM_START_CORKED PA_STREAM_START_CORKED
+#define MA_PA_STREAM_INTERPOLATE_TIMING PA_STREAM_INTERPOLATE_TIMING
+#define MA_PA_STREAM_NOT_MONOTONIC PA_STREAM_NOT_MONOTONIC
+#define MA_PA_STREAM_AUTO_TIMING_UPDATE PA_STREAM_AUTO_TIMING_UPDATE
+#define MA_PA_STREAM_NO_REMAP_CHANNELS PA_STREAM_NO_REMAP_CHANNELS
+#define MA_PA_STREAM_NO_REMIX_CHANNELS PA_STREAM_NO_REMIX_CHANNELS
+#define MA_PA_STREAM_FIX_FORMAT PA_STREAM_FIX_FORMAT
+#define MA_PA_STREAM_FIX_RATE PA_STREAM_FIX_RATE
+#define MA_PA_STREAM_FIX_CHANNELS PA_STREAM_FIX_CHANNELS
+#define MA_PA_STREAM_DONT_MOVE PA_STREAM_DONT_MOVE
+#define MA_PA_STREAM_VARIABLE_RATE PA_STREAM_VARIABLE_RATE
+#define MA_PA_STREAM_PEAK_DETECT PA_STREAM_PEAK_DETECT
+#define MA_PA_STREAM_START_MUTED PA_STREAM_START_MUTED
+#define MA_PA_STREAM_ADJUST_LATENCY PA_STREAM_ADJUST_LATENCY
+#define MA_PA_STREAM_EARLY_REQUESTS PA_STREAM_EARLY_REQUESTS
+#define MA_PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND
+#define MA_PA_STREAM_START_UNMUTED PA_STREAM_START_UNMUTED
+#define MA_PA_STREAM_FAIL_ON_SUSPEND PA_STREAM_FAIL_ON_SUSPEND
+#define MA_PA_STREAM_RELATIVE_VOLUME PA_STREAM_RELATIVE_VOLUME
+#define MA_PA_STREAM_PASSTHROUGH PA_STREAM_PASSTHROUGH
+
+typedef pa_sink_flags_t ma_pa_sink_flags_t; +#define MA_PA_SINK_NOFLAGS PA_SINK_NOFLAGS +#define MA_PA_SINK_HW_VOLUME_CTRL PA_SINK_HW_VOLUME_CTRL +#define MA_PA_SINK_LATENCY PA_SINK_LATENCY +#define MA_PA_SINK_HARDWARE PA_SINK_HARDWARE +#define MA_PA_SINK_NETWORK PA_SINK_NETWORK +#define MA_PA_SINK_HW_MUTE_CTRL PA_SINK_HW_MUTE_CTRL +#define MA_PA_SINK_DECIBEL_VOLUME PA_SINK_DECIBEL_VOLUME +#define MA_PA_SINK_FLAT_VOLUME PA_SINK_FLAT_VOLUME +#define MA_PA_SINK_DYNAMIC_LATENCY PA_SINK_DYNAMIC_LATENCY +#define MA_PA_SINK_SET_FORMATS PA_SINK_SET_FORMATS + +typedef pa_source_flags_t ma_pa_source_flags_t; +#define MA_PA_SOURCE_NOFLAGS PA_SOURCE_NOFLAGS +#define MA_PA_SOURCE_HW_VOLUME_CTRL PA_SOURCE_HW_VOLUME_CTRL +#define MA_PA_SOURCE_LATENCY PA_SOURCE_LATENCY +#define MA_PA_SOURCE_HARDWARE PA_SOURCE_HARDWARE +#define MA_PA_SOURCE_NETWORK PA_SOURCE_NETWORK +#define MA_PA_SOURCE_HW_MUTE_CTRL PA_SOURCE_HW_MUTE_CTRL +#define MA_PA_SOURCE_DECIBEL_VOLUME PA_SOURCE_DECIBEL_VOLUME +#define MA_PA_SOURCE_DYNAMIC_LATENCY PA_SOURCE_DYNAMIC_LATENCY +#define MA_PA_SOURCE_FLAT_VOLUME PA_SOURCE_FLAT_VOLUME + +typedef pa_context_state_t ma_pa_context_state_t; +#define MA_PA_CONTEXT_UNCONNECTED PA_CONTEXT_UNCONNECTED +#define MA_PA_CONTEXT_CONNECTING PA_CONTEXT_CONNECTING +#define MA_PA_CONTEXT_AUTHORIZING PA_CONTEXT_AUTHORIZING +#define MA_PA_CONTEXT_SETTING_NAME PA_CONTEXT_SETTING_NAME +#define MA_PA_CONTEXT_READY PA_CONTEXT_READY +#define MA_PA_CONTEXT_FAILED PA_CONTEXT_FAILED +#define MA_PA_CONTEXT_TERMINATED PA_CONTEXT_TERMINATED + +typedef pa_stream_state_t ma_pa_stream_state_t; +#define MA_PA_STREAM_UNCONNECTED PA_STREAM_UNCONNECTED +#define MA_PA_STREAM_CREATING PA_STREAM_CREATING +#define MA_PA_STREAM_READY PA_STREAM_READY +#define MA_PA_STREAM_FAILED PA_STREAM_FAILED +#define MA_PA_STREAM_TERMINATED PA_STREAM_TERMINATED + +typedef pa_operation_state_t ma_pa_operation_state_t; +#define MA_PA_OPERATION_RUNNING PA_OPERATION_RUNNING +#define MA_PA_OPERATION_DONE PA_OPERATION_DONE +#define MA_PA_OPERATION_CANCELLED PA_OPERATION_CANCELLED + +typedef pa_sink_state_t ma_pa_sink_state_t; +#define MA_PA_SINK_INVALID_STATE PA_SINK_INVALID_STATE +#define MA_PA_SINK_RUNNING PA_SINK_RUNNING +#define MA_PA_SINK_IDLE PA_SINK_IDLE +#define MA_PA_SINK_SUSPENDED PA_SINK_SUSPENDED + +typedef pa_source_state_t ma_pa_source_state_t; +#define MA_PA_SOURCE_INVALID_STATE PA_SOURCE_INVALID_STATE +#define MA_PA_SOURCE_RUNNING PA_SOURCE_RUNNING +#define MA_PA_SOURCE_IDLE PA_SOURCE_IDLE +#define MA_PA_SOURCE_SUSPENDED PA_SOURCE_SUSPENDED + +typedef pa_seek_mode_t ma_pa_seek_mode_t; +#define MA_PA_SEEK_RELATIVE PA_SEEK_RELATIVE +#define MA_PA_SEEK_ABSOLUTE PA_SEEK_ABSOLUTE +#define MA_PA_SEEK_RELATIVE_ON_READ PA_SEEK_RELATIVE_ON_READ +#define MA_PA_SEEK_RELATIVE_END PA_SEEK_RELATIVE_END + +typedef pa_channel_position_t ma_pa_channel_position_t; +#define MA_PA_CHANNEL_POSITION_INVALID PA_CHANNEL_POSITION_INVALID +#define MA_PA_CHANNEL_POSITION_MONO PA_CHANNEL_POSITION_MONO +#define MA_PA_CHANNEL_POSITION_FRONT_LEFT PA_CHANNEL_POSITION_FRONT_LEFT +#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT PA_CHANNEL_POSITION_FRONT_RIGHT +#define MA_PA_CHANNEL_POSITION_FRONT_CENTER PA_CHANNEL_POSITION_FRONT_CENTER +#define MA_PA_CHANNEL_POSITION_REAR_CENTER PA_CHANNEL_POSITION_REAR_CENTER +#define MA_PA_CHANNEL_POSITION_REAR_LEFT PA_CHANNEL_POSITION_REAR_LEFT +#define MA_PA_CHANNEL_POSITION_REAR_RIGHT PA_CHANNEL_POSITION_REAR_RIGHT +#define MA_PA_CHANNEL_POSITION_LFE PA_CHANNEL_POSITION_LFE +#define MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER 
PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER +#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER +#define MA_PA_CHANNEL_POSITION_SIDE_LEFT PA_CHANNEL_POSITION_SIDE_LEFT +#define MA_PA_CHANNEL_POSITION_SIDE_RIGHT PA_CHANNEL_POSITION_SIDE_RIGHT +#define MA_PA_CHANNEL_POSITION_AUX0 PA_CHANNEL_POSITION_AUX0 +#define MA_PA_CHANNEL_POSITION_AUX1 PA_CHANNEL_POSITION_AUX1 +#define MA_PA_CHANNEL_POSITION_AUX2 PA_CHANNEL_POSITION_AUX2 +#define MA_PA_CHANNEL_POSITION_AUX3 PA_CHANNEL_POSITION_AUX3 +#define MA_PA_CHANNEL_POSITION_AUX4 PA_CHANNEL_POSITION_AUX4 +#define MA_PA_CHANNEL_POSITION_AUX5 PA_CHANNEL_POSITION_AUX5 +#define MA_PA_CHANNEL_POSITION_AUX6 PA_CHANNEL_POSITION_AUX6 +#define MA_PA_CHANNEL_POSITION_AUX7 PA_CHANNEL_POSITION_AUX7 +#define MA_PA_CHANNEL_POSITION_AUX8 PA_CHANNEL_POSITION_AUX8 +#define MA_PA_CHANNEL_POSITION_AUX9 PA_CHANNEL_POSITION_AUX9 +#define MA_PA_CHANNEL_POSITION_AUX10 PA_CHANNEL_POSITION_AUX10 +#define MA_PA_CHANNEL_POSITION_AUX11 PA_CHANNEL_POSITION_AUX11 +#define MA_PA_CHANNEL_POSITION_AUX12 PA_CHANNEL_POSITION_AUX12 +#define MA_PA_CHANNEL_POSITION_AUX13 PA_CHANNEL_POSITION_AUX13 +#define MA_PA_CHANNEL_POSITION_AUX14 PA_CHANNEL_POSITION_AUX14 +#define MA_PA_CHANNEL_POSITION_AUX15 PA_CHANNEL_POSITION_AUX15 +#define MA_PA_CHANNEL_POSITION_AUX16 PA_CHANNEL_POSITION_AUX16 +#define MA_PA_CHANNEL_POSITION_AUX17 PA_CHANNEL_POSITION_AUX17 +#define MA_PA_CHANNEL_POSITION_AUX18 PA_CHANNEL_POSITION_AUX18 +#define MA_PA_CHANNEL_POSITION_AUX19 PA_CHANNEL_POSITION_AUX19 +#define MA_PA_CHANNEL_POSITION_AUX20 PA_CHANNEL_POSITION_AUX20 +#define MA_PA_CHANNEL_POSITION_AUX21 PA_CHANNEL_POSITION_AUX21 +#define MA_PA_CHANNEL_POSITION_AUX22 PA_CHANNEL_POSITION_AUX22 +#define MA_PA_CHANNEL_POSITION_AUX23 PA_CHANNEL_POSITION_AUX23 +#define MA_PA_CHANNEL_POSITION_AUX24 PA_CHANNEL_POSITION_AUX24 +#define MA_PA_CHANNEL_POSITION_AUX25 PA_CHANNEL_POSITION_AUX25 +#define MA_PA_CHANNEL_POSITION_AUX26 PA_CHANNEL_POSITION_AUX26 +#define MA_PA_CHANNEL_POSITION_AUX27 PA_CHANNEL_POSITION_AUX27 +#define MA_PA_CHANNEL_POSITION_AUX28 PA_CHANNEL_POSITION_AUX28 +#define MA_PA_CHANNEL_POSITION_AUX29 PA_CHANNEL_POSITION_AUX29 +#define MA_PA_CHANNEL_POSITION_AUX30 PA_CHANNEL_POSITION_AUX30 +#define MA_PA_CHANNEL_POSITION_AUX31 PA_CHANNEL_POSITION_AUX31 +#define MA_PA_CHANNEL_POSITION_TOP_CENTER PA_CHANNEL_POSITION_TOP_CENTER +#define MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT PA_CHANNEL_POSITION_TOP_FRONT_LEFT +#define MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT PA_CHANNEL_POSITION_TOP_FRONT_RIGHT +#define MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER PA_CHANNEL_POSITION_TOP_FRONT_CENTER +#define MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT PA_CHANNEL_POSITION_TOP_REAR_LEFT +#define MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT PA_CHANNEL_POSITION_TOP_REAR_RIGHT +#define MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER PA_CHANNEL_POSITION_TOP_REAR_CENTER +#define MA_PA_CHANNEL_POSITION_LEFT PA_CHANNEL_POSITION_LEFT +#define MA_PA_CHANNEL_POSITION_RIGHT PA_CHANNEL_POSITION_RIGHT +#define MA_PA_CHANNEL_POSITION_CENTER PA_CHANNEL_POSITION_CENTER +#define MA_PA_CHANNEL_POSITION_SUBWOOFER PA_CHANNEL_POSITION_SUBWOOFER + +typedef pa_channel_map_def_t ma_pa_channel_map_def_t; +#define MA_PA_CHANNEL_MAP_AIFF PA_CHANNEL_MAP_AIFF +#define MA_PA_CHANNEL_MAP_ALSA PA_CHANNEL_MAP_ALSA +#define MA_PA_CHANNEL_MAP_AUX PA_CHANNEL_MAP_AUX +#define MA_PA_CHANNEL_MAP_WAVEEX PA_CHANNEL_MAP_WAVEEX +#define MA_PA_CHANNEL_MAP_OSS PA_CHANNEL_MAP_OSS +#define MA_PA_CHANNEL_MAP_DEFAULT PA_CHANNEL_MAP_DEFAULT + +typedef pa_sample_format_t 
ma_pa_sample_format_t; +#define MA_PA_SAMPLE_INVALID PA_SAMPLE_INVALID +#define MA_PA_SAMPLE_U8 PA_SAMPLE_U8 +#define MA_PA_SAMPLE_ALAW PA_SAMPLE_ALAW +#define MA_PA_SAMPLE_ULAW PA_SAMPLE_ULAW +#define MA_PA_SAMPLE_S16LE PA_SAMPLE_S16LE +#define MA_PA_SAMPLE_S16BE PA_SAMPLE_S16BE +#define MA_PA_SAMPLE_FLOAT32LE PA_SAMPLE_FLOAT32LE +#define MA_PA_SAMPLE_FLOAT32BE PA_SAMPLE_FLOAT32BE +#define MA_PA_SAMPLE_S32LE PA_SAMPLE_S32LE +#define MA_PA_SAMPLE_S32BE PA_SAMPLE_S32BE +#define MA_PA_SAMPLE_S24LE PA_SAMPLE_S24LE +#define MA_PA_SAMPLE_S24BE PA_SAMPLE_S24BE +#define MA_PA_SAMPLE_S24_32LE PA_SAMPLE_S24_32LE +#define MA_PA_SAMPLE_S24_32BE PA_SAMPLE_S24_32BE + +typedef pa_mainloop ma_pa_mainloop; +typedef pa_threaded_mainloop ma_pa_threaded_mainloop; +typedef pa_mainloop_api ma_pa_mainloop_api; +typedef pa_context ma_pa_context; +typedef pa_operation ma_pa_operation; +typedef pa_stream ma_pa_stream; +typedef pa_spawn_api ma_pa_spawn_api; +typedef pa_buffer_attr ma_pa_buffer_attr; +typedef pa_channel_map ma_pa_channel_map; +typedef pa_cvolume ma_pa_cvolume; +typedef pa_sample_spec ma_pa_sample_spec; +typedef pa_sink_info ma_pa_sink_info; +typedef pa_source_info ma_pa_source_info; + +typedef pa_context_notify_cb_t ma_pa_context_notify_cb_t; +typedef pa_sink_info_cb_t ma_pa_sink_info_cb_t; +typedef pa_source_info_cb_t ma_pa_source_info_cb_t; +typedef pa_stream_success_cb_t ma_pa_stream_success_cb_t; +typedef pa_stream_request_cb_t ma_pa_stream_request_cb_t; +typedef pa_stream_notify_cb_t ma_pa_stream_notify_cb_t; +typedef pa_free_cb_t ma_pa_free_cb_t; +#else +#define MA_PA_OK 0 +#define MA_PA_ERR_ACCESS 1 +#define MA_PA_ERR_INVALID 2 +#define MA_PA_ERR_NOENTITY 5 +#define MA_PA_ERR_NOTSUPPORTED 19 + +#define MA_PA_CHANNELS_MAX 32 +#define MA_PA_RATE_MAX 384000 + +typedef int ma_pa_context_flags_t; +#define MA_PA_CONTEXT_NOFLAGS 0x00000000 +#define MA_PA_CONTEXT_NOAUTOSPAWN 0x00000001 +#define MA_PA_CONTEXT_NOFAIL 0x00000002 + +typedef int ma_pa_stream_flags_t; +#define MA_PA_STREAM_NOFLAGS 0x00000000 +#define MA_PA_STREAM_START_CORKED 0x00000001 +#define MA_PA_STREAM_INTERPOLATE_TIMING 0x00000002 +#define MA_PA_STREAM_NOT_MONOTONIC 0x00000004 +#define MA_PA_STREAM_AUTO_TIMING_UPDATE 0x00000008 +#define MA_PA_STREAM_NO_REMAP_CHANNELS 0x00000010 +#define MA_PA_STREAM_NO_REMIX_CHANNELS 0x00000020 +#define MA_PA_STREAM_FIX_FORMAT 0x00000040 +#define MA_PA_STREAM_FIX_RATE 0x00000080 +#define MA_PA_STREAM_FIX_CHANNELS 0x00000100 +#define MA_PA_STREAM_DONT_MOVE 0x00000200 +#define MA_PA_STREAM_VARIABLE_RATE 0x00000400 +#define MA_PA_STREAM_PEAK_DETECT 0x00000800 +#define MA_PA_STREAM_START_MUTED 0x00001000 +#define MA_PA_STREAM_ADJUST_LATENCY 0x00002000 +#define MA_PA_STREAM_EARLY_REQUESTS 0x00004000 +#define MA_PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND 0x00008000 +#define MA_PA_STREAM_START_UNMUTED 0x00010000 +#define MA_PA_STREAM_FAIL_ON_SUSPEND 0x00020000 +#define MA_PA_STREAM_RELATIVE_VOLUME 0x00040000 +#define MA_PA_STREAM_PASSTHROUGH 0x00080000 + +typedef int ma_pa_sink_flags_t; +#define MA_PA_SINK_NOFLAGS 0x00000000 +#define MA_PA_SINK_HW_VOLUME_CTRL 0x00000001 +#define MA_PA_SINK_LATENCY 0x00000002 +#define MA_PA_SINK_HARDWARE 0x00000004 +#define MA_PA_SINK_NETWORK 0x00000008 +#define MA_PA_SINK_HW_MUTE_CTRL 0x00000010 +#define MA_PA_SINK_DECIBEL_VOLUME 0x00000020 +#define MA_PA_SINK_FLAT_VOLUME 0x00000040 +#define MA_PA_SINK_DYNAMIC_LATENCY 0x00000080 +#define MA_PA_SINK_SET_FORMATS 0x00000100 + +typedef int ma_pa_source_flags_t; +#define MA_PA_SOURCE_NOFLAGS 0x00000000 +#define 
MA_PA_SOURCE_HW_VOLUME_CTRL 0x00000001 +#define MA_PA_SOURCE_LATENCY 0x00000002 +#define MA_PA_SOURCE_HARDWARE 0x00000004 +#define MA_PA_SOURCE_NETWORK 0x00000008 +#define MA_PA_SOURCE_HW_MUTE_CTRL 0x00000010 +#define MA_PA_SOURCE_DECIBEL_VOLUME 0x00000020 +#define MA_PA_SOURCE_DYNAMIC_LATENCY 0x00000040 +#define MA_PA_SOURCE_FLAT_VOLUME 0x00000080 + +typedef int ma_pa_context_state_t; +#define MA_PA_CONTEXT_UNCONNECTED 0 +#define MA_PA_CONTEXT_CONNECTING 1 +#define MA_PA_CONTEXT_AUTHORIZING 2 +#define MA_PA_CONTEXT_SETTING_NAME 3 +#define MA_PA_CONTEXT_READY 4 +#define MA_PA_CONTEXT_FAILED 5 +#define MA_PA_CONTEXT_TERMINATED 6 + +typedef int ma_pa_stream_state_t; +#define MA_PA_STREAM_UNCONNECTED 0 +#define MA_PA_STREAM_CREATING 1 +#define MA_PA_STREAM_READY 2 +#define MA_PA_STREAM_FAILED 3 +#define MA_PA_STREAM_TERMINATED 4 + +typedef int ma_pa_operation_state_t; +#define MA_PA_OPERATION_RUNNING 0 +#define MA_PA_OPERATION_DONE 1 +#define MA_PA_OPERATION_CANCELLED 2 + +typedef int ma_pa_sink_state_t; +#define MA_PA_SINK_INVALID_STATE -1 +#define MA_PA_SINK_RUNNING 0 +#define MA_PA_SINK_IDLE 1 +#define MA_PA_SINK_SUSPENDED 2 + +typedef int ma_pa_source_state_t; +#define MA_PA_SOURCE_INVALID_STATE -1 +#define MA_PA_SOURCE_RUNNING 0 +#define MA_PA_SOURCE_IDLE 1 +#define MA_PA_SOURCE_SUSPENDED 2 + +typedef int ma_pa_seek_mode_t; +#define MA_PA_SEEK_RELATIVE 0 +#define MA_PA_SEEK_ABSOLUTE 1 +#define MA_PA_SEEK_RELATIVE_ON_READ 2 +#define MA_PA_SEEK_RELATIVE_END 3 + +typedef int ma_pa_channel_position_t; +#define MA_PA_CHANNEL_POSITION_INVALID -1 +#define MA_PA_CHANNEL_POSITION_MONO 0 +#define MA_PA_CHANNEL_POSITION_FRONT_LEFT 1 +#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT 2 +#define MA_PA_CHANNEL_POSITION_FRONT_CENTER 3 +#define MA_PA_CHANNEL_POSITION_REAR_CENTER 4 +#define MA_PA_CHANNEL_POSITION_REAR_LEFT 5 +#define MA_PA_CHANNEL_POSITION_REAR_RIGHT 6 +#define MA_PA_CHANNEL_POSITION_LFE 7 +#define MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER 8 +#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER 9 +#define MA_PA_CHANNEL_POSITION_SIDE_LEFT 10 +#define MA_PA_CHANNEL_POSITION_SIDE_RIGHT 11 +#define MA_PA_CHANNEL_POSITION_AUX0 12 +#define MA_PA_CHANNEL_POSITION_AUX1 13 +#define MA_PA_CHANNEL_POSITION_AUX2 14 +#define MA_PA_CHANNEL_POSITION_AUX3 15 +#define MA_PA_CHANNEL_POSITION_AUX4 16 +#define MA_PA_CHANNEL_POSITION_AUX5 17 +#define MA_PA_CHANNEL_POSITION_AUX6 18 +#define MA_PA_CHANNEL_POSITION_AUX7 19 +#define MA_PA_CHANNEL_POSITION_AUX8 20 +#define MA_PA_CHANNEL_POSITION_AUX9 21 +#define MA_PA_CHANNEL_POSITION_AUX10 22 +#define MA_PA_CHANNEL_POSITION_AUX11 23 +#define MA_PA_CHANNEL_POSITION_AUX12 24 +#define MA_PA_CHANNEL_POSITION_AUX13 25 +#define MA_PA_CHANNEL_POSITION_AUX14 26 +#define MA_PA_CHANNEL_POSITION_AUX15 27 +#define MA_PA_CHANNEL_POSITION_AUX16 28 +#define MA_PA_CHANNEL_POSITION_AUX17 29 +#define MA_PA_CHANNEL_POSITION_AUX18 30 +#define MA_PA_CHANNEL_POSITION_AUX19 31 +#define MA_PA_CHANNEL_POSITION_AUX20 32 +#define MA_PA_CHANNEL_POSITION_AUX21 33 +#define MA_PA_CHANNEL_POSITION_AUX22 34 +#define MA_PA_CHANNEL_POSITION_AUX23 35 +#define MA_PA_CHANNEL_POSITION_AUX24 36 +#define MA_PA_CHANNEL_POSITION_AUX25 37 +#define MA_PA_CHANNEL_POSITION_AUX26 38 +#define MA_PA_CHANNEL_POSITION_AUX27 39 +#define MA_PA_CHANNEL_POSITION_AUX28 40 +#define MA_PA_CHANNEL_POSITION_AUX29 41 +#define MA_PA_CHANNEL_POSITION_AUX30 42 +#define MA_PA_CHANNEL_POSITION_AUX31 43 +#define MA_PA_CHANNEL_POSITION_TOP_CENTER 44 +#define MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT 45 +#define 
MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT 46 +#define MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER 47 +#define MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT 48 +#define MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT 49 +#define MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER 50 +#define MA_PA_CHANNEL_POSITION_LEFT MA_PA_CHANNEL_POSITION_FRONT_LEFT +#define MA_PA_CHANNEL_POSITION_RIGHT MA_PA_CHANNEL_POSITION_FRONT_RIGHT +#define MA_PA_CHANNEL_POSITION_CENTER MA_PA_CHANNEL_POSITION_FRONT_CENTER +#define MA_PA_CHANNEL_POSITION_SUBWOOFER MA_PA_CHANNEL_POSITION_LFE + +typedef int ma_pa_channel_map_def_t; +#define MA_PA_CHANNEL_MAP_AIFF 0 +#define MA_PA_CHANNEL_MAP_ALSA 1 +#define MA_PA_CHANNEL_MAP_AUX 2 +#define MA_PA_CHANNEL_MAP_WAVEEX 3 +#define MA_PA_CHANNEL_MAP_OSS 4 +#define MA_PA_CHANNEL_MAP_DEFAULT MA_PA_CHANNEL_MAP_AIFF + +typedef int ma_pa_sample_format_t; +#define MA_PA_SAMPLE_INVALID -1 +#define MA_PA_SAMPLE_U8 0 +#define MA_PA_SAMPLE_ALAW 1 +#define MA_PA_SAMPLE_ULAW 2 +#define MA_PA_SAMPLE_S16LE 3 +#define MA_PA_SAMPLE_S16BE 4 +#define MA_PA_SAMPLE_FLOAT32LE 5 +#define MA_PA_SAMPLE_FLOAT32BE 6 +#define MA_PA_SAMPLE_S32LE 7 +#define MA_PA_SAMPLE_S32BE 8 +#define MA_PA_SAMPLE_S24LE 9 +#define MA_PA_SAMPLE_S24BE 10 +#define MA_PA_SAMPLE_S24_32LE 11 +#define MA_PA_SAMPLE_S24_32BE 12 + +typedef struct ma_pa_mainloop ma_pa_mainloop; +typedef struct ma_pa_threaded_mainloop ma_pa_threaded_mainloop; +typedef struct ma_pa_mainloop_api ma_pa_mainloop_api; +typedef struct ma_pa_context ma_pa_context; +typedef struct ma_pa_operation ma_pa_operation; +typedef struct ma_pa_stream ma_pa_stream; +typedef struct ma_pa_spawn_api ma_pa_spawn_api; + +typedef struct +{ + ma_uint32 maxlength; + ma_uint32 tlength; + ma_uint32 prebuf; + ma_uint32 minreq; + ma_uint32 fragsize; +} ma_pa_buffer_attr; + +typedef struct +{ + ma_uint8 channels; + ma_pa_channel_position_t map[MA_PA_CHANNELS_MAX]; +} ma_pa_channel_map; + +typedef struct +{ + ma_uint8 channels; + ma_uint32 values[MA_PA_CHANNELS_MAX]; +} ma_pa_cvolume; + +typedef struct +{ + ma_pa_sample_format_t format; + ma_uint32 rate; + ma_uint8 channels; +} ma_pa_sample_spec; + +typedef struct +{ + const char* name; + ma_uint32 index; + const char* description; + ma_pa_sample_spec sample_spec; + ma_pa_channel_map channel_map; + ma_uint32 owner_module; + ma_pa_cvolume volume; + int mute; + ma_uint32 monitor_source; + const char* monitor_source_name; + ma_uint64 latency; + const char* driver; + ma_pa_sink_flags_t flags; + void* proplist; + ma_uint64 configured_latency; + ma_uint32 base_volume; + ma_pa_sink_state_t state; + ma_uint32 n_volume_steps; + ma_uint32 card; + ma_uint32 n_ports; + void** ports; + void* active_port; + ma_uint8 n_formats; + void** formats; +} ma_pa_sink_info; + +typedef struct +{ + const char *name; + ma_uint32 index; + const char *description; + ma_pa_sample_spec sample_spec; + ma_pa_channel_map channel_map; + ma_uint32 owner_module; + ma_pa_cvolume volume; + int mute; + ma_uint32 monitor_of_sink; + const char *monitor_of_sink_name; + ma_uint64 latency; + const char *driver; + ma_pa_source_flags_t flags; + void* proplist; + ma_uint64 configured_latency; + ma_uint32 base_volume; + ma_pa_source_state_t state; + ma_uint32 n_volume_steps; + ma_uint32 card; + ma_uint32 n_ports; + void** ports; + void* active_port; + ma_uint8 n_formats; + void** formats; +} ma_pa_source_info; + +typedef void (* ma_pa_context_notify_cb_t)(ma_pa_context* c, void* userdata); +typedef void (* ma_pa_sink_info_cb_t) (ma_pa_context* c, const ma_pa_sink_info* i, int eol, void* userdata); +typedef void (* 
ma_pa_source_info_cb_t) (ma_pa_context* c, const ma_pa_source_info* i, int eol, void* userdata); +typedef void (* ma_pa_stream_success_cb_t)(ma_pa_stream* s, int success, void* userdata); +typedef void (* ma_pa_stream_request_cb_t)(ma_pa_stream* s, size_t nbytes, void* userdata); +typedef void (* ma_pa_stream_notify_cb_t) (ma_pa_stream* s, void* userdata); +typedef void (* ma_pa_free_cb_t) (void* p); +#endif + + +typedef ma_pa_mainloop* (* ma_pa_mainloop_new_proc) (void); +typedef void (* ma_pa_mainloop_free_proc) (ma_pa_mainloop* m); +typedef void (* ma_pa_mainloop_quit_proc) (ma_pa_mainloop* m, int retval); +typedef ma_pa_mainloop_api* (* ma_pa_mainloop_get_api_proc) (ma_pa_mainloop* m); +typedef int (* ma_pa_mainloop_iterate_proc) (ma_pa_mainloop* m, int block, int* retval); +typedef void (* ma_pa_mainloop_wakeup_proc) (ma_pa_mainloop* m); +typedef ma_pa_threaded_mainloop* (* ma_pa_threaded_mainloop_new_proc) (void); +typedef void (* ma_pa_threaded_mainloop_free_proc) (ma_pa_threaded_mainloop* m); +typedef int (* ma_pa_threaded_mainloop_start_proc) (ma_pa_threaded_mainloop* m); +typedef void (* ma_pa_threaded_mainloop_stop_proc) (ma_pa_threaded_mainloop* m); +typedef void (* ma_pa_threaded_mainloop_lock_proc) (ma_pa_threaded_mainloop* m); +typedef void (* ma_pa_threaded_mainloop_unlock_proc) (ma_pa_threaded_mainloop* m); +typedef void (* ma_pa_threaded_mainloop_wait_proc) (ma_pa_threaded_mainloop* m); +typedef void (* ma_pa_threaded_mainloop_signal_proc) (ma_pa_threaded_mainloop* m, int wait_for_accept); +typedef void (* ma_pa_threaded_mainloop_accept_proc) (ma_pa_threaded_mainloop* m); +typedef int (* ma_pa_threaded_mainloop_get_retval_proc) (ma_pa_threaded_mainloop* m); +typedef ma_pa_mainloop_api* (* ma_pa_threaded_mainloop_get_api_proc) (ma_pa_threaded_mainloop* m); +typedef int (* ma_pa_threaded_mainloop_in_thread_proc) (ma_pa_threaded_mainloop* m); +typedef void (* ma_pa_threaded_mainloop_set_name_proc) (ma_pa_threaded_mainloop* m, const char* name); +typedef ma_pa_context* (* ma_pa_context_new_proc) (ma_pa_mainloop_api* mainloop, const char* name); +typedef void (* ma_pa_context_unref_proc) (ma_pa_context* c); +typedef int (* ma_pa_context_connect_proc) (ma_pa_context* c, const char* server, ma_pa_context_flags_t flags, const ma_pa_spawn_api* api); +typedef void (* ma_pa_context_disconnect_proc) (ma_pa_context* c); +typedef void (* ma_pa_context_set_state_callback_proc) (ma_pa_context* c, ma_pa_context_notify_cb_t cb, void* userdata); +typedef ma_pa_context_state_t (* ma_pa_context_get_state_proc) (ma_pa_context* c); +typedef ma_pa_operation* (* ma_pa_context_get_sink_info_list_proc) (ma_pa_context* c, ma_pa_sink_info_cb_t cb, void* userdata); +typedef ma_pa_operation* (* ma_pa_context_get_source_info_list_proc) (ma_pa_context* c, ma_pa_source_info_cb_t cb, void* userdata); +typedef ma_pa_operation* (* ma_pa_context_get_sink_info_by_name_proc) (ma_pa_context* c, const char* name, ma_pa_sink_info_cb_t cb, void* userdata); +typedef ma_pa_operation* (* ma_pa_context_get_source_info_by_name_proc)(ma_pa_context* c, const char* name, ma_pa_source_info_cb_t cb, void* userdata); +typedef void (* ma_pa_operation_unref_proc) (ma_pa_operation* o); +typedef ma_pa_operation_state_t (* ma_pa_operation_get_state_proc) (ma_pa_operation* o); +typedef ma_pa_channel_map* (* ma_pa_channel_map_init_extend_proc) (ma_pa_channel_map* m, unsigned channels, ma_pa_channel_map_def_t def); +typedef int (* ma_pa_channel_map_valid_proc) (const ma_pa_channel_map* m); +typedef int (* 
ma_pa_channel_map_compatible_proc) (const ma_pa_channel_map* m, const ma_pa_sample_spec* ss); +typedef ma_pa_stream* (* ma_pa_stream_new_proc) (ma_pa_context* c, const char* name, const ma_pa_sample_spec* ss, const ma_pa_channel_map* map); +typedef void (* ma_pa_stream_unref_proc) (ma_pa_stream* s); +typedef int (* ma_pa_stream_connect_playback_proc) (ma_pa_stream* s, const char* dev, const ma_pa_buffer_attr* attr, ma_pa_stream_flags_t flags, const ma_pa_cvolume* volume, ma_pa_stream* sync_stream); +typedef int (* ma_pa_stream_connect_record_proc) (ma_pa_stream* s, const char* dev, const ma_pa_buffer_attr* attr, ma_pa_stream_flags_t flags); +typedef int (* ma_pa_stream_disconnect_proc) (ma_pa_stream* s); +typedef ma_pa_stream_state_t (* ma_pa_stream_get_state_proc) (ma_pa_stream* s); +typedef const ma_pa_sample_spec* (* ma_pa_stream_get_sample_spec_proc) (ma_pa_stream* s); +typedef const ma_pa_channel_map* (* ma_pa_stream_get_channel_map_proc) (ma_pa_stream* s); +typedef const ma_pa_buffer_attr* (* ma_pa_stream_get_buffer_attr_proc) (ma_pa_stream* s); +typedef ma_pa_operation* (* ma_pa_stream_set_buffer_attr_proc) (ma_pa_stream* s, const ma_pa_buffer_attr* attr, ma_pa_stream_success_cb_t cb, void* userdata); +typedef const char* (* ma_pa_stream_get_device_name_proc) (ma_pa_stream* s); +typedef void (* ma_pa_stream_set_write_callback_proc) (ma_pa_stream* s, ma_pa_stream_request_cb_t cb, void* userdata); +typedef void (* ma_pa_stream_set_read_callback_proc) (ma_pa_stream* s, ma_pa_stream_request_cb_t cb, void* userdata); +typedef void (* ma_pa_stream_set_suspended_callback_proc) (ma_pa_stream* s, ma_pa_stream_notify_cb_t cb, void* userdata); +typedef void (* ma_pa_stream_set_moved_callback_proc) (ma_pa_stream* s, ma_pa_stream_notify_cb_t cb, void* userdata); +typedef int (* ma_pa_stream_is_suspended_proc) (const ma_pa_stream* s); +typedef ma_pa_operation* (* ma_pa_stream_flush_proc) (ma_pa_stream* s, ma_pa_stream_success_cb_t cb, void* userdata); +typedef ma_pa_operation* (* ma_pa_stream_drain_proc) (ma_pa_stream* s, ma_pa_stream_success_cb_t cb, void* userdata); +typedef int (* ma_pa_stream_is_corked_proc) (ma_pa_stream* s); +typedef ma_pa_operation* (* ma_pa_stream_cork_proc) (ma_pa_stream* s, int b, ma_pa_stream_success_cb_t cb, void* userdata); +typedef ma_pa_operation* (* ma_pa_stream_trigger_proc) (ma_pa_stream* s, ma_pa_stream_success_cb_t cb, void* userdata); +typedef int (* ma_pa_stream_begin_write_proc) (ma_pa_stream* s, void** data, size_t* nbytes); +typedef int (* ma_pa_stream_write_proc) (ma_pa_stream* s, const void* data, size_t nbytes, ma_pa_free_cb_t free_cb, int64_t offset, ma_pa_seek_mode_t seek); +typedef int (* ma_pa_stream_peek_proc) (ma_pa_stream* s, const void** data, size_t* nbytes); +typedef int (* ma_pa_stream_drop_proc) (ma_pa_stream* s); +typedef size_t (* ma_pa_stream_writable_size_proc) (ma_pa_stream* s); +typedef size_t (* ma_pa_stream_readable_size_proc) (ma_pa_stream* s); + +typedef struct +{ + ma_uint32 count; + ma_uint32 capacity; + ma_device_info* pInfo; +} ma_pulse_device_enum_data; + +static ma_result ma_result_from_pulse(int result) +{ + if (result < 0) { + return MA_ERROR; + } + + switch (result) { + case MA_PA_OK: return MA_SUCCESS; + case MA_PA_ERR_ACCESS: return MA_ACCESS_DENIED; + case MA_PA_ERR_INVALID: return MA_INVALID_ARGS; + case MA_PA_ERR_NOENTITY: return MA_NO_DEVICE; + default: return MA_ERROR; + } +} + +#if 0 +static ma_pa_sample_format_t ma_format_to_pulse(ma_format format) +{ + if (ma_is_little_endian()) { + switch (format) { + case 
ma_format_s16: return MA_PA_SAMPLE_S16LE; + case ma_format_s24: return MA_PA_SAMPLE_S24LE; + case ma_format_s32: return MA_PA_SAMPLE_S32LE; + case ma_format_f32: return MA_PA_SAMPLE_FLOAT32LE; + default: break; + } + } else { + switch (format) { + case ma_format_s16: return MA_PA_SAMPLE_S16BE; + case ma_format_s24: return MA_PA_SAMPLE_S24BE; + case ma_format_s32: return MA_PA_SAMPLE_S32BE; + case ma_format_f32: return MA_PA_SAMPLE_FLOAT32BE; + default: break; + } + } + + /* Endian agnostic. */ + switch (format) { + case ma_format_u8: return MA_PA_SAMPLE_U8; + default: return MA_PA_SAMPLE_INVALID; + } +} +#endif + +static ma_format ma_format_from_pulse(ma_pa_sample_format_t format) +{ + if (ma_is_little_endian()) { + switch (format) { + case MA_PA_SAMPLE_S16LE: return ma_format_s16; + case MA_PA_SAMPLE_S24LE: return ma_format_s24; + case MA_PA_SAMPLE_S32LE: return ma_format_s32; + case MA_PA_SAMPLE_FLOAT32LE: return ma_format_f32; + default: break; + } + } else { + switch (format) { + case MA_PA_SAMPLE_S16BE: return ma_format_s16; + case MA_PA_SAMPLE_S24BE: return ma_format_s24; + case MA_PA_SAMPLE_S32BE: return ma_format_s32; + case MA_PA_SAMPLE_FLOAT32BE: return ma_format_f32; + default: break; + } + } + + /* Endian agnostic. */ + switch (format) { + case MA_PA_SAMPLE_U8: return ma_format_u8; + default: return ma_format_unknown; + } +} + +static ma_channel ma_channel_position_from_pulse(ma_pa_channel_position_t position) +{ + switch (position) + { + case MA_PA_CHANNEL_POSITION_INVALID: return MA_CHANNEL_NONE; + case MA_PA_CHANNEL_POSITION_MONO: return MA_CHANNEL_MONO; + case MA_PA_CHANNEL_POSITION_FRONT_LEFT: return MA_CHANNEL_FRONT_LEFT; + case MA_PA_CHANNEL_POSITION_FRONT_RIGHT: return MA_CHANNEL_FRONT_RIGHT; + case MA_PA_CHANNEL_POSITION_FRONT_CENTER: return MA_CHANNEL_FRONT_CENTER; + case MA_PA_CHANNEL_POSITION_REAR_CENTER: return MA_CHANNEL_BACK_CENTER; + case MA_PA_CHANNEL_POSITION_REAR_LEFT: return MA_CHANNEL_BACK_LEFT; + case MA_PA_CHANNEL_POSITION_REAR_RIGHT: return MA_CHANNEL_BACK_RIGHT; + case MA_PA_CHANNEL_POSITION_LFE: return MA_CHANNEL_LFE; + case MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER: return MA_CHANNEL_FRONT_LEFT_CENTER; + case MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case MA_PA_CHANNEL_POSITION_SIDE_LEFT: return MA_CHANNEL_SIDE_LEFT; + case MA_PA_CHANNEL_POSITION_SIDE_RIGHT: return MA_CHANNEL_SIDE_RIGHT; + case MA_PA_CHANNEL_POSITION_AUX0: return MA_CHANNEL_AUX_0; + case MA_PA_CHANNEL_POSITION_AUX1: return MA_CHANNEL_AUX_1; + case MA_PA_CHANNEL_POSITION_AUX2: return MA_CHANNEL_AUX_2; + case MA_PA_CHANNEL_POSITION_AUX3: return MA_CHANNEL_AUX_3; + case MA_PA_CHANNEL_POSITION_AUX4: return MA_CHANNEL_AUX_4; + case MA_PA_CHANNEL_POSITION_AUX5: return MA_CHANNEL_AUX_5; + case MA_PA_CHANNEL_POSITION_AUX6: return MA_CHANNEL_AUX_6; + case MA_PA_CHANNEL_POSITION_AUX7: return MA_CHANNEL_AUX_7; + case MA_PA_CHANNEL_POSITION_AUX8: return MA_CHANNEL_AUX_8; + case MA_PA_CHANNEL_POSITION_AUX9: return MA_CHANNEL_AUX_9; + case MA_PA_CHANNEL_POSITION_AUX10: return MA_CHANNEL_AUX_10; + case MA_PA_CHANNEL_POSITION_AUX11: return MA_CHANNEL_AUX_11; + case MA_PA_CHANNEL_POSITION_AUX12: return MA_CHANNEL_AUX_12; + case MA_PA_CHANNEL_POSITION_AUX13: return MA_CHANNEL_AUX_13; + case MA_PA_CHANNEL_POSITION_AUX14: return MA_CHANNEL_AUX_14; + case MA_PA_CHANNEL_POSITION_AUX15: return MA_CHANNEL_AUX_15; + case MA_PA_CHANNEL_POSITION_AUX16: return MA_CHANNEL_AUX_16; + case MA_PA_CHANNEL_POSITION_AUX17: return MA_CHANNEL_AUX_17; + case 
MA_PA_CHANNEL_POSITION_AUX18: return MA_CHANNEL_AUX_18; + case MA_PA_CHANNEL_POSITION_AUX19: return MA_CHANNEL_AUX_19; + case MA_PA_CHANNEL_POSITION_AUX20: return MA_CHANNEL_AUX_20; + case MA_PA_CHANNEL_POSITION_AUX21: return MA_CHANNEL_AUX_21; + case MA_PA_CHANNEL_POSITION_AUX22: return MA_CHANNEL_AUX_22; + case MA_PA_CHANNEL_POSITION_AUX23: return MA_CHANNEL_AUX_23; + case MA_PA_CHANNEL_POSITION_AUX24: return MA_CHANNEL_AUX_24; + case MA_PA_CHANNEL_POSITION_AUX25: return MA_CHANNEL_AUX_25; + case MA_PA_CHANNEL_POSITION_AUX26: return MA_CHANNEL_AUX_26; + case MA_PA_CHANNEL_POSITION_AUX27: return MA_CHANNEL_AUX_27; + case MA_PA_CHANNEL_POSITION_AUX28: return MA_CHANNEL_AUX_28; + case MA_PA_CHANNEL_POSITION_AUX29: return MA_CHANNEL_AUX_29; + case MA_PA_CHANNEL_POSITION_AUX30: return MA_CHANNEL_AUX_30; + case MA_PA_CHANNEL_POSITION_AUX31: return MA_CHANNEL_AUX_31; + case MA_PA_CHANNEL_POSITION_TOP_CENTER: return MA_CHANNEL_TOP_CENTER; + case MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT: return MA_CHANNEL_TOP_FRONT_LEFT; + case MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT: return MA_CHANNEL_TOP_FRONT_RIGHT; + case MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER: return MA_CHANNEL_TOP_FRONT_CENTER; + case MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT: return MA_CHANNEL_TOP_BACK_LEFT; + case MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT: return MA_CHANNEL_TOP_BACK_RIGHT; + case MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER: return MA_CHANNEL_TOP_BACK_CENTER; + default: return MA_CHANNEL_NONE; + } +} + +#if 0 +static ma_pa_channel_position_t ma_channel_position_to_pulse(ma_channel position) +{ + switch (position) + { + case MA_CHANNEL_NONE: return MA_PA_CHANNEL_POSITION_INVALID; + case MA_CHANNEL_FRONT_LEFT: return MA_PA_CHANNEL_POSITION_FRONT_LEFT; + case MA_CHANNEL_FRONT_RIGHT: return MA_PA_CHANNEL_POSITION_FRONT_RIGHT; + case MA_CHANNEL_FRONT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_CENTER; + case MA_CHANNEL_LFE: return MA_PA_CHANNEL_POSITION_LFE; + case MA_CHANNEL_BACK_LEFT: return MA_PA_CHANNEL_POSITION_REAR_LEFT; + case MA_CHANNEL_BACK_RIGHT: return MA_PA_CHANNEL_POSITION_REAR_RIGHT; + case MA_CHANNEL_FRONT_LEFT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER; + case MA_CHANNEL_FRONT_RIGHT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER; + case MA_CHANNEL_BACK_CENTER: return MA_PA_CHANNEL_POSITION_REAR_CENTER; + case MA_CHANNEL_SIDE_LEFT: return MA_PA_CHANNEL_POSITION_SIDE_LEFT; + case MA_CHANNEL_SIDE_RIGHT: return MA_PA_CHANNEL_POSITION_SIDE_RIGHT; + case MA_CHANNEL_TOP_CENTER: return MA_PA_CHANNEL_POSITION_TOP_CENTER; + case MA_CHANNEL_TOP_FRONT_LEFT: return MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT; + case MA_CHANNEL_TOP_FRONT_CENTER: return MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER; + case MA_CHANNEL_TOP_FRONT_RIGHT: return MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT; + case MA_CHANNEL_TOP_BACK_LEFT: return MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT; + case MA_CHANNEL_TOP_BACK_CENTER: return MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER; + case MA_CHANNEL_TOP_BACK_RIGHT: return MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT; + case MA_CHANNEL_19: return MA_PA_CHANNEL_POSITION_AUX18; + case MA_CHANNEL_20: return MA_PA_CHANNEL_POSITION_AUX19; + case MA_CHANNEL_21: return MA_PA_CHANNEL_POSITION_AUX20; + case MA_CHANNEL_22: return MA_PA_CHANNEL_POSITION_AUX21; + case MA_CHANNEL_23: return MA_PA_CHANNEL_POSITION_AUX22; + case MA_CHANNEL_24: return MA_PA_CHANNEL_POSITION_AUX23; + case MA_CHANNEL_25: return MA_PA_CHANNEL_POSITION_AUX24; + case MA_CHANNEL_26: return MA_PA_CHANNEL_POSITION_AUX25; + case MA_CHANNEL_27: return 
MA_PA_CHANNEL_POSITION_AUX26; + case MA_CHANNEL_28: return MA_PA_CHANNEL_POSITION_AUX27; + case MA_CHANNEL_29: return MA_PA_CHANNEL_POSITION_AUX28; + case MA_CHANNEL_30: return MA_PA_CHANNEL_POSITION_AUX29; + case MA_CHANNEL_31: return MA_PA_CHANNEL_POSITION_AUX30; + case MA_CHANNEL_32: return MA_PA_CHANNEL_POSITION_AUX31; + default: return (ma_pa_channel_position_t)position; + } +} +#endif + +static ma_result ma_wait_for_operation__pulse(ma_context* pContext, ma_ptr pMainLoop, ma_pa_operation* pOP) +{ + int resultPA; + ma_pa_operation_state_t state; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pOP != NULL); + + for (;;) { + state = ((ma_pa_operation_get_state_proc)pContext->pulse.pa_operation_get_state)(pOP); + if (state != MA_PA_OPERATION_RUNNING) { + break; /* Done. */ + } + + resultPA = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pMainLoop, 1, NULL); + if (resultPA < 0) { + return ma_result_from_pulse(resultPA); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_wait_for_operation_and_unref__pulse(ma_context* pContext, ma_ptr pMainLoop, ma_pa_operation* pOP) +{ + ma_result result; + + if (pOP == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_wait_for_operation__pulse(pContext, pMainLoop, pOP); + ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); + + return result; +} + +static ma_result ma_wait_for_pa_context_to_connect__pulse(ma_context* pContext, ma_ptr pMainLoop, ma_ptr pPulseContext) +{ + int resultPA; + ma_pa_context_state_t state; + + for (;;) { + state = ((ma_pa_context_get_state_proc)pContext->pulse.pa_context_get_state)((ma_pa_context*)pPulseContext); + if (state == MA_PA_CONTEXT_READY) { + break; /* Done. */ + } + + if (state == MA_PA_CONTEXT_FAILED || state == MA_PA_CONTEXT_TERMINATED) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] An error occurred while connecting the PulseAudio context."); + return MA_ERROR; + } + + resultPA = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pMainLoop, 1, NULL); + if (resultPA < 0) { + return ma_result_from_pulse(resultPA); + } + } + + /* Should never get here. */ + return MA_SUCCESS; +} + +static ma_result ma_wait_for_pa_stream_to_connect__pulse(ma_context* pContext, ma_ptr pMainLoop, ma_ptr pStream) +{ + int resultPA; + ma_pa_stream_state_t state; + + for (;;) { + state = ((ma_pa_stream_get_state_proc)pContext->pulse.pa_stream_get_state)((ma_pa_stream*)pStream); + if (state == MA_PA_STREAM_READY) { + break; /* Done. */ + } + + if (state == MA_PA_STREAM_FAILED || state == MA_PA_STREAM_TERMINATED) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] An error occurred while connecting the PulseAudio stream."); + return MA_ERROR; + } + + resultPA = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pMainLoop, 1, NULL); + if (resultPA < 0) { + return ma_result_from_pulse(resultPA); + } + } + + return MA_SUCCESS; +} + + +static ma_result ma_init_pa_mainloop_and_pa_context__pulse(ma_context* pContext, const char* pApplicationName, const char* pServerName, ma_bool32 tryAutoSpawn, ma_ptr* ppMainLoop, ma_ptr* ppPulseContext) +{ + ma_result result; + ma_ptr pMainLoop; + ma_ptr pPulseContext; + + MA_ASSERT(ppMainLoop != NULL); + MA_ASSERT(ppPulseContext != NULL); + + /* The PulseAudio context maps well to miniaudio's notion of a context. The pa_context object will be initialized as part of the ma_context. 
*/ + pMainLoop = ((ma_pa_mainloop_new_proc)pContext->pulse.pa_mainloop_new)(); + if (pMainLoop == NULL) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create mainloop."); + return MA_FAILED_TO_INIT_BACKEND; + } + + pPulseContext = ((ma_pa_context_new_proc)pContext->pulse.pa_context_new)(((ma_pa_mainloop_get_api_proc)pContext->pulse.pa_mainloop_get_api)((ma_pa_mainloop*)pMainLoop), pApplicationName); + if (pPulseContext == NULL) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio context."); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)(pMainLoop)); + return MA_FAILED_TO_INIT_BACKEND; + } + + /* Now we need to connect to the context. Everything is asynchronous so we need to wait for it to connect before returning. */ + result = ma_result_from_pulse(((ma_pa_context_connect_proc)pContext->pulse.pa_context_connect)((ma_pa_context*)pPulseContext, pServerName, (tryAutoSpawn) ? 0 : MA_PA_CONTEXT_NOAUTOSPAWN, NULL)); + if (result != MA_SUCCESS) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio context."); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)(pMainLoop)); + return result; + } + + /* Since ma_context_init() runs synchronously we need to wait for the PulseAudio context to connect before we return. */ + result = ma_wait_for_pa_context_to_connect__pulse(pContext, pMainLoop, pPulseContext); + if (result != MA_SUCCESS) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] Waiting for connection failed."); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)(pMainLoop)); + return result; + } + + *ppMainLoop = pMainLoop; + *ppPulseContext = pPulseContext; + + return MA_SUCCESS; +} + + +static void ma_device_sink_info_callback(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData) +{ + ma_pa_sink_info* pInfoOut; + + if (endOfList > 0) { + return; + } + + /* + There has been a report that indicates that pInfo can be null which results + in a null pointer dereference below. We'll check for this for safety. + */ + if (pInfo == NULL) { + return; + } + + pInfoOut = (ma_pa_sink_info*)pUserData; + MA_ASSERT(pInfoOut != NULL); + + *pInfoOut = *pInfo; + + (void)pPulseContext; /* Unused. */ +} + +static void ma_device_source_info_callback(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData) +{ + ma_pa_source_info* pInfoOut; + + if (endOfList > 0) { + return; + } + + /* + There has been a report that indicates that pInfo can be null which results + in a null pointer dereference below. We'll check for this for safety. + */ + if (pInfo == NULL) { + return; + } + + pInfoOut = (ma_pa_source_info*)pUserData; + MA_ASSERT(pInfoOut != NULL); + + *pInfoOut = *pInfo; + + (void)pPulseContext; /* Unused. */ +} + +#if 0 +static void ma_device_sink_name_callback(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData) +{ + ma_device* pDevice; + + if (endOfList > 0) { + return; + } + + pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), pInfo->description, (size_t)-1); + + (void)pPulseContext; /* Unused. 
*/ +} + +static void ma_device_source_name_callback(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData) +{ + ma_device* pDevice; + + if (endOfList > 0) { + return; + } + + pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), pInfo->description, (size_t)-1); + + (void)pPulseContext; /* Unused. */ +} +#endif + +static ma_result ma_context_get_sink_info__pulse(ma_context* pContext, const char* pDeviceName, ma_pa_sink_info* pSinkInfo) +{ + ma_pa_operation* pOP; + + pOP = ((ma_pa_context_get_sink_info_by_name_proc)pContext->pulse.pa_context_get_sink_info_by_name)((ma_pa_context*)pContext->pulse.pPulseContext, pDeviceName, ma_device_sink_info_callback, pSinkInfo); + if (pOP == NULL) { + return MA_ERROR; + } + + return ma_wait_for_operation_and_unref__pulse(pContext, pContext->pulse.pMainLoop, pOP); +} + +static ma_result ma_context_get_source_info__pulse(ma_context* pContext, const char* pDeviceName, ma_pa_source_info* pSourceInfo) +{ + ma_pa_operation* pOP; + + pOP = ((ma_pa_context_get_source_info_by_name_proc)pContext->pulse.pa_context_get_source_info_by_name)((ma_pa_context*)pContext->pulse.pPulseContext, pDeviceName, ma_device_source_info_callback, pSourceInfo); + if (pOP == NULL) { + return MA_ERROR; + } + + return ma_wait_for_operation_and_unref__pulse(pContext, pContext->pulse.pMainLoop, pOP); +} + +static ma_result ma_context_get_default_device_index__pulse(ma_context* pContext, ma_device_type deviceType, ma_uint32* pIndex) +{ + ma_result result; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pIndex != NULL); + + if (pIndex != NULL) { + *pIndex = (ma_uint32)-1; + } + + if (deviceType == ma_device_type_playback) { + ma_pa_sink_info sinkInfo; + result = ma_context_get_sink_info__pulse(pContext, NULL, &sinkInfo); + if (result != MA_SUCCESS) { + return result; + } + + if (pIndex != NULL) { + *pIndex = sinkInfo.index; + } + } + + if (deviceType == ma_device_type_capture) { + ma_pa_source_info sourceInfo; + result = ma_context_get_source_info__pulse(pContext, NULL, &sourceInfo); + if (result != MA_SUCCESS) { + return result; + } + + if (pIndex != NULL) { + *pIndex = sourceInfo.index; + } + } + + return MA_SUCCESS; +} + + +typedef struct +{ + ma_context* pContext; + ma_enum_devices_callback_proc callback; + void* pUserData; + ma_bool32 isTerminated; + ma_uint32 defaultDeviceIndexPlayback; + ma_uint32 defaultDeviceIndexCapture; +} ma_context_enumerate_devices_callback_data__pulse; + +static void ma_context_enumerate_devices_sink_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_sink_info* pSinkInfo, int endOfList, void* pUserData) +{ + ma_context_enumerate_devices_callback_data__pulse* pData = (ma_context_enumerate_devices_callback_data__pulse*)pUserData; + ma_device_info deviceInfo; + + MA_ASSERT(pData != NULL); + + if (endOfList || pData->isTerminated) { + return; + } + + MA_ZERO_OBJECT(&deviceInfo); + + /* The name from PulseAudio is the ID for miniaudio. */ + if (pSinkInfo->name != NULL) { + ma_strncpy_s(deviceInfo.id.pulse, sizeof(deviceInfo.id.pulse), pSinkInfo->name, (size_t)-1); + } + + /* The description from PulseAudio is the name for miniaudio. 
*/ + if (pSinkInfo->description != NULL) { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), pSinkInfo->description, (size_t)-1); + } + + if (pSinkInfo->index == pData->defaultDeviceIndexPlayback) { + deviceInfo.isDefault = MA_TRUE; + } + + pData->isTerminated = !pData->callback(pData->pContext, ma_device_type_playback, &deviceInfo, pData->pUserData); + + (void)pPulseContext; /* Unused. */ +} + +static void ma_context_enumerate_devices_source_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_source_info* pSourceInfo, int endOfList, void* pUserData) +{ + ma_context_enumerate_devices_callback_data__pulse* pData = (ma_context_enumerate_devices_callback_data__pulse*)pUserData; + ma_device_info deviceInfo; + + MA_ASSERT(pData != NULL); + + if (endOfList || pData->isTerminated) { + return; + } + + MA_ZERO_OBJECT(&deviceInfo); + + /* The name from PulseAudio is the ID for miniaudio. */ + if (pSourceInfo->name != NULL) { + ma_strncpy_s(deviceInfo.id.pulse, sizeof(deviceInfo.id.pulse), pSourceInfo->name, (size_t)-1); + } + + /* The description from PulseAudio is the name for miniaudio. */ + if (pSourceInfo->description != NULL) { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), pSourceInfo->description, (size_t)-1); + } + + if (pSourceInfo->index == pData->defaultDeviceIndexCapture) { + deviceInfo.isDefault = MA_TRUE; + } + + pData->isTerminated = !pData->callback(pData->pContext, ma_device_type_capture, &deviceInfo, pData->pUserData); + + (void)pPulseContext; /* Unused. */ +} + +static ma_result ma_context_enumerate_devices__pulse(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_result result = MA_SUCCESS; + ma_context_enumerate_devices_callback_data__pulse callbackData; + ma_pa_operation* pOP = NULL; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + callbackData.pContext = pContext; + callbackData.callback = callback; + callbackData.pUserData = pUserData; + callbackData.isTerminated = MA_FALSE; + callbackData.defaultDeviceIndexPlayback = (ma_uint32)-1; + callbackData.defaultDeviceIndexCapture = (ma_uint32)-1; + + /* We need to get the index of the default devices. */ + ma_context_get_default_device_index__pulse(pContext, ma_device_type_playback, &callbackData.defaultDeviceIndexPlayback); + ma_context_get_default_device_index__pulse(pContext, ma_device_type_capture, &callbackData.defaultDeviceIndexCapture); + + /* Playback. */ + if (!callbackData.isTerminated) { + pOP = ((ma_pa_context_get_sink_info_list_proc)pContext->pulse.pa_context_get_sink_info_list)((ma_pa_context*)(pContext->pulse.pPulseContext), ma_context_enumerate_devices_sink_callback__pulse, &callbackData); + if (pOP == NULL) { + result = MA_ERROR; + goto done; + } + + result = ma_wait_for_operation__pulse(pContext, pContext->pulse.pMainLoop, pOP); + ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); + + if (result != MA_SUCCESS) { + goto done; + } + } + + + /* Capture. 
*/ + if (!callbackData.isTerminated) { + pOP = ((ma_pa_context_get_source_info_list_proc)pContext->pulse.pa_context_get_source_info_list)((ma_pa_context*)(pContext->pulse.pPulseContext), ma_context_enumerate_devices_source_callback__pulse, &callbackData); + if (pOP == NULL) { + result = MA_ERROR; + goto done; + } + + result = ma_wait_for_operation__pulse(pContext, pContext->pulse.pMainLoop, pOP); + ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); + + if (result != MA_SUCCESS) { + goto done; + } + } + +done: + return result; +} + + +typedef struct +{ + ma_device_info* pDeviceInfo; + ma_uint32 defaultDeviceIndex; + ma_bool32 foundDevice; +} ma_context_get_device_info_callback_data__pulse; + +static void ma_context_get_device_info_sink_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData) +{ + ma_context_get_device_info_callback_data__pulse* pData = (ma_context_get_device_info_callback_data__pulse*)pUserData; + + if (endOfList > 0) { + return; + } + + MA_ASSERT(pData != NULL); + pData->foundDevice = MA_TRUE; + + if (pInfo->name != NULL) { + ma_strncpy_s(pData->pDeviceInfo->id.pulse, sizeof(pData->pDeviceInfo->id.pulse), pInfo->name, (size_t)-1); + } + + if (pInfo->description != NULL) { + ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pInfo->description, (size_t)-1); + } + + /* + We're just reporting a single data format here. I think technically PulseAudio might support + all formats, but I don't trust that PulseAudio will do *anything* right, so I'm just going to + report the "native" device format. + */ + pData->pDeviceInfo->nativeDataFormats[0].format = ma_format_from_pulse(pInfo->sample_spec.format); + pData->pDeviceInfo->nativeDataFormats[0].channels = pInfo->sample_spec.channels; + pData->pDeviceInfo->nativeDataFormats[0].sampleRate = pInfo->sample_spec.rate; + pData->pDeviceInfo->nativeDataFormats[0].flags = 0; + pData->pDeviceInfo->nativeDataFormatCount = 1; + + if (pData->defaultDeviceIndex == pInfo->index) { + pData->pDeviceInfo->isDefault = MA_TRUE; + } + + (void)pPulseContext; /* Unused. */ +} + +static void ma_context_get_device_info_source_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData) +{ + ma_context_get_device_info_callback_data__pulse* pData = (ma_context_get_device_info_callback_data__pulse*)pUserData; + + if (endOfList > 0) { + return; + } + + MA_ASSERT(pData != NULL); + pData->foundDevice = MA_TRUE; + + if (pInfo->name != NULL) { + ma_strncpy_s(pData->pDeviceInfo->id.pulse, sizeof(pData->pDeviceInfo->id.pulse), pInfo->name, (size_t)-1); + } + + if (pInfo->description != NULL) { + ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pInfo->description, (size_t)-1); + } + + /* + We're just reporting a single data format here. I think technically PulseAudio might support + all formats, but I don't trust that PulseAudio will do *anything* right, so I'm just going to + report the "native" device format. 
+ */ + pData->pDeviceInfo->nativeDataFormats[0].format = ma_format_from_pulse(pInfo->sample_spec.format); + pData->pDeviceInfo->nativeDataFormats[0].channels = pInfo->sample_spec.channels; + pData->pDeviceInfo->nativeDataFormats[0].sampleRate = pInfo->sample_spec.rate; + pData->pDeviceInfo->nativeDataFormats[0].flags = 0; + pData->pDeviceInfo->nativeDataFormatCount = 1; + + if (pData->defaultDeviceIndex == pInfo->index) { + pData->pDeviceInfo->isDefault = MA_TRUE; + } + + (void)pPulseContext; /* Unused. */ +} + +static ma_result ma_context_get_device_info__pulse(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + ma_result result = MA_SUCCESS; + ma_context_get_device_info_callback_data__pulse callbackData; + ma_pa_operation* pOP = NULL; + const char* pDeviceName = NULL; + + MA_ASSERT(pContext != NULL); + + callbackData.pDeviceInfo = pDeviceInfo; + callbackData.foundDevice = MA_FALSE; + + if (pDeviceID != NULL) { + pDeviceName = pDeviceID->pulse; + } else { + pDeviceName = NULL; + } + + result = ma_context_get_default_device_index__pulse(pContext, deviceType, &callbackData.defaultDeviceIndex); + + if (deviceType == ma_device_type_playback) { + pOP = ((ma_pa_context_get_sink_info_by_name_proc)pContext->pulse.pa_context_get_sink_info_by_name)((ma_pa_context*)(pContext->pulse.pPulseContext), pDeviceName, ma_context_get_device_info_sink_callback__pulse, &callbackData); + } else { + pOP = ((ma_pa_context_get_source_info_by_name_proc)pContext->pulse.pa_context_get_source_info_by_name)((ma_pa_context*)(pContext->pulse.pPulseContext), pDeviceName, ma_context_get_device_info_source_callback__pulse, &callbackData); + } + + if (pOP != NULL) { + ma_wait_for_operation_and_unref__pulse(pContext, pContext->pulse.pMainLoop, pOP); + } else { + result = MA_ERROR; + goto done; + } + + if (!callbackData.foundDevice) { + result = MA_NO_DEVICE; + goto done; + } + +done: + return result; +} + +static ma_result ma_device_uninit__pulse(ma_device* pDevice) +{ + ma_context* pContext; + + MA_ASSERT(pDevice != NULL); + + pContext = pDevice->pContext; + MA_ASSERT(pContext != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + } + + if (pDevice->type == ma_device_type_duplex) { + ma_duplex_rb_uninit(&pDevice->duplexRB); + } + + ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)((ma_pa_context*)pDevice->pulse.pPulseContext); + ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)((ma_pa_context*)pDevice->pulse.pPulseContext); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)pDevice->pulse.pMainLoop); + + return MA_SUCCESS; +} + +static ma_pa_buffer_attr ma_device__pa_buffer_attr_new(ma_uint32 periodSizeInFrames, ma_uint32 periods, const ma_pa_sample_spec* ss) +{ + ma_pa_buffer_attr attr; + attr.maxlength = periodSizeInFrames * periods * ma_get_bytes_per_frame(ma_format_from_pulse(ss->format), ss->channels); + 
attr.tlength = attr.maxlength / periods;
+ attr.prebuf = (ma_uint32)-1;
+ attr.minreq = (ma_uint32)-1;
+ attr.fragsize = attr.maxlength / periods;
+
+ return attr;
+}
+
+static ma_pa_stream* ma_device__pa_stream_new__pulse(ma_device* pDevice, const char* pStreamName, const ma_pa_sample_spec* ss, const ma_pa_channel_map* cmap)
+{
+ static int g_StreamCounter = 0;
+ char actualStreamName[256];
+
+ if (pStreamName != NULL) {
+ ma_strncpy_s(actualStreamName, sizeof(actualStreamName), pStreamName, (size_t)-1);
+ } else {
+ ma_strcpy_s(actualStreamName, sizeof(actualStreamName), "miniaudio:");
+ ma_itoa_s(g_StreamCounter, actualStreamName + 10, sizeof(actualStreamName)-10, 10); /* 10 = strlen("miniaudio:") */
+ }
+ g_StreamCounter += 1;
+
+ return ((ma_pa_stream_new_proc)pDevice->pContext->pulse.pa_stream_new)((ma_pa_context*)pDevice->pulse.pPulseContext, actualStreamName, ss, cmap);
+}
+
+
+static void ma_device_on_read__pulse(ma_pa_stream* pStream, size_t byteCount, void* pUserData)
+{
+ ma_device* pDevice = (ma_device*)pUserData;
+ ma_uint32 bpf;
+ ma_uint32 deviceState;
+ ma_uint64 frameCount;
+ ma_uint64 framesProcessed;
+
+ MA_ASSERT(pDevice != NULL);
+
+ /*
+ Don't do anything if the device isn't initialized yet. Yes, this can happen because PulseAudio
+ can fire this callback before the stream has even started. Ridiculous.
+ */
+ deviceState = ma_device_get_state(pDevice);
+ if (deviceState != ma_device_state_starting && deviceState != ma_device_state_started) {
+ return;
+ }
+
+ bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ MA_ASSERT(bpf > 0);
+
+ frameCount = byteCount / bpf;
+ framesProcessed = 0;
+
+ while (ma_device_get_state(pDevice) == ma_device_state_started && framesProcessed < frameCount) {
+ const void* pMappedPCMFrames;
+ size_t bytesMapped;
+ ma_uint64 framesMapped;
+
+ int pulseResult = ((ma_pa_stream_peek_proc)pDevice->pContext->pulse.pa_stream_peek)(pStream, &pMappedPCMFrames, &bytesMapped);
+ if (pulseResult < 0) {
+ break; /* Failed to map. Abort. */
+ }
+
+ framesMapped = bytesMapped / bpf;
+ if (framesMapped > 0) {
+ if (pMappedPCMFrames != NULL) {
+ ma_device_handle_backend_data_callback(pDevice, NULL, pMappedPCMFrames, framesMapped);
+ } else {
+ /* It's a hole. */
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[PulseAudio] ma_device_on_read__pulse: Hole.\n");
+ }
+
+ pulseResult = ((ma_pa_stream_drop_proc)pDevice->pContext->pulse.pa_stream_drop)(pStream);
+ if (pulseResult < 0) {
+ break; /* Failed to drop the buffer. */
+ }
+
+ framesProcessed += framesMapped;
+
+ } else {
+ /* Nothing was mapped. Just abort. 
*/ + break; + } + } +} + +static ma_result ma_device_write_to_stream__pulse(ma_device* pDevice, ma_pa_stream* pStream, ma_uint64* pFramesProcessed) +{ + ma_result result = MA_SUCCESS; + ma_uint64 framesProcessed = 0; + size_t bytesMapped; + ma_uint32 bpf; + ma_uint32 deviceState; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pStream != NULL); + + bpf = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + MA_ASSERT(bpf > 0); + + deviceState = ma_device_get_state(pDevice); + + bytesMapped = ((ma_pa_stream_writable_size_proc)pDevice->pContext->pulse.pa_stream_writable_size)(pStream); + if (bytesMapped != (size_t)-1) { + if (bytesMapped > 0) { + ma_uint64 framesMapped; + void* pMappedPCMFrames; + int pulseResult = ((ma_pa_stream_begin_write_proc)pDevice->pContext->pulse.pa_stream_begin_write)(pStream, &pMappedPCMFrames, &bytesMapped); + if (pulseResult < 0) { + result = ma_result_from_pulse(pulseResult); + goto done; + } + + framesMapped = bytesMapped / bpf; + + if (deviceState == ma_device_state_started || deviceState == ma_device_state_starting) { /* Check for starting state just in case this is being used to do the initial fill. */ + ma_device_handle_backend_data_callback(pDevice, pMappedPCMFrames, NULL, framesMapped); + } else { + /* Device is not started. Write silence. */ + ma_silence_pcm_frames(pMappedPCMFrames, framesMapped, pDevice->playback.format, pDevice->playback.channels); + } + + pulseResult = ((ma_pa_stream_write_proc)pDevice->pContext->pulse.pa_stream_write)(pStream, pMappedPCMFrames, bytesMapped, NULL, 0, MA_PA_SEEK_RELATIVE); + if (pulseResult < 0) { + result = ma_result_from_pulse(pulseResult); + goto done; /* Failed to write data to stream. */ + } + + framesProcessed += framesMapped; + } else { + result = MA_SUCCESS; /* No data available for writing. */ + goto done; + } + } else { + result = MA_ERROR; /* Failed to retrieve the writable size. Abort. */ + goto done; + } + +done: + if (pFramesProcessed != NULL) { + *pFramesProcessed = framesProcessed; + } + + return result; +} + +static void ma_device_on_write__pulse(ma_pa_stream* pStream, size_t byteCount, void* pUserData) +{ + ma_device* pDevice = (ma_device*)pUserData; + ma_uint32 bpf; + ma_uint64 frameCount; + ma_uint64 framesProcessed; + ma_uint32 deviceState; + ma_result result; + + MA_ASSERT(pDevice != NULL); + + /* + Don't do anything if the device isn't initialized yet. Yes, this can happen because PulseAudio + can fire this callback before the stream has even started. Ridiculous. + */ + deviceState = ma_device_get_state(pDevice); + if (deviceState != ma_device_state_starting && deviceState != ma_device_state_started) { + return; + } + + bpf = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + MA_ASSERT(bpf > 0); + + frameCount = byteCount / bpf; + framesProcessed = 0; + + while (framesProcessed < frameCount) { + ma_uint64 framesProcessedThisIteration; + + /* Don't keep trying to process frames if the device isn't started. 
*/ + deviceState = ma_device_get_state(pDevice); + if (deviceState != ma_device_state_starting && deviceState != ma_device_state_started) { + break; + } + + result = ma_device_write_to_stream__pulse(pDevice, pStream, &framesProcessedThisIteration); + if (result != MA_SUCCESS) { + break; + } + + framesProcessed += framesProcessedThisIteration; + } +} + +static void ma_device_on_suspended__pulse(ma_pa_stream* pStream, void* pUserData) +{ + ma_device* pDevice = (ma_device*)pUserData; + int suspended; + + (void)pStream; + + suspended = ((ma_pa_stream_is_suspended_proc)pDevice->pContext->pulse.pa_stream_is_suspended)(pStream); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[Pulse] Device suspended state changed. pa_stream_is_suspended() returned %d.\n", suspended); + + if (suspended < 0) { + return; + } + + if (suspended == 1) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[Pulse] Device suspended state changed. Suspended.\n"); + ma_device__on_notification_stopped(pDevice); + } else { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[Pulse] Device suspended state changed. Resumed.\n"); + ma_device__on_notification_started(pDevice); + } +} + +static void ma_device_on_rerouted__pulse(ma_pa_stream* pStream, void* pUserData) +{ + ma_device* pDevice = (ma_device*)pUserData; + + (void)pStream; + (void)pUserData; + + ma_device__on_notification_rerouted(pDevice); +} + +static ma_uint32 ma_calculate_period_size_in_frames_from_descriptor__pulse(const ma_device_descriptor* pDescriptor, ma_uint32 nativeSampleRate, ma_performance_profile performanceProfile) +{ + /* + There have been reports from users where buffers of < ~20ms result glitches when running through + PipeWire. To work around this we're going to have to use a different default buffer size. + */ + const ma_uint32 defaultPeriodSizeInMilliseconds_LowLatency = 25; + const ma_uint32 defaultPeriodSizeInMilliseconds_Conservative = MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE; + + MA_ASSERT(nativeSampleRate != 0); + + if (pDescriptor->periodSizeInFrames == 0) { + if (pDescriptor->periodSizeInMilliseconds == 0) { + if (performanceProfile == ma_performance_profile_low_latency) { + return ma_calculate_buffer_size_in_frames_from_milliseconds(defaultPeriodSizeInMilliseconds_LowLatency, nativeSampleRate); + } else { + return ma_calculate_buffer_size_in_frames_from_milliseconds(defaultPeriodSizeInMilliseconds_Conservative, nativeSampleRate); + } + } else { + return ma_calculate_buffer_size_in_frames_from_milliseconds(pDescriptor->periodSizeInMilliseconds, nativeSampleRate); + } + } else { + return pDescriptor->periodSizeInFrames; + } +} + +static ma_result ma_device_init__pulse(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + /* + Notes for PulseAudio: + + - When both the period size in frames and milliseconds are 0, we default to miniaudio's + default buffer sizes rather than leaving it up to PulseAudio because I don't trust + PulseAudio to give us any kind of reasonable latency by default. + + - Do not ever, *ever* forget to use MA_PA_STREAM_ADJUST_LATENCY. If you don't specify this + flag, capture mode will just not work properly until you open another PulseAudio app. 
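+
+    - These defaults can still be overridden through the regular public config. A minimal
+      sketch, using nothing specific to this backend:
+
+          ma_device_config config = ma_device_config_init(ma_device_type_playback);
+          config.periodSizeInMilliseconds = 25;
+          config.periods                  = 2;
+
+      Keeping the period size at or above roughly 25ms also helps avoid the PipeWire glitching
+      mentioned in ma_calculate_period_size_in_frames_from_descriptor__pulse() above.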
+ */ + + ma_result result = MA_SUCCESS; + int error = 0; + const char* devPlayback = NULL; + const char* devCapture = NULL; + ma_format format = ma_format_unknown; + ma_uint32 channels = 0; + ma_uint32 sampleRate = 0; + ma_pa_sink_info sinkInfo; + ma_pa_source_info sourceInfo; + ma_pa_sample_spec ss; + ma_pa_channel_map cmap; + ma_pa_buffer_attr attr; + const ma_pa_sample_spec* pActualSS = NULL; + const ma_pa_buffer_attr* pActualAttr = NULL; + ma_uint32 iChannel; + ma_pa_stream_flags_t streamFlags; + + MA_ASSERT(pDevice != NULL); + MA_ZERO_OBJECT(&pDevice->pulse); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* No exclusive mode with the PulseAudio backend. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + if (pDescriptorPlayback->pDeviceID != NULL) { + devPlayback = pDescriptorPlayback->pDeviceID->pulse; + } + + format = pDescriptorPlayback->format; + channels = pDescriptorPlayback->channels; + sampleRate = pDescriptorPlayback->sampleRate; + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + if (pDescriptorCapture->pDeviceID != NULL) { + devCapture = pDescriptorCapture->pDeviceID->pulse; + } + + format = pDescriptorCapture->format; + channels = pDescriptorCapture->channels; + sampleRate = pDescriptorCapture->sampleRate; + } + + + + result = ma_init_pa_mainloop_and_pa_context__pulse(pDevice->pContext, pDevice->pContext->pulse.pApplicationName, pDevice->pContext->pulse.pServerName, MA_FALSE, &pDevice->pulse.pMainLoop, &pDevice->pulse.pPulseContext); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to initialize PA mainloop and context for device.\n"); + return result; + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + result = ma_context_get_source_info__pulse(pDevice->pContext, devCapture, &sourceInfo); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to retrieve source info for capture device."); + goto on_error0; + } + + ss = sourceInfo.sample_spec; + cmap = sourceInfo.channel_map; + + /* Use the requested channel count if we have one. */ + if (pDescriptorCapture->channels != 0) { + ss.channels = pDescriptorCapture->channels; + } + + /* Use a default channel map. */ + ((ma_pa_channel_map_init_extend_proc)pDevice->pContext->pulse.pa_channel_map_init_extend)(&cmap, ss.channels, MA_PA_CHANNEL_MAP_DEFAULT); + + /* Use the requested sample rate if one was specified. 
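+
+        Note for the block below: if the source reports a sample spec miniaudio cannot use
+        (unknown format, or a zero rate or channel count), a sane default is substituted and the
+        matching MA_PA_STREAM_FIX_* flag is added so the server can replace it with the device's
+        native value once the stream is connected.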
*/ + if (pDescriptorCapture->sampleRate != 0) { + ss.rate = pDescriptorCapture->sampleRate; + } + streamFlags = MA_PA_STREAM_START_CORKED | MA_PA_STREAM_ADJUST_LATENCY; + + if (ma_format_from_pulse(ss.format) == ma_format_unknown) { + if (ma_is_little_endian()) { + ss.format = MA_PA_SAMPLE_FLOAT32LE; + } else { + ss.format = MA_PA_SAMPLE_FLOAT32BE; + } + streamFlags |= MA_PA_STREAM_FIX_FORMAT; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.format not supported by miniaudio. Defaulting to PA_SAMPLE_FLOAT32.\n"); + } + if (ss.rate == 0) { + ss.rate = MA_DEFAULT_SAMPLE_RATE; + streamFlags |= MA_PA_STREAM_FIX_RATE; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.rate = 0. Defaulting to %d.\n", ss.rate); + } + if (ss.channels == 0) { + ss.channels = MA_DEFAULT_CHANNELS; + streamFlags |= MA_PA_STREAM_FIX_CHANNELS; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.channels = 0. Defaulting to %d.\n", ss.channels); + } + + /* We now have enough information to calculate our actual period size in frames. */ + pDescriptorCapture->periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__pulse(pDescriptorCapture, ss.rate, pConfig->performanceProfile); + + attr = ma_device__pa_buffer_attr_new(pDescriptorCapture->periodSizeInFrames, pDescriptorCapture->periodCount, &ss); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Capture attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; periodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDescriptorCapture->periodSizeInFrames); + + pDevice->pulse.pStreamCapture = ma_device__pa_stream_new__pulse(pDevice, pConfig->pulse.pStreamNameCapture, &ss, &cmap); + if (pDevice->pulse.pStreamCapture == NULL) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio capture stream.\n"); + result = MA_ERROR; + goto on_error0; + } + + + /* The callback needs to be set before connecting the stream. */ + ((ma_pa_stream_set_read_callback_proc)pDevice->pContext->pulse.pa_stream_set_read_callback)((ma_pa_stream*)pDevice->pulse.pStreamCapture, ma_device_on_read__pulse, pDevice); + + /* State callback for checking when the device has been corked. */ + ((ma_pa_stream_set_suspended_callback_proc)pDevice->pContext->pulse.pa_stream_set_suspended_callback)((ma_pa_stream*)pDevice->pulse.pStreamCapture, ma_device_on_suspended__pulse, pDevice); + + /* Rerouting notification. */ + ((ma_pa_stream_set_moved_callback_proc)pDevice->pContext->pulse.pa_stream_set_moved_callback)((ma_pa_stream*)pDevice->pulse.pStreamCapture, ma_device_on_rerouted__pulse, pDevice); + + + /* Connect after we've got all of our internal state set up. */ + if (devCapture != NULL) { + streamFlags |= MA_PA_STREAM_DONT_MOVE; + } + + error = ((ma_pa_stream_connect_record_proc)pDevice->pContext->pulse.pa_stream_connect_record)((ma_pa_stream*)pDevice->pulse.pStreamCapture, devCapture, &attr, streamFlags); + if (error != MA_PA_OK) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio capture stream."); + result = ma_result_from_pulse(error); + goto on_error1; + } + + result = ma_wait_for_pa_stream_to_connect__pulse(pDevice->pContext, pDevice->pulse.pMainLoop, (ma_pa_stream*)pDevice->pulse.pStreamCapture); + if (result != MA_SUCCESS) { + goto on_error2; + } + + + /* Internal format. 
*/ + pActualSS = ((ma_pa_stream_get_sample_spec_proc)pDevice->pContext->pulse.pa_stream_get_sample_spec)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + if (pActualSS != NULL) { + ss = *pActualSS; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Capture sample spec: format=%s, channels=%d, rate=%d\n", ma_get_format_name(ma_format_from_pulse(ss.format)), ss.channels, ss.rate); + } else { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Failed to retrieve capture sample spec.\n"); + } + + pDescriptorCapture->format = ma_format_from_pulse(ss.format); + pDescriptorCapture->channels = ss.channels; + pDescriptorCapture->sampleRate = ss.rate; + + if (pDescriptorCapture->format == ma_format_unknown || pDescriptorCapture->channels == 0 || pDescriptorCapture->sampleRate == 0) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Capture sample spec is invalid. Device unusable by miniaudio. format=%s, channels=%d, sampleRate=%d.\n", ma_get_format_name(pDescriptorCapture->format), pDescriptorCapture->channels, pDescriptorCapture->sampleRate); + result = MA_ERROR; + goto on_error4; + } + + /* Internal channel map. */ + + /* + Bug in PipeWire. There have been reports that PipeWire is returning AUX channels when reporting + the channel map. To somewhat workaround this, I'm hacking in a hard coded channel map for mono + and stereo. In this case it should be safe to assume mono = MONO and stereo = LEFT/RIGHT. For + all other channel counts we need to just put up with whatever PipeWire reports and hope it gets + fixed sooner than later. I might remove this hack later. + */ + if (pDescriptorCapture->channels > 2) { + for (iChannel = 0; iChannel < pDescriptorCapture->channels; ++iChannel) { + pDescriptorCapture->channelMap[iChannel] = ma_channel_position_from_pulse(cmap.map[iChannel]); + } + } else { + /* Hack for mono and stereo. */ + if (pDescriptorCapture->channels == 1) { + pDescriptorCapture->channelMap[0] = MA_CHANNEL_MONO; + } else if (pDescriptorCapture->channels == 2) { + pDescriptorCapture->channelMap[0] = MA_CHANNEL_FRONT_LEFT; + pDescriptorCapture->channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + } else { + MA_ASSERT(MA_FALSE); /* Should never hit this. */ + } + } + + + /* Buffer. 
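+
+        The attr we requested above is only a suggestion; the values used below are whatever the
+        server actually gave us. Worked example with made-up numbers: if the returned attr has
+        maxlength = 19200 and fragsize = 9600 for 32-bit float stereo (8 bytes per frame), then
+
+            periodCount        = max(19200 / 9600, 1) = 2
+            periodSizeInFrames = 19200 / 8 / 2        = 1200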
*/ + pActualAttr = ((ma_pa_stream_get_buffer_attr_proc)pDevice->pContext->pulse.pa_stream_get_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + if (pActualAttr != NULL) { + attr = *pActualAttr; + } + + if (attr.fragsize > 0) { + pDescriptorCapture->periodCount = ma_max(attr.maxlength / attr.fragsize, 1); + } else { + pDescriptorCapture->periodCount = 1; + } + + pDescriptorCapture->periodSizeInFrames = attr.maxlength / ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels) / pDescriptorCapture->periodCount; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Capture actual attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; periodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDescriptorCapture->periodSizeInFrames); + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + result = ma_context_get_sink_info__pulse(pDevice->pContext, devPlayback, &sinkInfo); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to retrieve sink info for playback device.\n"); + goto on_error2; + } + + ss = sinkInfo.sample_spec; + cmap = sinkInfo.channel_map; + + /* Use the requested channel count if we have one. */ + if (pDescriptorPlayback->channels != 0) { + ss.channels = pDescriptorPlayback->channels; + } + + /* Use a default channel map. */ + ((ma_pa_channel_map_init_extend_proc)pDevice->pContext->pulse.pa_channel_map_init_extend)(&cmap, ss.channels, MA_PA_CHANNEL_MAP_DEFAULT); + + + /* Use the requested sample rate if one was specified. */ + if (pDescriptorPlayback->sampleRate != 0) { + ss.rate = pDescriptorPlayback->sampleRate; + } + + streamFlags = MA_PA_STREAM_START_CORKED | MA_PA_STREAM_ADJUST_LATENCY; + if (ma_format_from_pulse(ss.format) == ma_format_unknown) { + if (ma_is_little_endian()) { + ss.format = MA_PA_SAMPLE_FLOAT32LE; + } else { + ss.format = MA_PA_SAMPLE_FLOAT32BE; + } + streamFlags |= MA_PA_STREAM_FIX_FORMAT; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.format not supported by miniaudio. Defaulting to PA_SAMPLE_FLOAT32.\n"); + } + if (ss.rate == 0) { + ss.rate = MA_DEFAULT_SAMPLE_RATE; + streamFlags |= MA_PA_STREAM_FIX_RATE; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.rate = 0. Defaulting to %d.\n", ss.rate); + } + if (ss.channels == 0) { + ss.channels = MA_DEFAULT_CHANNELS; + streamFlags |= MA_PA_STREAM_FIX_CHANNELS; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.channels = 0. Defaulting to %d.\n", ss.channels); + } + + /* We now have enough information to calculate the actual buffer size in frames. 
*/ + pDescriptorPlayback->periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__pulse(pDescriptorPlayback, ss.rate, pConfig->performanceProfile); + + attr = ma_device__pa_buffer_attr_new(pDescriptorPlayback->periodSizeInFrames, pDescriptorPlayback->periodCount, &ss); + + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Playback attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; periodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDescriptorPlayback->periodSizeInFrames); + + pDevice->pulse.pStreamPlayback = ma_device__pa_stream_new__pulse(pDevice, pConfig->pulse.pStreamNamePlayback, &ss, &cmap); + if (pDevice->pulse.pStreamPlayback == NULL) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio playback stream.\n"); + result = MA_ERROR; + goto on_error2; + } + + + /* + Note that this callback will be fired as soon as the stream is connected, even though it's started as corked. The callback needs to handle a + device state of ma_device_state_uninitialized. + */ + ((ma_pa_stream_set_write_callback_proc)pDevice->pContext->pulse.pa_stream_set_write_callback)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, ma_device_on_write__pulse, pDevice); + + /* State callback for checking when the device has been corked. */ + ((ma_pa_stream_set_suspended_callback_proc)pDevice->pContext->pulse.pa_stream_set_suspended_callback)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, ma_device_on_suspended__pulse, pDevice); + + /* Rerouting notification. */ + ((ma_pa_stream_set_moved_callback_proc)pDevice->pContext->pulse.pa_stream_set_moved_callback)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, ma_device_on_rerouted__pulse, pDevice); + + + /* Connect after we've got all of our internal state set up. */ + if (devPlayback != NULL) { + streamFlags |= MA_PA_STREAM_DONT_MOVE; + } + + error = ((ma_pa_stream_connect_playback_proc)pDevice->pContext->pulse.pa_stream_connect_playback)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, devPlayback, &attr, streamFlags, NULL, NULL); + if (error != MA_PA_OK) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio playback stream."); + result = ma_result_from_pulse(error); + goto on_error3; + } + + result = ma_wait_for_pa_stream_to_connect__pulse(pDevice->pContext, pDevice->pulse.pMainLoop, (ma_pa_stream*)pDevice->pulse.pStreamPlayback); + if (result != MA_SUCCESS) { + goto on_error3; + } + + + /* Internal format. */ + pActualSS = ((ma_pa_stream_get_sample_spec_proc)pDevice->pContext->pulse.pa_stream_get_sample_spec)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + if (pActualSS != NULL) { + ss = *pActualSS; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Playback sample spec: format=%s, channels=%d, rate=%d\n", ma_get_format_name(ma_format_from_pulse(ss.format)), ss.channels, ss.rate); + } else { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Failed to retrieve playback sample spec.\n"); + } + + pDescriptorPlayback->format = ma_format_from_pulse(ss.format); + pDescriptorPlayback->channels = ss.channels; + pDescriptorPlayback->sampleRate = ss.rate; + + if (pDescriptorPlayback->format == ma_format_unknown || pDescriptorPlayback->channels == 0 || pDescriptorPlayback->sampleRate == 0) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Playback sample spec is invalid. Device unusable by miniaudio. 
format=%s, channels=%d, sampleRate=%d.\n", ma_get_format_name(pDescriptorPlayback->format), pDescriptorPlayback->channels, pDescriptorPlayback->sampleRate); + result = MA_ERROR; + goto on_error4; + } + + /* Internal channel map. */ + + /* + Bug in PipeWire. There have been reports that PipeWire is returning AUX channels when reporting + the channel map. To somewhat workaround this, I'm hacking in a hard coded channel map for mono + and stereo. In this case it should be safe to assume mono = MONO and stereo = LEFT/RIGHT. For + all other channel counts we need to just put up with whatever PipeWire reports and hope it gets + fixed sooner than later. I might remove this hack later. + */ + if (pDescriptorPlayback->channels > 2) { + for (iChannel = 0; iChannel < pDescriptorPlayback->channels; ++iChannel) { + pDescriptorPlayback->channelMap[iChannel] = ma_channel_position_from_pulse(cmap.map[iChannel]); + } + } else { + /* Hack for mono and stereo. */ + if (pDescriptorPlayback->channels == 1) { + pDescriptorPlayback->channelMap[0] = MA_CHANNEL_MONO; + } else if (pDescriptorPlayback->channels == 2) { + pDescriptorPlayback->channelMap[0] = MA_CHANNEL_FRONT_LEFT; + pDescriptorPlayback->channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + } else { + MA_ASSERT(MA_FALSE); /* Should never hit this. */ + } + } + + + /* Buffer. */ + pActualAttr = ((ma_pa_stream_get_buffer_attr_proc)pDevice->pContext->pulse.pa_stream_get_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + if (pActualAttr != NULL) { + attr = *pActualAttr; + } + + if (attr.tlength > 0) { + pDescriptorPlayback->periodCount = ma_max(attr.maxlength / attr.tlength, 1); + } else { + pDescriptorPlayback->periodCount = 1; + } + + pDescriptorPlayback->periodSizeInFrames = attr.maxlength / ma_get_bytes_per_frame(pDescriptorPlayback->format, pDescriptorPlayback->channels) / pDescriptorPlayback->periodCount; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Playback actual attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; internalPeriodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDescriptorPlayback->periodSizeInFrames); + } + + + /* + We need a ring buffer for handling duplex mode. We can use the main duplex ring buffer in the main + part of the ma_device struct. We cannot, however, depend on ma_device_init() initializing this for + us later on because that will only do it if it's a fully asynchronous backend - i.e. the + onDeviceDataLoop callback is NULL, which is not the case for PulseAudio. + */ + if (pConfig->deviceType == ma_device_type_duplex) { + ma_format rbFormat = (format != ma_format_unknown) ? format : pDescriptorCapture->format; + ma_uint32 rbChannels = (channels > 0) ? channels : pDescriptorCapture->channels; + ma_uint32 rbSampleRate = (sampleRate > 0) ? sampleRate : pDescriptorCapture->sampleRate; + + result = ma_duplex_rb_init(rbFormat, rbChannels, rbSampleRate, pDescriptorCapture->sampleRate, pDescriptorCapture->periodSizeInFrames, &pDevice->pContext->allocationCallbacks, &pDevice->duplexRB); + if (result != MA_SUCCESS) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to initialize ring buffer. 
%s.\n", ma_result_description(result)); + goto on_error4; + } + } + + return MA_SUCCESS; + + +on_error4: + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ((ma_pa_stream_disconnect_proc)pDevice->pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + } +on_error3: + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ((ma_pa_stream_unref_proc)pDevice->pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + } +on_error2: + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ((ma_pa_stream_disconnect_proc)pDevice->pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + } +on_error1: + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ((ma_pa_stream_unref_proc)pDevice->pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + } +on_error0: + return result; +} + + +static void ma_pulse_operation_complete_callback(ma_pa_stream* pStream, int success, void* pUserData) +{ + ma_bool32* pIsSuccessful = (ma_bool32*)pUserData; + MA_ASSERT(pIsSuccessful != NULL); + + *pIsSuccessful = (ma_bool32)success; + + (void)pStream; /* Unused. */ +} + +static ma_result ma_device__cork_stream__pulse(ma_device* pDevice, ma_device_type deviceType, int cork) +{ + ma_context* pContext = pDevice->pContext; + ma_bool32 wasSuccessful; + ma_pa_stream* pStream; + ma_pa_operation* pOP; + ma_result result; + + /* This should not be called with a duplex device type. */ + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + wasSuccessful = MA_FALSE; + + pStream = (ma_pa_stream*)((deviceType == ma_device_type_capture) ? pDevice->pulse.pStreamCapture : pDevice->pulse.pStreamPlayback); + MA_ASSERT(pStream != NULL); + + pOP = ((ma_pa_stream_cork_proc)pContext->pulse.pa_stream_cork)(pStream, cork, ma_pulse_operation_complete_callback, &wasSuccessful); + if (pOP == NULL) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to cork PulseAudio stream."); + return MA_ERROR; + } + + result = ma_wait_for_operation_and_unref__pulse(pDevice->pContext, pDevice->pulse.pMainLoop, pOP); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] An error occurred while waiting for the PulseAudio stream to cork."); + return result; + } + + if (!wasSuccessful) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to %s PulseAudio stream.", (cork) ? "stop" : "start"); + return MA_ERROR; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_start__pulse(ma_device* pDevice) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + result = ma_device__cork_stream__pulse(pDevice, ma_device_type_capture, 0); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + /* + We need to fill some data before uncorking. Not doing this will result in the write callback + never getting fired. We're not going to abort if writing fails because I still want the device + to get uncorked. 
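+
+        So the start sequence below is simply: pre-fill once with ma_device_write_to_stream__pulse(),
+        then uncork with ma_device__cork_stream__pulse(..., 0), and only the uncork result is checked.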
+ */ + ma_device_write_to_stream__pulse(pDevice, (ma_pa_stream*)(pDevice->pulse.pStreamPlayback), NULL); /* No need to check the result here. Always want to fall through an uncork.*/ + + result = ma_device__cork_stream__pulse(pDevice, ma_device_type_playback, 0); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__pulse(ma_device* pDevice) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + result = ma_device__cork_stream__pulse(pDevice, ma_device_type_capture, 1); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + /* + Ideally we would drain the device here, but there's been cases where PulseAudio seems to be + broken on some systems to the point where no audio processing seems to happen. When this + happens, draining never completes and we get stuck here. For now I'm disabling draining of + the device so we don't just freeze the application. + */ +#if 0 + ma_pa_operation* pOP = ((ma_pa_stream_drain_proc)pDevice->pContext->pulse.pa_stream_drain)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, ma_pulse_operation_complete_callback, &wasSuccessful); + ma_wait_for_operation_and_unref__pulse(pDevice->pContext, pDevice->pulse.pMainLoop, pOP); +#endif + + result = ma_device__cork_stream__pulse(pDevice, ma_device_type_playback, 1); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_data_loop__pulse(ma_device* pDevice) +{ + int resultPA; + + MA_ASSERT(pDevice != NULL); + + /* NOTE: Don't start the device here. It'll be done at a higher level. */ + + /* + All data is handled through callbacks. All we need to do is iterate over the main loop and let + the callbacks deal with it. + */ + while (ma_device_get_state(pDevice) == ma_device_state_started) { + resultPA = ((ma_pa_mainloop_iterate_proc)pDevice->pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 1, NULL); + if (resultPA < 0) { + break; + } + } + + /* NOTE: Don't stop the device here. It'll be done at a higher level. 
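+
+    If pa_mainloop_iterate() is blocked waiting for events while the device is being stopped,
+    ma_device_data_loop_wakeup__pulse() below calls pa_mainloop_wakeup() to interrupt it so the
+    state check at the top of the loop can run again.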
*/ + return MA_SUCCESS; +} + +static ma_result ma_device_data_loop_wakeup__pulse(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + ((ma_pa_mainloop_wakeup_proc)pDevice->pContext->pulse.pa_mainloop_wakeup)((ma_pa_mainloop*)pDevice->pulse.pMainLoop); + + return MA_SUCCESS; +} + +static ma_result ma_context_uninit__pulse(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_pulseaudio); + + ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)((ma_pa_context*)pContext->pulse.pPulseContext); + ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)((ma_pa_context*)pContext->pulse.pPulseContext); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)pContext->pulse.pMainLoop); + + ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks); + ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks); + +#ifndef MA_NO_RUNTIME_LINKING + ma_dlclose(ma_context_get_log(pContext), pContext->pulse.pulseSO); +#endif + + return MA_SUCCESS; +} + +static ma_result ma_context_init__pulse(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + ma_result result; +#ifndef MA_NO_RUNTIME_LINKING + const char* libpulseNames[] = { + "libpulse.so", + "libpulse.so.0" + }; + size_t i; + + for (i = 0; i < ma_countof(libpulseNames); ++i) { + pContext->pulse.pulseSO = ma_dlopen(ma_context_get_log(pContext), libpulseNames[i]); + if (pContext->pulse.pulseSO != NULL) { + break; + } + } + + if (pContext->pulse.pulseSO == NULL) { + return MA_NO_BACKEND; + } + + pContext->pulse.pa_mainloop_new = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_mainloop_new"); + pContext->pulse.pa_mainloop_free = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_mainloop_free"); + pContext->pulse.pa_mainloop_quit = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_mainloop_quit"); + pContext->pulse.pa_mainloop_get_api = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_mainloop_get_api"); + pContext->pulse.pa_mainloop_iterate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_mainloop_iterate"); + pContext->pulse.pa_mainloop_wakeup = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_mainloop_wakeup"); + pContext->pulse.pa_threaded_mainloop_new = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_new"); + pContext->pulse.pa_threaded_mainloop_free = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_free"); + pContext->pulse.pa_threaded_mainloop_start = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_start"); + pContext->pulse.pa_threaded_mainloop_stop = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_stop"); + pContext->pulse.pa_threaded_mainloop_lock = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_lock"); + pContext->pulse.pa_threaded_mainloop_unlock = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_unlock"); + pContext->pulse.pa_threaded_mainloop_wait = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_wait"); + pContext->pulse.pa_threaded_mainloop_signal = 
(ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_signal"); + pContext->pulse.pa_threaded_mainloop_accept = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_accept"); + pContext->pulse.pa_threaded_mainloop_get_retval = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_get_retval"); + pContext->pulse.pa_threaded_mainloop_get_api = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_get_api"); + pContext->pulse.pa_threaded_mainloop_in_thread = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_in_thread"); + pContext->pulse.pa_threaded_mainloop_set_name = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_set_name"); + pContext->pulse.pa_context_new = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_new"); + pContext->pulse.pa_context_unref = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_unref"); + pContext->pulse.pa_context_connect = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_connect"); + pContext->pulse.pa_context_disconnect = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_disconnect"); + pContext->pulse.pa_context_set_state_callback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_set_state_callback"); + pContext->pulse.pa_context_get_state = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_get_state"); + pContext->pulse.pa_context_get_sink_info_list = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_get_sink_info_list"); + pContext->pulse.pa_context_get_source_info_list = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_get_source_info_list"); + pContext->pulse.pa_context_get_sink_info_by_name = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_get_sink_info_by_name"); + pContext->pulse.pa_context_get_source_info_by_name = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_get_source_info_by_name"); + pContext->pulse.pa_operation_unref = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_operation_unref"); + pContext->pulse.pa_operation_get_state = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_operation_get_state"); + pContext->pulse.pa_channel_map_init_extend = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_channel_map_init_extend"); + pContext->pulse.pa_channel_map_valid = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_channel_map_valid"); + pContext->pulse.pa_channel_map_compatible = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_channel_map_compatible"); + pContext->pulse.pa_stream_new = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_new"); + pContext->pulse.pa_stream_unref = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_unref"); + pContext->pulse.pa_stream_connect_playback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_connect_playback"); + pContext->pulse.pa_stream_connect_record = 
(ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_connect_record"); + pContext->pulse.pa_stream_disconnect = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_disconnect"); + pContext->pulse.pa_stream_get_state = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_get_state"); + pContext->pulse.pa_stream_get_sample_spec = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_get_sample_spec"); + pContext->pulse.pa_stream_get_channel_map = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_get_channel_map"); + pContext->pulse.pa_stream_get_buffer_attr = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_get_buffer_attr"); + pContext->pulse.pa_stream_set_buffer_attr = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_set_buffer_attr"); + pContext->pulse.pa_stream_get_device_name = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_get_device_name"); + pContext->pulse.pa_stream_set_write_callback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_set_write_callback"); + pContext->pulse.pa_stream_set_read_callback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_set_read_callback"); + pContext->pulse.pa_stream_set_suspended_callback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_set_suspended_callback"); + pContext->pulse.pa_stream_set_moved_callback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_set_moved_callback"); + pContext->pulse.pa_stream_is_suspended = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_is_suspended"); + pContext->pulse.pa_stream_flush = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_flush"); + pContext->pulse.pa_stream_drain = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_drain"); + pContext->pulse.pa_stream_is_corked = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_is_corked"); + pContext->pulse.pa_stream_cork = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_cork"); + pContext->pulse.pa_stream_trigger = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_trigger"); + pContext->pulse.pa_stream_begin_write = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_begin_write"); + pContext->pulse.pa_stream_write = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_write"); + pContext->pulse.pa_stream_peek = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_peek"); + pContext->pulse.pa_stream_drop = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_drop"); + pContext->pulse.pa_stream_writable_size = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_writable_size"); + pContext->pulse.pa_stream_readable_size = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_readable_size"); +#else + /* This strange assignment system is just for type safety. 
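+
+    Assigning each symbol to a correctly-typed local first makes the compiler check the real
+    prototype against our ma_pa_*_proc typedef: the very first assignment below triggers a
+    diagnostic if pa_mainloop_new's signature ever drifts from ma_pa_mainloop_new_proc, whereas
+    casting straight to (ma_proc) would silently accept the mismatch.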
*/ + ma_pa_mainloop_new_proc _pa_mainloop_new = pa_mainloop_new; + ma_pa_mainloop_free_proc _pa_mainloop_free = pa_mainloop_free; + ma_pa_mainloop_quit_proc _pa_mainloop_quit = pa_mainloop_quit; + ma_pa_mainloop_get_api_proc _pa_mainloop_get_api = pa_mainloop_get_api; + ma_pa_mainloop_iterate_proc _pa_mainloop_iterate = pa_mainloop_iterate; + ma_pa_mainloop_wakeup_proc _pa_mainloop_wakeup = pa_mainloop_wakeup; + ma_pa_threaded_mainloop_new_proc _pa_threaded_mainloop_new = pa_threaded_mainloop_new; + ma_pa_threaded_mainloop_free_proc _pa_threaded_mainloop_free = pa_threaded_mainloop_free; + ma_pa_threaded_mainloop_start_proc _pa_threaded_mainloop_start = pa_threaded_mainloop_start; + ma_pa_threaded_mainloop_stop_proc _pa_threaded_mainloop_stop = pa_threaded_mainloop_stop; + ma_pa_threaded_mainloop_lock_proc _pa_threaded_mainloop_lock = pa_threaded_mainloop_lock; + ma_pa_threaded_mainloop_unlock_proc _pa_threaded_mainloop_unlock = pa_threaded_mainloop_unlock; + ma_pa_threaded_mainloop_wait_proc _pa_threaded_mainloop_wait = pa_threaded_mainloop_wait; + ma_pa_threaded_mainloop_signal_proc _pa_threaded_mainloop_signal = pa_threaded_mainloop_signal; + ma_pa_threaded_mainloop_accept_proc _pa_threaded_mainloop_accept = pa_threaded_mainloop_accept; + ma_pa_threaded_mainloop_get_retval_proc _pa_threaded_mainloop_get_retval = pa_threaded_mainloop_get_retval; + ma_pa_threaded_mainloop_get_api_proc _pa_threaded_mainloop_get_api = pa_threaded_mainloop_get_api; + ma_pa_threaded_mainloop_in_thread_proc _pa_threaded_mainloop_in_thread = pa_threaded_mainloop_in_thread; + ma_pa_threaded_mainloop_set_name_proc _pa_threaded_mainloop_set_name = pa_threaded_mainloop_set_name; + ma_pa_context_new_proc _pa_context_new = pa_context_new; + ma_pa_context_unref_proc _pa_context_unref = pa_context_unref; + ma_pa_context_connect_proc _pa_context_connect = pa_context_connect; + ma_pa_context_disconnect_proc _pa_context_disconnect = pa_context_disconnect; + ma_pa_context_set_state_callback_proc _pa_context_set_state_callback = pa_context_set_state_callback; + ma_pa_context_get_state_proc _pa_context_get_state = pa_context_get_state; + ma_pa_context_get_sink_info_list_proc _pa_context_get_sink_info_list = pa_context_get_sink_info_list; + ma_pa_context_get_source_info_list_proc _pa_context_get_source_info_list = pa_context_get_source_info_list; + ma_pa_context_get_sink_info_by_name_proc _pa_context_get_sink_info_by_name = pa_context_get_sink_info_by_name; + ma_pa_context_get_source_info_by_name_proc _pa_context_get_source_info_by_name= pa_context_get_source_info_by_name; + ma_pa_operation_unref_proc _pa_operation_unref = pa_operation_unref; + ma_pa_operation_get_state_proc _pa_operation_get_state = pa_operation_get_state; + ma_pa_channel_map_init_extend_proc _pa_channel_map_init_extend = pa_channel_map_init_extend; + ma_pa_channel_map_valid_proc _pa_channel_map_valid = pa_channel_map_valid; + ma_pa_channel_map_compatible_proc _pa_channel_map_compatible = pa_channel_map_compatible; + ma_pa_stream_new_proc _pa_stream_new = pa_stream_new; + ma_pa_stream_unref_proc _pa_stream_unref = pa_stream_unref; + ma_pa_stream_connect_playback_proc _pa_stream_connect_playback = pa_stream_connect_playback; + ma_pa_stream_connect_record_proc _pa_stream_connect_record = pa_stream_connect_record; + ma_pa_stream_disconnect_proc _pa_stream_disconnect = pa_stream_disconnect; + ma_pa_stream_get_state_proc _pa_stream_get_state = pa_stream_get_state; + ma_pa_stream_get_sample_spec_proc _pa_stream_get_sample_spec = pa_stream_get_sample_spec; + 
ma_pa_stream_get_channel_map_proc _pa_stream_get_channel_map = pa_stream_get_channel_map; + ma_pa_stream_get_buffer_attr_proc _pa_stream_get_buffer_attr = pa_stream_get_buffer_attr; + ma_pa_stream_set_buffer_attr_proc _pa_stream_set_buffer_attr = pa_stream_set_buffer_attr; + ma_pa_stream_get_device_name_proc _pa_stream_get_device_name = pa_stream_get_device_name; + ma_pa_stream_set_write_callback_proc _pa_stream_set_write_callback = pa_stream_set_write_callback; + ma_pa_stream_set_read_callback_proc _pa_stream_set_read_callback = pa_stream_set_read_callback; + ma_pa_stream_set_suspended_callback_proc _pa_stream_set_suspended_callback = pa_stream_set_suspended_callback; + ma_pa_stream_set_moved_callback_proc _pa_stream_set_moved_callback = pa_stream_set_moved_callback; + ma_pa_stream_is_suspended_proc _pa_stream_is_suspended = pa_stream_is_suspended; + ma_pa_stream_flush_proc _pa_stream_flush = pa_stream_flush; + ma_pa_stream_drain_proc _pa_stream_drain = pa_stream_drain; + ma_pa_stream_is_corked_proc _pa_stream_is_corked = pa_stream_is_corked; + ma_pa_stream_cork_proc _pa_stream_cork = pa_stream_cork; + ma_pa_stream_trigger_proc _pa_stream_trigger = pa_stream_trigger; + ma_pa_stream_begin_write_proc _pa_stream_begin_write = pa_stream_begin_write; + ma_pa_stream_write_proc _pa_stream_write = pa_stream_write; + ma_pa_stream_peek_proc _pa_stream_peek = pa_stream_peek; + ma_pa_stream_drop_proc _pa_stream_drop = pa_stream_drop; + ma_pa_stream_writable_size_proc _pa_stream_writable_size = pa_stream_writable_size; + ma_pa_stream_readable_size_proc _pa_stream_readable_size = pa_stream_readable_size; + + pContext->pulse.pa_mainloop_new = (ma_proc)_pa_mainloop_new; + pContext->pulse.pa_mainloop_free = (ma_proc)_pa_mainloop_free; + pContext->pulse.pa_mainloop_quit = (ma_proc)_pa_mainloop_quit; + pContext->pulse.pa_mainloop_get_api = (ma_proc)_pa_mainloop_get_api; + pContext->pulse.pa_mainloop_iterate = (ma_proc)_pa_mainloop_iterate; + pContext->pulse.pa_mainloop_wakeup = (ma_proc)_pa_mainloop_wakeup; + pContext->pulse.pa_threaded_mainloop_new = (ma_proc)_pa_threaded_mainloop_new; + pContext->pulse.pa_threaded_mainloop_free = (ma_proc)_pa_threaded_mainloop_free; + pContext->pulse.pa_threaded_mainloop_start = (ma_proc)_pa_threaded_mainloop_start; + pContext->pulse.pa_threaded_mainloop_stop = (ma_proc)_pa_threaded_mainloop_stop; + pContext->pulse.pa_threaded_mainloop_lock = (ma_proc)_pa_threaded_mainloop_lock; + pContext->pulse.pa_threaded_mainloop_unlock = (ma_proc)_pa_threaded_mainloop_unlock; + pContext->pulse.pa_threaded_mainloop_wait = (ma_proc)_pa_threaded_mainloop_wait; + pContext->pulse.pa_threaded_mainloop_signal = (ma_proc)_pa_threaded_mainloop_signal; + pContext->pulse.pa_threaded_mainloop_accept = (ma_proc)_pa_threaded_mainloop_accept; + pContext->pulse.pa_threaded_mainloop_get_retval = (ma_proc)_pa_threaded_mainloop_get_retval; + pContext->pulse.pa_threaded_mainloop_get_api = (ma_proc)_pa_threaded_mainloop_get_api; + pContext->pulse.pa_threaded_mainloop_in_thread = (ma_proc)_pa_threaded_mainloop_in_thread; + pContext->pulse.pa_threaded_mainloop_set_name = (ma_proc)_pa_threaded_mainloop_set_name; + pContext->pulse.pa_context_new = (ma_proc)_pa_context_new; + pContext->pulse.pa_context_unref = (ma_proc)_pa_context_unref; + pContext->pulse.pa_context_connect = (ma_proc)_pa_context_connect; + pContext->pulse.pa_context_disconnect = (ma_proc)_pa_context_disconnect; + pContext->pulse.pa_context_set_state_callback = (ma_proc)_pa_context_set_state_callback; + pContext->pulse.pa_context_get_state = 
(ma_proc)_pa_context_get_state; + pContext->pulse.pa_context_get_sink_info_list = (ma_proc)_pa_context_get_sink_info_list; + pContext->pulse.pa_context_get_source_info_list = (ma_proc)_pa_context_get_source_info_list; + pContext->pulse.pa_context_get_sink_info_by_name = (ma_proc)_pa_context_get_sink_info_by_name; + pContext->pulse.pa_context_get_source_info_by_name = (ma_proc)_pa_context_get_source_info_by_name; + pContext->pulse.pa_operation_unref = (ma_proc)_pa_operation_unref; + pContext->pulse.pa_operation_get_state = (ma_proc)_pa_operation_get_state; + pContext->pulse.pa_channel_map_init_extend = (ma_proc)_pa_channel_map_init_extend; + pContext->pulse.pa_channel_map_valid = (ma_proc)_pa_channel_map_valid; + pContext->pulse.pa_channel_map_compatible = (ma_proc)_pa_channel_map_compatible; + pContext->pulse.pa_stream_new = (ma_proc)_pa_stream_new; + pContext->pulse.pa_stream_unref = (ma_proc)_pa_stream_unref; + pContext->pulse.pa_stream_connect_playback = (ma_proc)_pa_stream_connect_playback; + pContext->pulse.pa_stream_connect_record = (ma_proc)_pa_stream_connect_record; + pContext->pulse.pa_stream_disconnect = (ma_proc)_pa_stream_disconnect; + pContext->pulse.pa_stream_get_state = (ma_proc)_pa_stream_get_state; + pContext->pulse.pa_stream_get_sample_spec = (ma_proc)_pa_stream_get_sample_spec; + pContext->pulse.pa_stream_get_channel_map = (ma_proc)_pa_stream_get_channel_map; + pContext->pulse.pa_stream_get_buffer_attr = (ma_proc)_pa_stream_get_buffer_attr; + pContext->pulse.pa_stream_set_buffer_attr = (ma_proc)_pa_stream_set_buffer_attr; + pContext->pulse.pa_stream_get_device_name = (ma_proc)_pa_stream_get_device_name; + pContext->pulse.pa_stream_set_write_callback = (ma_proc)_pa_stream_set_write_callback; + pContext->pulse.pa_stream_set_read_callback = (ma_proc)_pa_stream_set_read_callback; + pContext->pulse.pa_stream_set_suspended_callback = (ma_proc)_pa_stream_set_suspended_callback; + pContext->pulse.pa_stream_set_moved_callback = (ma_proc)_pa_stream_set_moved_callback; + pContext->pulse.pa_stream_is_suspended = (ma_proc)_pa_stream_is_suspended; + pContext->pulse.pa_stream_flush = (ma_proc)_pa_stream_flush; + pContext->pulse.pa_stream_drain = (ma_proc)_pa_stream_drain; + pContext->pulse.pa_stream_is_corked = (ma_proc)_pa_stream_is_corked; + pContext->pulse.pa_stream_cork = (ma_proc)_pa_stream_cork; + pContext->pulse.pa_stream_trigger = (ma_proc)_pa_stream_trigger; + pContext->pulse.pa_stream_begin_write = (ma_proc)_pa_stream_begin_write; + pContext->pulse.pa_stream_write = (ma_proc)_pa_stream_write; + pContext->pulse.pa_stream_peek = (ma_proc)_pa_stream_peek; + pContext->pulse.pa_stream_drop = (ma_proc)_pa_stream_drop; + pContext->pulse.pa_stream_writable_size = (ma_proc)_pa_stream_writable_size; + pContext->pulse.pa_stream_readable_size = (ma_proc)_pa_stream_readable_size; +#endif + + /* We need to make a copy of the application and server names so we can pass them to the pa_context of each device. 
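+
+    A NULL config string simply results in a NULL copy, which is why the checks below only treat
+    a NULL result as out-of-memory when the corresponding config string was actually non-NULL.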
*/ + pContext->pulse.pApplicationName = ma_copy_string(pConfig->pulse.pApplicationName, &pContext->allocationCallbacks); + if (pContext->pulse.pApplicationName == NULL && pConfig->pulse.pApplicationName != NULL) { + return MA_OUT_OF_MEMORY; + } + + pContext->pulse.pServerName = ma_copy_string(pConfig->pulse.pServerName, &pContext->allocationCallbacks); + if (pContext->pulse.pServerName == NULL && pConfig->pulse.pServerName != NULL) { + ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks); + return MA_OUT_OF_MEMORY; + } + + result = ma_init_pa_mainloop_and_pa_context__pulse(pContext, pConfig->pulse.pApplicationName, pConfig->pulse.pServerName, pConfig->pulse.tryAutoSpawn, &pContext->pulse.pMainLoop, &pContext->pulse.pPulseContext); + if (result != MA_SUCCESS) { + ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks); + ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks); +#ifndef MA_NO_RUNTIME_LINKING + ma_dlclose(ma_context_get_log(pContext), pContext->pulse.pulseSO); +#endif + return result; + } + + /* With pa_mainloop we run a synchronous backend, but we implement our own main loop. */ + pCallbacks->onContextInit = ma_context_init__pulse; + pCallbacks->onContextUninit = ma_context_uninit__pulse; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__pulse; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__pulse; + pCallbacks->onDeviceInit = ma_device_init__pulse; + pCallbacks->onDeviceUninit = ma_device_uninit__pulse; + pCallbacks->onDeviceStart = ma_device_start__pulse; + pCallbacks->onDeviceStop = ma_device_stop__pulse; + pCallbacks->onDeviceRead = NULL; /* Not used because we're implementing onDeviceDataLoop. */ + pCallbacks->onDeviceWrite = NULL; /* Not used because we're implementing onDeviceDataLoop. */ + pCallbacks->onDeviceDataLoop = ma_device_data_loop__pulse; + pCallbacks->onDeviceDataLoopWakeup = ma_device_data_loop_wakeup__pulse; + + return MA_SUCCESS; +} +#endif + + +/****************************************************************************** + +JACK Backend + +******************************************************************************/ +#ifdef MA_HAS_JACK + +/* It is assumed jack.h is available when compile-time linking is being used. 
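+
+   Illustrative build lines only (the exact flags depend on your build system and file names);
+   with compile-time linking enabled it would look something like:
+
+       cc -c my_app.c -DMA_NO_RUNTIME_LINKING $(pkg-config --cflags jack)
+       cc my_app.o -o my_app $(pkg-config --libs jack)   (plus whatever else miniaudio needs, typically -lpthread -lm -ldl)
+
+   With the default runtime linking the JACK library is loaded at runtime instead, in the same
+   way libpulse is loaded above, and no JACK link flag is needed.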
 */
+#ifdef MA_NO_RUNTIME_LINKING
+#include <jack/jack.h>
+
+typedef jack_nframes_t ma_jack_nframes_t;
+typedef jack_options_t ma_jack_options_t;
+typedef jack_status_t ma_jack_status_t;
+typedef jack_client_t ma_jack_client_t;
+typedef jack_port_t ma_jack_port_t;
+typedef JackProcessCallback ma_JackProcessCallback;
+typedef JackBufferSizeCallback ma_JackBufferSizeCallback;
+typedef JackShutdownCallback ma_JackShutdownCallback;
+#define MA_JACK_DEFAULT_AUDIO_TYPE JACK_DEFAULT_AUDIO_TYPE
+#define ma_JackNoStartServer JackNoStartServer
+#define ma_JackPortIsInput JackPortIsInput
+#define ma_JackPortIsOutput JackPortIsOutput
+#define ma_JackPortIsPhysical JackPortIsPhysical
+#else
+typedef ma_uint32 ma_jack_nframes_t;
+typedef int ma_jack_options_t;
+typedef int ma_jack_status_t;
+typedef struct ma_jack_client_t ma_jack_client_t;
+typedef struct ma_jack_port_t ma_jack_port_t;
+typedef int (* ma_JackProcessCallback) (ma_jack_nframes_t nframes, void* arg);
+typedef int (* ma_JackBufferSizeCallback)(ma_jack_nframes_t nframes, void* arg);
+typedef void (* ma_JackShutdownCallback) (void* arg);
+#define MA_JACK_DEFAULT_AUDIO_TYPE "32 bit float mono audio"
+#define ma_JackNoStartServer 1
+#define ma_JackPortIsInput 1
+#define ma_JackPortIsOutput 2
+#define ma_JackPortIsPhysical 4
+#endif
+
+typedef ma_jack_client_t* (* ma_jack_client_open_proc) (const char* client_name, ma_jack_options_t options, ma_jack_status_t* status, ...);
+typedef int (* ma_jack_client_close_proc) (ma_jack_client_t* client);
+typedef int (* ma_jack_client_name_size_proc) (void);
+typedef int (* ma_jack_set_process_callback_proc) (ma_jack_client_t* client, ma_JackProcessCallback process_callback, void* arg);
+typedef int (* ma_jack_set_buffer_size_callback_proc)(ma_jack_client_t* client, ma_JackBufferSizeCallback bufsize_callback, void* arg);
+typedef void (* ma_jack_on_shutdown_proc) (ma_jack_client_t* client, ma_JackShutdownCallback function, void* arg);
+typedef ma_jack_nframes_t (* ma_jack_get_sample_rate_proc) (ma_jack_client_t* client);
+typedef ma_jack_nframes_t (* ma_jack_get_buffer_size_proc) (ma_jack_client_t* client);
+typedef const char** (* ma_jack_get_ports_proc) (ma_jack_client_t* client, const char* port_name_pattern, const char* type_name_pattern, unsigned long flags);
+typedef int (* ma_jack_activate_proc) (ma_jack_client_t* client);
+typedef int (* ma_jack_deactivate_proc) (ma_jack_client_t* client);
+typedef int (* ma_jack_connect_proc) (ma_jack_client_t* client, const char* source_port, const char* destination_port);
+typedef ma_jack_port_t* (* ma_jack_port_register_proc) (ma_jack_client_t* client, const char* port_name, const char* port_type, unsigned long flags, unsigned long buffer_size);
+typedef const char* (* ma_jack_port_name_proc) (const ma_jack_port_t* port);
+typedef void* (* ma_jack_port_get_buffer_proc) (ma_jack_port_t* port, ma_jack_nframes_t nframes);
+typedef void (* ma_jack_free_proc) (void* ptr);
+
+static ma_result ma_context_open_client__jack(ma_context* pContext, ma_jack_client_t** ppClient)
+{
+    size_t maxClientNameSize;
+    char clientName[256];
+    ma_jack_status_t status;
+    ma_jack_client_t* pClient;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(ppClient != NULL);
+
+    if (ppClient) {
+        *ppClient = NULL;
+    }
+
+    maxClientNameSize = ((ma_jack_client_name_size_proc)pContext->jack.jack_client_name_size)(); /* Includes null terminator. */
+    ma_strncpy_s(clientName, ma_min(sizeof(clientName), maxClientNameSize), (pContext->jack.pClientName != NULL) ? 
pContext->jack.pClientName : "miniaudio", (size_t)-1); + + pClient = ((ma_jack_client_open_proc)pContext->jack.jack_client_open)(clientName, (pContext->jack.tryStartServer) ? 0 : ma_JackNoStartServer, &status, NULL); + if (pClient == NULL) { + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + + if (ppClient) { + *ppClient = pClient; + } + + return MA_SUCCESS; +} + + +static ma_result ma_context_enumerate_devices__jack(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 cbResult = MA_TRUE; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + deviceInfo.isDefault = MA_TRUE; /* JACK only uses default devices. */ + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + /* Capture. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + deviceInfo.isDefault = MA_TRUE; /* JACK only uses default devices. */ + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + + (void)cbResult; /* For silencing a static analysis warning. */ + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__jack(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + ma_jack_client_t* pClient; + ma_result result; + const char** ppPorts; + + MA_ASSERT(pContext != NULL); + + if (pDeviceID != NULL && pDeviceID->jack != 0) { + return MA_NO_DEVICE; /* Don't know the device. */ + } + + /* Name / Description */ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + /* Jack only uses default devices. */ + pDeviceInfo->isDefault = MA_TRUE; + + /* Jack only supports f32 and has a specific channel count and sample rate. */ + pDeviceInfo->nativeDataFormats[0].format = ma_format_f32; + + /* The channel count and sample rate can only be determined by opening the device. */ + result = ma_context_open_client__jack(pContext, &pClient); + if (result != MA_SUCCESS) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[JACK] Failed to open client."); + return result; + } + + pDeviceInfo->nativeDataFormats[0].sampleRate = ((ma_jack_get_sample_rate_proc)pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pClient); + pDeviceInfo->nativeDataFormats[0].channels = 0; + + ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ((deviceType == ma_device_type_playback) ? 
ma_JackPortIsInput : ma_JackPortIsOutput)); + if (ppPorts == NULL) { + ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pClient); + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + + while (ppPorts[pDeviceInfo->nativeDataFormats[0].channels] != NULL) { + pDeviceInfo->nativeDataFormats[0].channels += 1; + } + + pDeviceInfo->nativeDataFormats[0].flags = 0; + pDeviceInfo->nativeDataFormatCount = 1; + + ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts); + ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pClient); + + (void)pContext; + return MA_SUCCESS; +} + + +static ma_result ma_device_uninit__jack(ma_device* pDevice) +{ + ma_context* pContext; + + MA_ASSERT(pDevice != NULL); + + pContext = pDevice->pContext; + MA_ASSERT(pContext != NULL); + + if (pDevice->jack.pClient != NULL) { + ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pDevice->jack.pClient); + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma_free(pDevice->jack.pIntermediaryBufferCapture, &pDevice->pContext->allocationCallbacks); + ma_free(pDevice->jack.ppPortsCapture, &pDevice->pContext->allocationCallbacks); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_free(pDevice->jack.pIntermediaryBufferPlayback, &pDevice->pContext->allocationCallbacks); + ma_free(pDevice->jack.ppPortsPlayback, &pDevice->pContext->allocationCallbacks); + } + + return MA_SUCCESS; +} + +static void ma_device__jack_shutdown_callback(void* pUserData) +{ + /* JACK died. Stop the device. */ + ma_device* pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + ma_device_stop(pDevice); +} + +static int ma_device__jack_buffer_size_callback(ma_jack_nframes_t frameCount, void* pUserData) +{ + ma_device* pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + size_t newBufferSize = frameCount * (pDevice->capture.internalChannels * ma_get_bytes_per_sample(pDevice->capture.internalFormat)); + float* pNewBuffer = (float*)ma_calloc(newBufferSize, &pDevice->pContext->allocationCallbacks); + if (pNewBuffer == NULL) { + return MA_OUT_OF_MEMORY; + } + + ma_free(pDevice->jack.pIntermediaryBufferCapture, &pDevice->pContext->allocationCallbacks); + + pDevice->jack.pIntermediaryBufferCapture = pNewBuffer; + pDevice->capture.internalPeriodSizeInFrames = frameCount; + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + size_t newBufferSize = frameCount * (pDevice->playback.internalChannels * ma_get_bytes_per_sample(pDevice->playback.internalFormat)); + float* pNewBuffer = (float*)ma_calloc(newBufferSize, &pDevice->pContext->allocationCallbacks); + if (pNewBuffer == NULL) { + return MA_OUT_OF_MEMORY; + } + + ma_free(pDevice->jack.pIntermediaryBufferPlayback, &pDevice->pContext->allocationCallbacks); + + pDevice->jack.pIntermediaryBufferPlayback = pNewBuffer; + pDevice->playback.internalPeriodSizeInFrames = frameCount; + } + + return 0; +} + +static int ma_device__jack_process_callback(ma_jack_nframes_t frameCount, void* pUserData) +{ + ma_device* pDevice; + ma_context* pContext; + ma_uint32 iChannel; + + pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + pContext = pDevice->pContext; +
MA_ASSERT(pContext != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + /* Channels need to be interleaved. */ + for (iChannel = 0; iChannel < pDevice->capture.internalChannels; ++iChannel) { + const float* pSrc = (const float*)((ma_jack_port_get_buffer_proc)pContext->jack.jack_port_get_buffer)((ma_jack_port_t*)pDevice->jack.ppPortsCapture[iChannel], frameCount); + if (pSrc != NULL) { + float* pDst = pDevice->jack.pIntermediaryBufferCapture + iChannel; + ma_jack_nframes_t iFrame; + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + *pDst = *pSrc; + + pDst += pDevice->capture.internalChannels; + pSrc += 1; + } + } + } + + ma_device_handle_backend_data_callback(pDevice, NULL, pDevice->jack.pIntermediaryBufferCapture, frameCount); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_device_handle_backend_data_callback(pDevice, pDevice->jack.pIntermediaryBufferPlayback, NULL, frameCount); + + /* Channels need to be deinterleaved. */ + for (iChannel = 0; iChannel < pDevice->playback.internalChannels; ++iChannel) { + float* pDst = (float*)((ma_jack_port_get_buffer_proc)pContext->jack.jack_port_get_buffer)((ma_jack_port_t*)pDevice->jack.ppPortsPlayback[iChannel], frameCount); + if (pDst != NULL) { + const float* pSrc = pDevice->jack.pIntermediaryBufferPlayback + iChannel; + ma_jack_nframes_t iFrame; + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + *pDst = *pSrc; + + pDst += 1; + pSrc += pDevice->playback.internalChannels; + } + } + } + } + + return 0; +} + +static ma_result ma_device_init__jack(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + ma_result result; + ma_uint32 periodSizeInFrames; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDevice != NULL); + + if (pConfig->deviceType == ma_device_type_loopback) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Loopback mode not supported."); + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* Only supporting default devices with JACK. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->pDeviceID != NULL && pDescriptorPlayback->pDeviceID->jack != 0) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->pDeviceID != NULL && pDescriptorCapture->pDeviceID->jack != 0)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Only default devices are supported."); + return MA_NO_DEVICE; + } + + /* No exclusive mode with the JACK backend. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->shareMode == ma_share_mode_exclusive)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Exclusive mode not supported."); + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + /* Open the client. */ + result = ma_context_open_client__jack(pDevice->pContext, (ma_jack_client_t**)&pDevice->jack.pClient); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to open client."); + return result; + } + + /* Callbacks. 
*/ + if (((ma_jack_set_process_callback_proc)pDevice->pContext->jack.jack_set_process_callback)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_process_callback, pDevice) != 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to set process callback."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + if (((ma_jack_set_buffer_size_callback_proc)pDevice->pContext->jack.jack_set_buffer_size_callback)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_buffer_size_callback, pDevice) != 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to set buffer size callback."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + + ((ma_jack_on_shutdown_proc)pDevice->pContext->jack.jack_on_shutdown)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_shutdown_callback, pDevice); + + + /* The buffer size in frames can change. */ + periodSizeInFrames = ((ma_jack_get_buffer_size_proc)pDevice->pContext->jack.jack_get_buffer_size)((ma_jack_client_t*)pDevice->jack.pClient); + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_uint32 iPort; + const char** ppPorts; + + pDescriptorCapture->format = ma_format_f32; + pDescriptorCapture->channels = 0; + pDescriptorCapture->sampleRate = ((ma_jack_get_sample_rate_proc)pDevice->pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pDevice->jack.pClient); + ma_channel_map_init_standard(ma_standard_channel_map_alsa, pDescriptorCapture->channelMap, ma_countof(pDescriptorCapture->channelMap), pDescriptorCapture->channels); + + ppPorts = ((ma_jack_get_ports_proc)pDevice->pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsOutput); + if (ppPorts == NULL) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + + /* Need to count the number of ports first so we can allocate some memory. */ + while (ppPorts[pDescriptorCapture->channels] != NULL) { + pDescriptorCapture->channels += 1; + } + + pDevice->jack.ppPortsCapture = (ma_ptr*)ma_malloc(sizeof(*pDevice->jack.ppPortsCapture) * pDescriptorCapture->channels, &pDevice->pContext->allocationCallbacks); + if (pDevice->jack.ppPortsCapture == NULL) { + return MA_OUT_OF_MEMORY; + } + + for (iPort = 0; iPort < pDescriptorCapture->channels; iPort += 1) { + char name[64]; + ma_strcpy_s(name, sizeof(name), "capture"); + ma_itoa_s((int)iPort, name+7, sizeof(name)-7, 10); /* 7 = length of "capture" */ + + pDevice->jack.ppPortsCapture[iPort] = ((ma_jack_port_register_proc)pDevice->pContext->jack.jack_port_register)((ma_jack_client_t*)pDevice->jack.pClient, name, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsInput, 0); + if (pDevice->jack.ppPortsCapture[iPort] == NULL) { + ((ma_jack_free_proc)pDevice->pContext->jack.jack_free)((void*)ppPorts); + ma_device_uninit__jack(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to register ports."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + } + + ((ma_jack_free_proc)pDevice->pContext->jack.jack_free)((void*)ppPorts); + + pDescriptorCapture->periodSizeInFrames = periodSizeInFrames; + pDescriptorCapture->periodCount = 1; /* There's no notion of a period in JACK. Just set to 1. 
*/ + + pDevice->jack.pIntermediaryBufferCapture = (float*)ma_calloc(pDescriptorCapture->periodSizeInFrames * ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels), &pDevice->pContext->allocationCallbacks); + if (pDevice->jack.pIntermediaryBufferCapture == NULL) { + ma_device_uninit__jack(pDevice); + return MA_OUT_OF_MEMORY; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_uint32 iPort; + const char** ppPorts; + + pDescriptorPlayback->format = ma_format_f32; + pDescriptorPlayback->channels = 0; + pDescriptorPlayback->sampleRate = ((ma_jack_get_sample_rate_proc)pDevice->pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pDevice->jack.pClient); + ma_channel_map_init_standard(ma_standard_channel_map_alsa, pDescriptorPlayback->channelMap, ma_countof(pDescriptorPlayback->channelMap), pDescriptorPlayback->channels); + + ppPorts = ((ma_jack_get_ports_proc)pDevice->pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsInput); + if (ppPorts == NULL) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + + /* Need to count the number of ports first so we can allocate some memory. */ + while (ppPorts[pDescriptorPlayback->channels] != NULL) { + pDescriptorPlayback->channels += 1; + } + + pDevice->jack.ppPortsPlayback = (ma_ptr*)ma_malloc(sizeof(*pDevice->jack.ppPortsPlayback) * pDescriptorPlayback->channels, &pDevice->pContext->allocationCallbacks); + if (pDevice->jack.ppPortsPlayback == NULL) { + ma_free(pDevice->jack.ppPortsCapture, &pDevice->pContext->allocationCallbacks); + return MA_OUT_OF_MEMORY; + } + + for (iPort = 0; iPort < pDescriptorPlayback->channels; iPort += 1) { + char name[64]; + ma_strcpy_s(name, sizeof(name), "playback"); + ma_itoa_s((int)iPort, name+8, sizeof(name)-8, 10); /* 8 = length of "playback" */ + + pDevice->jack.ppPortsPlayback[iPort] = ((ma_jack_port_register_proc)pDevice->pContext->jack.jack_port_register)((ma_jack_client_t*)pDevice->jack.pClient, name, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsOutput, 0); + if (pDevice->jack.ppPortsPlayback[iPort] == NULL) { + ((ma_jack_free_proc)pDevice->pContext->jack.jack_free)((void*)ppPorts); + ma_device_uninit__jack(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to register ports."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + } + + ((ma_jack_free_proc)pDevice->pContext->jack.jack_free)((void*)ppPorts); + + pDescriptorPlayback->periodSizeInFrames = periodSizeInFrames; + pDescriptorPlayback->periodCount = 1; /* There's no notion of a period in JACK. Just set to 1. 
*/ + + pDevice->jack.pIntermediaryBufferPlayback = (float*)ma_calloc(pDescriptorPlayback->periodSizeInFrames * ma_get_bytes_per_frame(pDescriptorPlayback->format, pDescriptorPlayback->channels), &pDevice->pContext->allocationCallbacks); + if (pDevice->jack.pIntermediaryBufferPlayback == NULL) { + ma_device_uninit__jack(pDevice); + return MA_OUT_OF_MEMORY; + } + } + + return MA_SUCCESS; +} + + +static ma_result ma_device_start__jack(ma_device* pDevice) +{ + ma_context* pContext = pDevice->pContext; + int resultJACK; + size_t i; + + resultJACK = ((ma_jack_activate_proc)pContext->jack.jack_activate)((ma_jack_client_t*)pDevice->jack.pClient); + if (resultJACK != 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to activate the JACK client."); + return MA_FAILED_TO_START_BACKEND_DEVICE; + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + const char** ppServerPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsOutput); + if (ppServerPorts == NULL) { + ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to retrieve physical ports."); + return MA_ERROR; + } + + for (i = 0; ppServerPorts[i] != NULL; ++i) { + const char* pServerPort = ppServerPorts[i]; + const char* pClientPort = ((ma_jack_port_name_proc)pContext->jack.jack_port_name)((ma_jack_port_t*)pDevice->jack.ppPortsCapture[i]); + + resultJACK = ((ma_jack_connect_proc)pContext->jack.jack_connect)((ma_jack_client_t*)pDevice->jack.pClient, pServerPort, pClientPort); + if (resultJACK != 0) { + ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts); + ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to connect ports."); + return MA_ERROR; + } + } + + ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + const char** ppServerPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsInput); + if (ppServerPorts == NULL) { + ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to retrieve physical ports."); + return MA_ERROR; + } + + for (i = 0; ppServerPorts[i] != NULL; ++i) { + const char* pServerPort = ppServerPorts[i]; + const char* pClientPort = ((ma_jack_port_name_proc)pContext->jack.jack_port_name)((ma_jack_port_t*)pDevice->jack.ppPortsPlayback[i]); + + resultJACK = ((ma_jack_connect_proc)pContext->jack.jack_connect)((ma_jack_client_t*)pDevice->jack.pClient, pClientPort, pServerPort); + if (resultJACK != 0) { + ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts); + ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to connect ports."); + return MA_ERROR; + } + } + + ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts); + } + + return MA_SUCCESS; +} + +static ma_result 
ma_device_stop__jack(ma_device* pDevice) +{ + ma_context* pContext = pDevice->pContext; + + if (((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient) != 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] An error occurred when deactivating the JACK client."); + return MA_ERROR; + } + + ma_device__on_notification_stopped(pDevice); + + return MA_SUCCESS; +} + + +static ma_result ma_context_uninit__jack(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_jack); + + ma_free(pContext->jack.pClientName, &pContext->allocationCallbacks); + pContext->jack.pClientName = NULL; + +#ifndef MA_NO_RUNTIME_LINKING + ma_dlclose(ma_context_get_log(pContext), pContext->jack.jackSO); +#endif + + return MA_SUCCESS; +} + +static ma_result ma_context_init__jack(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ +#ifndef MA_NO_RUNTIME_LINKING + const char* libjackNames[] = { +#if defined(MA_WIN32) + "libjack.dll", + "libjack64.dll" +#endif +#if defined(MA_UNIX) + "libjack.so", + "libjack.so.0" +#endif + }; + size_t i; + + for (i = 0; i < ma_countof(libjackNames); ++i) { + pContext->jack.jackSO = ma_dlopen(ma_context_get_log(pContext), libjackNames[i]); + if (pContext->jack.jackSO != NULL) { + break; + } + } + + if (pContext->jack.jackSO == NULL) { + return MA_NO_BACKEND; + } + + pContext->jack.jack_client_open = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_client_open"); + pContext->jack.jack_client_close = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_client_close"); + pContext->jack.jack_client_name_size = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_client_name_size"); + pContext->jack.jack_set_process_callback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_set_process_callback"); + pContext->jack.jack_set_buffer_size_callback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_set_buffer_size_callback"); + pContext->jack.jack_on_shutdown = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_on_shutdown"); + pContext->jack.jack_get_sample_rate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_get_sample_rate"); + pContext->jack.jack_get_buffer_size = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_get_buffer_size"); + pContext->jack.jack_get_ports = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_get_ports"); + pContext->jack.jack_activate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_activate"); + pContext->jack.jack_deactivate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_deactivate"); + pContext->jack.jack_connect = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_connect"); + pContext->jack.jack_port_register = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_port_register"); + pContext->jack.jack_port_name = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_port_name"); + pContext->jack.jack_port_get_buffer = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_port_get_buffer"); + pContext->jack.jack_free = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_free"); +#else + /* + This strange 
assignment system is here just to ensure type safety of miniaudio's function pointer + types. If anything differs slightly the compiler should throw a warning. + */ + ma_jack_client_open_proc _jack_client_open = jack_client_open; + ma_jack_client_close_proc _jack_client_close = jack_client_close; + ma_jack_client_name_size_proc _jack_client_name_size = jack_client_name_size; + ma_jack_set_process_callback_proc _jack_set_process_callback = jack_set_process_callback; + ma_jack_set_buffer_size_callback_proc _jack_set_buffer_size_callback = jack_set_buffer_size_callback; + ma_jack_on_shutdown_proc _jack_on_shutdown = jack_on_shutdown; + ma_jack_get_sample_rate_proc _jack_get_sample_rate = jack_get_sample_rate; + ma_jack_get_buffer_size_proc _jack_get_buffer_size = jack_get_buffer_size; + ma_jack_get_ports_proc _jack_get_ports = jack_get_ports; + ma_jack_activate_proc _jack_activate = jack_activate; + ma_jack_deactivate_proc _jack_deactivate = jack_deactivate; + ma_jack_connect_proc _jack_connect = jack_connect; + ma_jack_port_register_proc _jack_port_register = jack_port_register; + ma_jack_port_name_proc _jack_port_name = jack_port_name; + ma_jack_port_get_buffer_proc _jack_port_get_buffer = jack_port_get_buffer; + ma_jack_free_proc _jack_free = jack_free; + + pContext->jack.jack_client_open = (ma_proc)_jack_client_open; + pContext->jack.jack_client_close = (ma_proc)_jack_client_close; + pContext->jack.jack_client_name_size = (ma_proc)_jack_client_name_size; + pContext->jack.jack_set_process_callback = (ma_proc)_jack_set_process_callback; + pContext->jack.jack_set_buffer_size_callback = (ma_proc)_jack_set_buffer_size_callback; + pContext->jack.jack_on_shutdown = (ma_proc)_jack_on_shutdown; + pContext->jack.jack_get_sample_rate = (ma_proc)_jack_get_sample_rate; + pContext->jack.jack_get_buffer_size = (ma_proc)_jack_get_buffer_size; + pContext->jack.jack_get_ports = (ma_proc)_jack_get_ports; + pContext->jack.jack_activate = (ma_proc)_jack_activate; + pContext->jack.jack_deactivate = (ma_proc)_jack_deactivate; + pContext->jack.jack_connect = (ma_proc)_jack_connect; + pContext->jack.jack_port_register = (ma_proc)_jack_port_register; + pContext->jack.jack_port_name = (ma_proc)_jack_port_name; + pContext->jack.jack_port_get_buffer = (ma_proc)_jack_port_get_buffer; + pContext->jack.jack_free = (ma_proc)_jack_free; +#endif + + if (pConfig->jack.pClientName != NULL) { + pContext->jack.pClientName = ma_copy_string(pConfig->jack.pClientName, &pContext->allocationCallbacks); + } + pContext->jack.tryStartServer = pConfig->jack.tryStartServer; + + /* + Getting here means the JACK library is installed, but it doesn't necessarily mean it's usable. We need to quickly test this by connecting + a temporary client. 
+ */ + { + ma_jack_client_t* pDummyClient; + ma_result result = ma_context_open_client__jack(pContext, &pDummyClient); + if (result != MA_SUCCESS) { + ma_free(pContext->jack.pClientName, &pContext->allocationCallbacks); +#ifndef MA_NO_RUNTIME_LINKING + ma_dlclose(ma_context_get_log(pContext), pContext->jack.jackSO); +#endif + return MA_NO_BACKEND; + } + + ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pDummyClient); + } + + + pCallbacks->onContextInit = ma_context_init__jack; + pCallbacks->onContextUninit = ma_context_uninit__jack; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__jack; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__jack; + pCallbacks->onDeviceInit = ma_device_init__jack; + pCallbacks->onDeviceUninit = ma_device_uninit__jack; + pCallbacks->onDeviceStart = ma_device_start__jack; + pCallbacks->onDeviceStop = ma_device_stop__jack; + pCallbacks->onDeviceRead = NULL; /* Not used because JACK is asynchronous. */ + pCallbacks->onDeviceWrite = NULL; /* Not used because JACK is asynchronous. */ + pCallbacks->onDeviceDataLoop = NULL; /* Not used because JACK is asynchronous. */ + + return MA_SUCCESS; +} +#endif /* JACK */ + + + +/****************************************************************************** + +Core Audio Backend + +References +========== +- Technical Note TN2091: Device input using the HAL Output Audio Unit +https://developer.apple.com/library/archive/technotes/tn2091/_index.html + +******************************************************************************/ +#ifdef MA_HAS_COREAUDIO +#include <TargetConditionals.h> + +#if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE == 1 +#define MA_APPLE_MOBILE +#if defined(TARGET_OS_TV) && TARGET_OS_TV == 1 +#define MA_APPLE_TV +#endif +#if defined(TARGET_OS_WATCH) && TARGET_OS_WATCH == 1 +#define MA_APPLE_WATCH +#endif +#if __has_feature(objc_arc) +#define MA_BRIDGE_TRANSFER __bridge_transfer +#define MA_BRIDGE_RETAINED __bridge_retained +#else +#define MA_BRIDGE_TRANSFER +#define MA_BRIDGE_RETAINED +#endif +#else +#define MA_APPLE_DESKTOP +#endif + +#if defined(MA_APPLE_DESKTOP) +#include <CoreAudio/CoreAudio.h> +#else +#include <AVFoundation/AVFoundation.h> +#endif + +#include <AudioToolbox/AudioToolbox.h> + +/* CoreFoundation */ +typedef Boolean (* ma_CFStringGetCString_proc)(CFStringRef theString, char* buffer, CFIndex bufferSize, CFStringEncoding encoding); +typedef void (* ma_CFRelease_proc)(CFTypeRef cf); + +/* CoreAudio */ +#if defined(MA_APPLE_DESKTOP) +typedef OSStatus (* ma_AudioObjectGetPropertyData_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32* ioDataSize, void* outData); +typedef OSStatus (* ma_AudioObjectGetPropertyDataSize_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32* outDataSize); +typedef OSStatus (* ma_AudioObjectSetPropertyData_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32 inDataSize, const void* inData); +typedef OSStatus (* ma_AudioObjectAddPropertyListener_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, AudioObjectPropertyListenerProc inListener, void* inClientData); +typedef OSStatus (* ma_AudioObjectRemovePropertyListener_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, AudioObjectPropertyListenerProc inListener, void* inClientData); +#endif + +/* AudioToolbox */ +typedef AudioComponent (*
ma_AudioComponentFindNext_proc)(AudioComponent inComponent, const AudioComponentDescription* inDesc); +typedef OSStatus (* ma_AudioComponentInstanceDispose_proc)(AudioComponentInstance inInstance); +typedef OSStatus (* ma_AudioComponentInstanceNew_proc)(AudioComponent inComponent, AudioComponentInstance* outInstance); +typedef OSStatus (* ma_AudioOutputUnitStart_proc)(AudioUnit inUnit); +typedef OSStatus (* ma_AudioOutputUnitStop_proc)(AudioUnit inUnit); +typedef OSStatus (* ma_AudioUnitAddPropertyListener_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitPropertyListenerProc inProc, void* inProcUserData); +typedef OSStatus (* ma_AudioUnitGetPropertyInfo_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, UInt32* outDataSize, Boolean* outWriteable); +typedef OSStatus (* ma_AudioUnitGetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, void* outData, UInt32* ioDataSize); +typedef OSStatus (* ma_AudioUnitSetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, const void* inData, UInt32 inDataSize); +typedef OSStatus (* ma_AudioUnitInitialize_proc)(AudioUnit inUnit); +typedef OSStatus (* ma_AudioUnitRender_proc)(AudioUnit inUnit, AudioUnitRenderActionFlags* ioActionFlags, const AudioTimeStamp* inTimeStamp, UInt32 inOutputBusNumber, UInt32 inNumberFrames, AudioBufferList* ioData); + + +#define MA_COREAUDIO_OUTPUT_BUS 0 +#define MA_COREAUDIO_INPUT_BUS 1 + +#if defined(MA_APPLE_DESKTOP) +static ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit); +#endif + +/* +Core Audio + +So far, Core Audio has been the worst backend to work with due to being both unintuitive and having almost no documentation +apart from comments in the headers (which admittedly are quite good). For my own purposes, and for anybody out there who's +needing to figure out how this darn thing works, I'm going to outline a few things here. + +Since miniaudio is a fairly low-level API, one of the things it needs is control over specific devices, and it needs to be +able to identify whether or not a device can be used for playback and/or capture. The AudioObject API is the only one I've seen +that supports this level of detail. There was some public domain sample code I stumbled across that used the AudioComponent +and AudioUnit APIs, but I couldn't see anything that gave low-level control over device selection and capabilities (the +distinction between playback and capture in particular). Therefore, miniaudio is using the AudioObject API. + +Most (all?) functions in the AudioObject API take an AudioObjectID as its input. This is the device identifier. When +retrieving global information, such as the device list, you use kAudioObjectSystemObject. When retrieving device-specific +data, you pass in the ID for that device. In order to retrieve device-specific IDs you need to enumerate over each of the +devices. This is done using the AudioObjectGetPropertyDataSize() and AudioObjectGetPropertyData() APIs which seem to be +the central APIs for retrieving information about the system and specific devices. + +To use the AudioObjectGetPropertyData() API you need to use the notion of a property address. A property address is a +structure with three variables and is used to identify which property you are getting or setting.
The first is the "selector" +which is basically the specific property that you're wanting to retrieve or set. The second is the "scope", which is +typically set to kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyScopeInput for input-specific properties and +kAudioObjectPropertyScopeOutput for output-specific properties. The last is the "element" which is always set to +kAudioObjectPropertyElementMain in miniaudio's case. I don't know of any cases where this would be set to anything different. + +Back to the earlier issue of device retrieval, you first use the AudioObjectGetPropertyDataSize() API to retrieve the size +of the raw data which is just a list of AudioDeviceID's. You use the kAudioObjectSystemObject AudioObjectID, and a property +address with the kAudioHardwarePropertyDevices selector and the kAudioObjectPropertyScopeGlobal scope. Once you have the +size, allocate a block of memory of that size and then call AudioObjectGetPropertyData(). The data is just a list of +AudioDeviceID's so just do "dataSize/sizeof(AudioDeviceID)" to know the device count. +*/ + +static ma_result ma_result_from_OSStatus(OSStatus status) +{ + switch (status) + { + case noErr: return MA_SUCCESS; +#if defined(MA_APPLE_DESKTOP) + case kAudioHardwareNotRunningError: return MA_DEVICE_NOT_STARTED; + case kAudioHardwareUnspecifiedError: return MA_ERROR; + case kAudioHardwareUnknownPropertyError: return MA_INVALID_ARGS; + case kAudioHardwareBadPropertySizeError: return MA_INVALID_OPERATION; + case kAudioHardwareIllegalOperationError: return MA_INVALID_OPERATION; + case kAudioHardwareBadObjectError: return MA_INVALID_ARGS; + case kAudioHardwareBadDeviceError: return MA_INVALID_ARGS; + case kAudioHardwareBadStreamError: return MA_INVALID_ARGS; + case kAudioHardwareUnsupportedOperationError: return MA_INVALID_OPERATION; + case kAudioDeviceUnsupportedFormatError: return MA_FORMAT_NOT_SUPPORTED; + case kAudioDevicePermissionsError: return MA_ACCESS_DENIED; +#endif + default: return MA_ERROR; + } +} + +#if 0 +static ma_channel ma_channel_from_AudioChannelBitmap(AudioChannelBitmap bit) +{ + switch (bit) + { + case kAudioChannelBit_Left: return MA_CHANNEL_LEFT; + case kAudioChannelBit_Right: return MA_CHANNEL_RIGHT; + case kAudioChannelBit_Center: return MA_CHANNEL_FRONT_CENTER; + case kAudioChannelBit_LFEScreen: return MA_CHANNEL_LFE; + case kAudioChannelBit_LeftSurround: return MA_CHANNEL_BACK_LEFT; + case kAudioChannelBit_RightSurround: return MA_CHANNEL_BACK_RIGHT; + case kAudioChannelBit_LeftCenter: return MA_CHANNEL_FRONT_LEFT_CENTER; + case kAudioChannelBit_RightCenter: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case kAudioChannelBit_CenterSurround: return MA_CHANNEL_BACK_CENTER; + case kAudioChannelBit_LeftSurroundDirect: return MA_CHANNEL_SIDE_LEFT; + case kAudioChannelBit_RightSurroundDirect: return MA_CHANNEL_SIDE_RIGHT; + case kAudioChannelBit_TopCenterSurround: return MA_CHANNEL_TOP_CENTER; + case kAudioChannelBit_VerticalHeightLeft: return MA_CHANNEL_TOP_FRONT_LEFT; + case kAudioChannelBit_VerticalHeightCenter: return MA_CHANNEL_TOP_FRONT_CENTER; + case kAudioChannelBit_VerticalHeightRight: return MA_CHANNEL_TOP_FRONT_RIGHT; + case kAudioChannelBit_TopBackLeft: return MA_CHANNEL_TOP_BACK_LEFT; + case kAudioChannelBit_TopBackCenter: return MA_CHANNEL_TOP_BACK_CENTER; + case kAudioChannelBit_TopBackRight: return MA_CHANNEL_TOP_BACK_RIGHT; + default: return MA_CHANNEL_NONE; + } +} +#endif + +static ma_result ma_format_from_AudioStreamBasicDescription(const AudioStreamBasicDescription* pDescription, 
ma_format* pFormatOut) +{ + MA_ASSERT(pDescription != NULL); + MA_ASSERT(pFormatOut != NULL); + + *pFormatOut = ma_format_unknown; /* Safety. */ + + /* There's a few things miniaudio doesn't support. */ + if (pDescription->mFormatID != kAudioFormatLinearPCM) { + return MA_FORMAT_NOT_SUPPORTED; + } + + /* We don't support any non-packed formats that are aligned high. */ + if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsAlignedHigh) != 0) { + return MA_FORMAT_NOT_SUPPORTED; + } + + /* Only supporting native-endian. */ + if ((ma_is_little_endian() && (pDescription->mFormatFlags & kAudioFormatFlagIsBigEndian) != 0) || (ma_is_big_endian() && (pDescription->mFormatFlags & kAudioFormatFlagIsBigEndian) == 0)) { + return MA_FORMAT_NOT_SUPPORTED; + } + + /* We are not currently supporting non-interleaved formats (this will be added in a future version of miniaudio). */ + /*if ((pDescription->mFormatFlags & kAudioFormatFlagIsNonInterleaved) != 0) { + return MA_FORMAT_NOT_SUPPORTED; + }*/ + + if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsFloat) != 0) { + if (pDescription->mBitsPerChannel == 32) { + *pFormatOut = ma_format_f32; + return MA_SUCCESS; + } + } else { + if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsSignedInteger) != 0) { + if (pDescription->mBitsPerChannel == 16) { + *pFormatOut = ma_format_s16; + return MA_SUCCESS; + } else if (pDescription->mBitsPerChannel == 24) { + if (pDescription->mBytesPerFrame == (pDescription->mBitsPerChannel/8 * pDescription->mChannelsPerFrame)) { + *pFormatOut = ma_format_s24; + return MA_SUCCESS; + } else { + if (pDescription->mBytesPerFrame/pDescription->mChannelsPerFrame == sizeof(ma_int32)) { + /* TODO: Implement ma_format_s24_32. */ + /**pFormatOut = ma_format_s24_32;*/ + /*return MA_SUCCESS;*/ + return MA_FORMAT_NOT_SUPPORTED; + } + } + } else if (pDescription->mBitsPerChannel == 32) { + *pFormatOut = ma_format_s32; + return MA_SUCCESS; + } + } else { + if (pDescription->mBitsPerChannel == 8) { + *pFormatOut = ma_format_u8; + return MA_SUCCESS; + } + } + } + + /* Getting here means the format is not supported. 
*/ + return MA_FORMAT_NOT_SUPPORTED; +} + +#if defined(MA_APPLE_DESKTOP) +static ma_channel ma_channel_from_AudioChannelLabel(AudioChannelLabel label) +{ + switch (label) + { + case kAudioChannelLabel_Unknown: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Unused: return MA_CHANNEL_NONE; + case kAudioChannelLabel_UseCoordinates: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Left: return MA_CHANNEL_LEFT; + case kAudioChannelLabel_Right: return MA_CHANNEL_RIGHT; + case kAudioChannelLabel_Center: return MA_CHANNEL_FRONT_CENTER; + case kAudioChannelLabel_LFEScreen: return MA_CHANNEL_LFE; + case kAudioChannelLabel_LeftSurround: return MA_CHANNEL_BACK_LEFT; + case kAudioChannelLabel_RightSurround: return MA_CHANNEL_BACK_RIGHT; + case kAudioChannelLabel_LeftCenter: return MA_CHANNEL_FRONT_LEFT_CENTER; + case kAudioChannelLabel_RightCenter: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case kAudioChannelLabel_CenterSurround: return MA_CHANNEL_BACK_CENTER; + case kAudioChannelLabel_LeftSurroundDirect: return MA_CHANNEL_SIDE_LEFT; + case kAudioChannelLabel_RightSurroundDirect: return MA_CHANNEL_SIDE_RIGHT; + case kAudioChannelLabel_TopCenterSurround: return MA_CHANNEL_TOP_CENTER; + case kAudioChannelLabel_VerticalHeightLeft: return MA_CHANNEL_TOP_FRONT_LEFT; + case kAudioChannelLabel_VerticalHeightCenter: return MA_CHANNEL_TOP_FRONT_CENTER; + case kAudioChannelLabel_VerticalHeightRight: return MA_CHANNEL_TOP_FRONT_RIGHT; + case kAudioChannelLabel_TopBackLeft: return MA_CHANNEL_TOP_BACK_LEFT; + case kAudioChannelLabel_TopBackCenter: return MA_CHANNEL_TOP_BACK_CENTER; + case kAudioChannelLabel_TopBackRight: return MA_CHANNEL_TOP_BACK_RIGHT; + case kAudioChannelLabel_RearSurroundLeft: return MA_CHANNEL_BACK_LEFT; + case kAudioChannelLabel_RearSurroundRight: return MA_CHANNEL_BACK_RIGHT; + case kAudioChannelLabel_LeftWide: return MA_CHANNEL_SIDE_LEFT; + case kAudioChannelLabel_RightWide: return MA_CHANNEL_SIDE_RIGHT; + case kAudioChannelLabel_LFE2: return MA_CHANNEL_LFE; + case kAudioChannelLabel_LeftTotal: return MA_CHANNEL_LEFT; + case kAudioChannelLabel_RightTotal: return MA_CHANNEL_RIGHT; + case kAudioChannelLabel_HearingImpaired: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Narration: return MA_CHANNEL_MONO; + case kAudioChannelLabel_Mono: return MA_CHANNEL_MONO; + case kAudioChannelLabel_DialogCentricMix: return MA_CHANNEL_MONO; + case kAudioChannelLabel_CenterSurroundDirect: return MA_CHANNEL_BACK_CENTER; + case kAudioChannelLabel_Haptic: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Ambisonic_W: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Ambisonic_X: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Ambisonic_Y: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Ambisonic_Z: return MA_CHANNEL_NONE; + case kAudioChannelLabel_MS_Mid: return MA_CHANNEL_LEFT; + case kAudioChannelLabel_MS_Side: return MA_CHANNEL_RIGHT; + case kAudioChannelLabel_XY_X: return MA_CHANNEL_LEFT; + case kAudioChannelLabel_XY_Y: return MA_CHANNEL_RIGHT; + case kAudioChannelLabel_HeadphonesLeft: return MA_CHANNEL_LEFT; + case kAudioChannelLabel_HeadphonesRight: return MA_CHANNEL_RIGHT; + case kAudioChannelLabel_ClickTrack: return MA_CHANNEL_NONE; + case kAudioChannelLabel_ForeignLanguage: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Discrete: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Discrete_0: return MA_CHANNEL_AUX_0; + case kAudioChannelLabel_Discrete_1: return MA_CHANNEL_AUX_1; + case kAudioChannelLabel_Discrete_2: return MA_CHANNEL_AUX_2; + case kAudioChannelLabel_Discrete_3: return 
MA_CHANNEL_AUX_3; + case kAudioChannelLabel_Discrete_4: return MA_CHANNEL_AUX_4; + case kAudioChannelLabel_Discrete_5: return MA_CHANNEL_AUX_5; + case kAudioChannelLabel_Discrete_6: return MA_CHANNEL_AUX_6; + case kAudioChannelLabel_Discrete_7: return MA_CHANNEL_AUX_7; + case kAudioChannelLabel_Discrete_8: return MA_CHANNEL_AUX_8; + case kAudioChannelLabel_Discrete_9: return MA_CHANNEL_AUX_9; + case kAudioChannelLabel_Discrete_10: return MA_CHANNEL_AUX_10; + case kAudioChannelLabel_Discrete_11: return MA_CHANNEL_AUX_11; + case kAudioChannelLabel_Discrete_12: return MA_CHANNEL_AUX_12; + case kAudioChannelLabel_Discrete_13: return MA_CHANNEL_AUX_13; + case kAudioChannelLabel_Discrete_14: return MA_CHANNEL_AUX_14; + case kAudioChannelLabel_Discrete_15: return MA_CHANNEL_AUX_15; + case kAudioChannelLabel_Discrete_65535: return MA_CHANNEL_NONE; + +#if 0 /* Introduced in a later version of macOS. */ + case kAudioChannelLabel_HOA_ACN: return MA_CHANNEL_NONE; + case kAudioChannelLabel_HOA_ACN_0: return MA_CHANNEL_AUX_0; + case kAudioChannelLabel_HOA_ACN_1: return MA_CHANNEL_AUX_1; + case kAudioChannelLabel_HOA_ACN_2: return MA_CHANNEL_AUX_2; + case kAudioChannelLabel_HOA_ACN_3: return MA_CHANNEL_AUX_3; + case kAudioChannelLabel_HOA_ACN_4: return MA_CHANNEL_AUX_4; + case kAudioChannelLabel_HOA_ACN_5: return MA_CHANNEL_AUX_5; + case kAudioChannelLabel_HOA_ACN_6: return MA_CHANNEL_AUX_6; + case kAudioChannelLabel_HOA_ACN_7: return MA_CHANNEL_AUX_7; + case kAudioChannelLabel_HOA_ACN_8: return MA_CHANNEL_AUX_8; + case kAudioChannelLabel_HOA_ACN_9: return MA_CHANNEL_AUX_9; + case kAudioChannelLabel_HOA_ACN_10: return MA_CHANNEL_AUX_10; + case kAudioChannelLabel_HOA_ACN_11: return MA_CHANNEL_AUX_11; + case kAudioChannelLabel_HOA_ACN_12: return MA_CHANNEL_AUX_12; + case kAudioChannelLabel_HOA_ACN_13: return MA_CHANNEL_AUX_13; + case kAudioChannelLabel_HOA_ACN_14: return MA_CHANNEL_AUX_14; + case kAudioChannelLabel_HOA_ACN_15: return MA_CHANNEL_AUX_15; + case kAudioChannelLabel_HOA_ACN_65024: return MA_CHANNEL_NONE; +#endif + + default: return MA_CHANNEL_NONE; + } +} + +static ma_result ma_get_channel_map_from_AudioChannelLayout(AudioChannelLayout* pChannelLayout, ma_channel* pChannelMap, size_t channelMapCap) +{ + MA_ASSERT(pChannelLayout != NULL); + + if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions) { + UInt32 iChannel; + for (iChannel = 0; iChannel < pChannelLayout->mNumberChannelDescriptions && iChannel < channelMapCap; ++iChannel) { + pChannelMap[iChannel] = ma_channel_from_AudioChannelLabel(pChannelLayout->mChannelDescriptions[iChannel].mChannelLabel); + } + } else +#if 0 + if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelBitmap) { + /* This is the same kind of system that's used by Windows audio APIs. */ + UInt32 iChannel = 0; + UInt32 iBit; + AudioChannelBitmap bitmap = pChannelLayout->mChannelBitmap; + for (iBit = 0; iBit < 32 && iChannel < channelMapCap; ++iBit) { + AudioChannelBitmap bit = bitmap & (1 << iBit); + if (bit != 0) { + pChannelMap[iChannel++] = ma_channel_from_AudioChannelBit(bit); + } + } + } else +#endif + { + /* + Need to use the tag to determine the channel map. For now I'm just assuming a default channel map, but later on this should + be updated to determine the mapping based on the tag. + */ + UInt32 channelCount; + + /* Our channel map retrieval APIs below take 32-bit integers, so we'll want to clamp the channel map capacity. 
*/ + if (channelMapCap > 0xFFFFFFFF) { + channelMapCap = 0xFFFFFFFF; + } + + channelCount = ma_min(AudioChannelLayoutTag_GetNumberOfChannels(pChannelLayout->mChannelLayoutTag), (UInt32)channelMapCap); + + switch (pChannelLayout->mChannelLayoutTag) + { + case kAudioChannelLayoutTag_Mono: + case kAudioChannelLayoutTag_Stereo: + case kAudioChannelLayoutTag_StereoHeadphones: + case kAudioChannelLayoutTag_MatrixStereo: + case kAudioChannelLayoutTag_MidSide: + case kAudioChannelLayoutTag_XY: + case kAudioChannelLayoutTag_Binaural: + case kAudioChannelLayoutTag_Ambisonic_B_Format: + { + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, channelCount); + } break; + + case kAudioChannelLayoutTag_Octagonal: + { + pChannelMap[7] = MA_CHANNEL_SIDE_RIGHT; + pChannelMap[6] = MA_CHANNEL_SIDE_LEFT; + } MA_FALLTHROUGH; /* Intentional fallthrough. */ + case kAudioChannelLayoutTag_Hexagonal: + { + pChannelMap[5] = MA_CHANNEL_BACK_CENTER; + } MA_FALLTHROUGH; /* Intentional fallthrough. */ + case kAudioChannelLayoutTag_Pentagonal: + { + pChannelMap[4] = MA_CHANNEL_FRONT_CENTER; + } MA_FALLTHROUGH; /* Intentional fallthrough. */ + case kAudioChannelLayoutTag_Quadraphonic: + { + pChannelMap[3] = MA_CHANNEL_BACK_RIGHT; + pChannelMap[2] = MA_CHANNEL_BACK_LEFT; + pChannelMap[1] = MA_CHANNEL_RIGHT; + pChannelMap[0] = MA_CHANNEL_LEFT; + } break; + + /* TODO: Add support for more tags here. */ + + default: + { + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, channelCount); + } break; + } + } + + return MA_SUCCESS; +} + +#if (defined(MAC_OS_VERSION_12_0) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_VERSION_12_0) || \ + (defined(__IPHONE_15_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_15_0) +#define AUDIO_OBJECT_PROPERTY_ELEMENT kAudioObjectPropertyElementMain +#else +/* kAudioObjectPropertyElementMaster is deprecated. */ +#define AUDIO_OBJECT_PROPERTY_ELEMENT kAudioObjectPropertyElementMaster +#endif + +static ma_result ma_get_device_object_ids__coreaudio(ma_context* pContext, UInt32* pDeviceCount, AudioObjectID** ppDeviceObjectIDs) /* NOTE: Free the returned buffer with ma_free(). */ +{ + AudioObjectPropertyAddress propAddressDevices; + UInt32 deviceObjectsDataSize; + OSStatus status; + AudioObjectID* pDeviceObjectIDs; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDeviceCount != NULL); + MA_ASSERT(ppDeviceObjectIDs != NULL); + + /* Safety. 
*/ + *pDeviceCount = 0; + *ppDeviceObjectIDs = NULL; + + propAddressDevices.mSelector = kAudioHardwarePropertyDevices; + propAddressDevices.mScope = kAudioObjectPropertyScopeGlobal; + propAddressDevices.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(kAudioObjectSystemObject, &propAddressDevices, 0, NULL, &deviceObjectsDataSize); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + pDeviceObjectIDs = (AudioObjectID*)ma_malloc(deviceObjectsDataSize, &pContext->allocationCallbacks); + if (pDeviceObjectIDs == NULL) { + return MA_OUT_OF_MEMORY; + } + + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(kAudioObjectSystemObject, &propAddressDevices, 0, NULL, &deviceObjectsDataSize, pDeviceObjectIDs); + if (status != noErr) { + ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks); + return ma_result_from_OSStatus(status); + } + + *pDeviceCount = deviceObjectsDataSize / sizeof(AudioObjectID); + *ppDeviceObjectIDs = pDeviceObjectIDs; + + return MA_SUCCESS; +} + +static ma_result ma_get_AudioObject_uid_as_CFStringRef(ma_context* pContext, AudioObjectID objectID, CFStringRef* pUID) +{ + AudioObjectPropertyAddress propAddress; + UInt32 dataSize; + OSStatus status; + + MA_ASSERT(pContext != NULL); + + propAddress.mSelector = kAudioDevicePropertyDeviceUID; + propAddress.mScope = kAudioObjectPropertyScopeGlobal; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + dataSize = sizeof(*pUID); + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(objectID, &propAddress, 0, NULL, &dataSize, pUID); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + return MA_SUCCESS; +} + +static ma_result ma_get_AudioObject_uid(ma_context* pContext, AudioObjectID objectID, size_t bufferSize, char* bufferOut) +{ + CFStringRef uid; + ma_result result; + + MA_ASSERT(pContext != NULL); + + result = ma_get_AudioObject_uid_as_CFStringRef(pContext, objectID, &uid); + if (result != MA_SUCCESS) { + return result; + } + + if (!((ma_CFStringGetCString_proc)pContext->coreaudio.CFStringGetCString)(uid, bufferOut, bufferSize, kCFStringEncodingUTF8)) { + return MA_ERROR; + } + + ((ma_CFRelease_proc)pContext->coreaudio.CFRelease)(uid); + return MA_SUCCESS; +} + +static ma_result ma_get_AudioObject_name(ma_context* pContext, AudioObjectID objectID, size_t bufferSize, char* bufferOut) +{ + AudioObjectPropertyAddress propAddress; + CFStringRef deviceName = NULL; + UInt32 dataSize; + OSStatus status; + + MA_ASSERT(pContext != NULL); + + propAddress.mSelector = kAudioDevicePropertyDeviceNameCFString; + propAddress.mScope = kAudioObjectPropertyScopeGlobal; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + dataSize = sizeof(deviceName); + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(objectID, &propAddress, 0, NULL, &dataSize, &deviceName); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + if (!((ma_CFStringGetCString_proc)pContext->coreaudio.CFStringGetCString)(deviceName, bufferOut, bufferSize, kCFStringEncodingUTF8)) { + return MA_ERROR; + } + + ((ma_CFRelease_proc)pContext->coreaudio.CFRelease)(deviceName); + return MA_SUCCESS; +} + +static ma_bool32 ma_does_AudioObject_support_scope(ma_context* pContext, AudioObjectID deviceObjectID, AudioObjectPropertyScope scope) +{ + AudioObjectPropertyAddress propAddress; + UInt32 
dataSize; + OSStatus status; + AudioBufferList* pBufferList; + ma_bool32 isSupported; + + MA_ASSERT(pContext != NULL); + + /* To know whether or not a device is an input device we need to look at the stream configuration. If it has an output channel it's a playback device. */ + propAddress.mSelector = kAudioDevicePropertyStreamConfiguration; + propAddress.mScope = scope; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize); + if (status != noErr) { + return MA_FALSE; + } + + pBufferList = (AudioBufferList*)ma_malloc(dataSize, &pContext->allocationCallbacks); + if (pBufferList == NULL) { + return MA_FALSE; /* Out of memory. */ + } + + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pBufferList); + if (status != noErr) { + ma_free(pBufferList, &pContext->allocationCallbacks); + return MA_FALSE; + } + + isSupported = MA_FALSE; + if (pBufferList->mNumberBuffers > 0) { + isSupported = MA_TRUE; + } + + ma_free(pBufferList, &pContext->allocationCallbacks); + return isSupported; +} + +static ma_bool32 ma_does_AudioObject_support_playback(ma_context* pContext, AudioObjectID deviceObjectID) +{ + return ma_does_AudioObject_support_scope(pContext, deviceObjectID, kAudioObjectPropertyScopeOutput); +} + +static ma_bool32 ma_does_AudioObject_support_capture(ma_context* pContext, AudioObjectID deviceObjectID) +{ + return ma_does_AudioObject_support_scope(pContext, deviceObjectID, kAudioObjectPropertyScopeInput); +} + + +static ma_result ma_get_AudioObject_stream_descriptions(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, UInt32* pDescriptionCount, AudioStreamRangedDescription** ppDescriptions) /* NOTE: Free the returned pointer with ma_free(). */ +{ + AudioObjectPropertyAddress propAddress; + UInt32 dataSize; + OSStatus status; + AudioStreamRangedDescription* pDescriptions; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDescriptionCount != NULL); + MA_ASSERT(ppDescriptions != NULL); + + /* + TODO: Experiment with kAudioStreamPropertyAvailablePhysicalFormats instead of (or in addition to) kAudioStreamPropertyAvailableVirtualFormats. My + MacBook Pro uses s24/32 format, however, which miniaudio does not currently support. + */ + propAddress.mSelector = kAudioStreamPropertyAvailableVirtualFormats; /*kAudioStreamPropertyAvailablePhysicalFormats;*/ + propAddress.mScope = (deviceType == ma_device_type_playback) ?
kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + pDescriptions = (AudioStreamRangedDescription*)ma_malloc(dataSize, &pContext->allocationCallbacks); + if (pDescriptions == NULL) { + return MA_OUT_OF_MEMORY; + } + + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pDescriptions); + if (status != noErr) { + ma_free(pDescriptions, &pContext->allocationCallbacks); + return ma_result_from_OSStatus(status); + } + + *pDescriptionCount = dataSize / sizeof(*pDescriptions); + *ppDescriptions = pDescriptions; + return MA_SUCCESS; +} + + +static ma_result ma_get_AudioObject_channel_layout(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, AudioChannelLayout** ppChannelLayout) /* NOTE: Free the returned pointer with ma_free(). */ +{ + AudioObjectPropertyAddress propAddress; + UInt32 dataSize; + OSStatus status; + AudioChannelLayout* pChannelLayout; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppChannelLayout != NULL); + + *ppChannelLayout = NULL; /* Safety. */ + + propAddress.mSelector = kAudioDevicePropertyPreferredChannelLayout; + propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + pChannelLayout = (AudioChannelLayout*)ma_malloc(dataSize, &pContext->allocationCallbacks); + if (pChannelLayout == NULL) { + return MA_OUT_OF_MEMORY; + } + + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pChannelLayout); + if (status != noErr) { + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return ma_result_from_OSStatus(status); + } + + *ppChannelLayout = pChannelLayout; + return MA_SUCCESS; +} + +static ma_result ma_get_AudioObject_channel_count(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32* pChannelCount) +{ + AudioChannelLayout* pChannelLayout; + ma_result result; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pChannelCount != NULL); + + *pChannelCount = 0; /* Safety. 
*/ + + result = ma_get_AudioObject_channel_layout(pContext, deviceObjectID, deviceType, &pChannelLayout); + if (result != MA_SUCCESS) { + return result; + } + + if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions) { + *pChannelCount = pChannelLayout->mNumberChannelDescriptions; + } else if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelBitmap) { + *pChannelCount = ma_count_set_bits(pChannelLayout->mChannelBitmap); + } else { + *pChannelCount = AudioChannelLayoutTag_GetNumberOfChannels(pChannelLayout->mChannelLayoutTag); + } + + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return MA_SUCCESS; +} + +#if 0 +static ma_result ma_get_AudioObject_channel_map(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_channel* pChannelMap, size_t channelMapCap) +{ + AudioChannelLayout* pChannelLayout; + ma_result result; + + MA_ASSERT(pContext != NULL); + + result = ma_get_AudioObject_channel_layout(pContext, deviceObjectID, deviceType, &pChannelLayout); + if (result != MA_SUCCESS) { + return result; /* Rather than always failing here, would it be more robust to simply assume a default? */ + } + + result = ma_get_channel_map_from_AudioChannelLayout(pChannelLayout, pChannelMap, channelMapCap); + if (result != MA_SUCCESS) { + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return result; + } + + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return result; +} +#endif + +static ma_result ma_get_AudioObject_sample_rates(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, UInt32* pSampleRateRangesCount, AudioValueRange** ppSampleRateRanges) /* NOTE: Free the returned pointer with ma_free(). */ +{ + AudioObjectPropertyAddress propAddress; + UInt32 dataSize; + OSStatus status; + AudioValueRange* pSampleRateRanges; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pSampleRateRangesCount != NULL); + MA_ASSERT(ppSampleRateRanges != NULL); + + /* Safety. */ + *pSampleRateRangesCount = 0; + *ppSampleRateRanges = NULL; + + propAddress.mSelector = kAudioDevicePropertyAvailableNominalSampleRates; + propAddress.mScope = (deviceType == ma_device_type_playback) ? 
kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + pSampleRateRanges = (AudioValueRange*)ma_malloc(dataSize, &pContext->allocationCallbacks); + if (pSampleRateRanges == NULL) { + return MA_OUT_OF_MEMORY; + } + + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pSampleRateRanges); + if (status != noErr) { + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return ma_result_from_OSStatus(status); + } + + *pSampleRateRangesCount = dataSize / sizeof(*pSampleRateRanges); + *ppSampleRateRanges = pSampleRateRanges; + return MA_SUCCESS; +} + +#if 0 +static ma_result ma_get_AudioObject_get_closest_sample_rate(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32 sampleRateIn, ma_uint32* pSampleRateOut) +{ + UInt32 sampleRateRangeCount; + AudioValueRange* pSampleRateRanges; + ma_result result; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pSampleRateOut != NULL); + + *pSampleRateOut = 0; /* Safety. */ + + result = ma_get_AudioObject_sample_rates(pContext, deviceObjectID, deviceType, &sampleRateRangeCount, &pSampleRateRanges); + if (result != MA_SUCCESS) { + return result; + } + + if (sampleRateRangeCount == 0) { + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return MA_ERROR; /* Should never hit this case should we? */ + } + + if (sampleRateIn == 0) { + /* Search in order of miniaudio's preferred priority. */ + UInt32 iMALSampleRate; + for (iMALSampleRate = 0; iMALSampleRate < ma_countof(g_maStandardSampleRatePriorities); ++iMALSampleRate) { + ma_uint32 malSampleRate = g_maStandardSampleRatePriorities[iMALSampleRate]; + UInt32 iCASampleRate; + for (iCASampleRate = 0; iCASampleRate < sampleRateRangeCount; ++iCASampleRate) { + AudioValueRange caSampleRate = pSampleRateRanges[iCASampleRate]; + if (caSampleRate.mMinimum <= malSampleRate && caSampleRate.mMaximum >= malSampleRate) { + *pSampleRateOut = malSampleRate; + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return MA_SUCCESS; + } + } + } + + /* + If we get here it means none of miniaudio's standard sample rates matched any of the supported sample rates from the device. In this + case we just fall back to the first one reported by Core Audio. + */ + MA_ASSERT(sampleRateRangeCount > 0); + + *pSampleRateOut = pSampleRateRanges[0].mMinimum; + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return MA_SUCCESS; + } else { + /* Find the closest match to this sample rate. 
*/ + UInt32 currentAbsoluteDifference = INT32_MAX; + UInt32 iCurrentClosestRange = (UInt32)-1; + UInt32 iRange; + for (iRange = 0; iRange < sampleRateRangeCount; ++iRange) { + if (pSampleRateRanges[iRange].mMinimum <= sampleRateIn && pSampleRateRanges[iRange].mMaximum >= sampleRateIn) { + *pSampleRateOut = sampleRateIn; + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return MA_SUCCESS; + } else { + UInt32 absoluteDifference; + if (pSampleRateRanges[iRange].mMinimum > sampleRateIn) { + absoluteDifference = pSampleRateRanges[iRange].mMinimum - sampleRateIn; + } else { + absoluteDifference = sampleRateIn - pSampleRateRanges[iRange].mMaximum; + } + + if (currentAbsoluteDifference > absoluteDifference) { + currentAbsoluteDifference = absoluteDifference; + iCurrentClosestRange = iRange; + } + } + } + + MA_ASSERT(iCurrentClosestRange != (UInt32)-1); + + *pSampleRateOut = pSampleRateRanges[iCurrentClosestRange].mMinimum; + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return MA_SUCCESS; + } + + /* Should never get here, but it would mean we weren't able to find any suitable sample rates. */ + /*ma_free(pSampleRateRanges, &pContext->allocationCallbacks);*/ + /*return MA_ERROR;*/ +} +#endif + +static ma_result ma_get_AudioObject_closest_buffer_size_in_frames(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32 bufferSizeInFramesIn, ma_uint32* pBufferSizeInFramesOut) +{ + AudioObjectPropertyAddress propAddress; + AudioValueRange bufferSizeRange; + UInt32 dataSize; + OSStatus status; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pBufferSizeInFramesOut != NULL); + + *pBufferSizeInFramesOut = 0; /* Safety. */ + + propAddress.mSelector = kAudioDevicePropertyBufferFrameSizeRange; + propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + dataSize = sizeof(bufferSizeRange); + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, &bufferSizeRange); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + /* This is just a clamp. */ + if (bufferSizeInFramesIn < bufferSizeRange.mMinimum) { + *pBufferSizeInFramesOut = (ma_uint32)bufferSizeRange.mMinimum; + } else if (bufferSizeInFramesIn > bufferSizeRange.mMaximum) { + *pBufferSizeInFramesOut = (ma_uint32)bufferSizeRange.mMaximum; + } else { + *pBufferSizeInFramesOut = bufferSizeInFramesIn; + } + + return MA_SUCCESS; +} + +static ma_result ma_set_AudioObject_buffer_size_in_frames(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32* pPeriodSizeInOut) +{ + ma_result result; + ma_uint32 chosenBufferSizeInFrames; + AudioObjectPropertyAddress propAddress; + UInt32 dataSize; + OSStatus status; + + MA_ASSERT(pContext != NULL); + + result = ma_get_AudioObject_closest_buffer_size_in_frames(pContext, deviceObjectID, deviceType, *pPeriodSizeInOut, &chosenBufferSizeInFrames); + if (result != MA_SUCCESS) { + return result; + } + + /* Try setting the size of the buffer... If this fails we just use whatever is currently set. */ + propAddress.mSelector = kAudioDevicePropertyBufferFrameSize; + propAddress.mScope = (deviceType == ma_device_type_playback) ? 
kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + ((ma_AudioObjectSetPropertyData_proc)pContext->coreaudio.AudioObjectSetPropertyData)(deviceObjectID, &propAddress, 0, NULL, sizeof(chosenBufferSizeInFrames), &chosenBufferSizeInFrames); + + /* Get the actual size of the buffer. */ + dataSize = sizeof(*pPeriodSizeInOut); + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, &chosenBufferSizeInFrames); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + *pPeriodSizeInOut = chosenBufferSizeInFrames; + return MA_SUCCESS; +} + +static ma_result ma_find_default_AudioObjectID(ma_context* pContext, ma_device_type deviceType, AudioObjectID* pDeviceObjectID) +{ + AudioObjectPropertyAddress propAddressDefaultDevice; + UInt32 defaultDeviceObjectIDSize = sizeof(AudioObjectID); + AudioObjectID defaultDeviceObjectID; + OSStatus status; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDeviceObjectID != NULL); + + /* Safety. */ + *pDeviceObjectID = 0; + + propAddressDefaultDevice.mScope = kAudioObjectPropertyScopeGlobal; + propAddressDefaultDevice.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + if (deviceType == ma_device_type_playback) { + propAddressDefaultDevice.mSelector = kAudioHardwarePropertyDefaultOutputDevice; + } else { + propAddressDefaultDevice.mSelector = kAudioHardwarePropertyDefaultInputDevice; + } + + defaultDeviceObjectIDSize = sizeof(AudioObjectID); + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(kAudioObjectSystemObject, &propAddressDefaultDevice, 0, NULL, &defaultDeviceObjectIDSize, &defaultDeviceObjectID); + if (status == noErr) { + *pDeviceObjectID = defaultDeviceObjectID; + return MA_SUCCESS; + } + + /* If we get here it means we couldn't find the device. */ + return MA_NO_DEVICE; +} + +static ma_result ma_find_AudioObjectID(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, AudioObjectID* pDeviceObjectID) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDeviceObjectID != NULL); + + /* Safety. */ + *pDeviceObjectID = 0; + + if (pDeviceID == NULL) { + /* Default device. */ + return ma_find_default_AudioObjectID(pContext, deviceType, pDeviceObjectID); + } else { + /* Explicit device. 
*/ + UInt32 deviceCount; + AudioObjectID* pDeviceObjectIDs; + ma_result result; + UInt32 iDevice; + + result = ma_get_device_object_ids__coreaudio(pContext, &deviceCount, &pDeviceObjectIDs); + if (result != MA_SUCCESS) { + return result; + } + + for (iDevice = 0; iDevice < deviceCount; ++iDevice) { + AudioObjectID deviceObjectID = pDeviceObjectIDs[iDevice]; + + char uid[256]; + if (ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(uid), uid) != MA_SUCCESS) { + continue; + } + + if (deviceType == ma_device_type_playback) { + if (ma_does_AudioObject_support_playback(pContext, deviceObjectID)) { + if (strcmp(uid, pDeviceID->coreaudio) == 0) { + *pDeviceObjectID = deviceObjectID; + ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks); + return MA_SUCCESS; + } + } + } else { + if (ma_does_AudioObject_support_capture(pContext, deviceObjectID)) { + if (strcmp(uid, pDeviceID->coreaudio) == 0) { + *pDeviceObjectID = deviceObjectID; + ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks); + return MA_SUCCESS; + } + } + } + } + + ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks); + } + + /* If we get here it means we couldn't find the device. */ + return MA_NO_DEVICE; +} + + +static ma_result ma_find_best_format__coreaudio(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const AudioStreamBasicDescription* pOrigFormat, AudioStreamBasicDescription* pFormat) +{ + UInt32 deviceFormatDescriptionCount; + AudioStreamRangedDescription* pDeviceFormatDescriptions; + ma_result result; + ma_uint32 desiredSampleRate; + ma_uint32 desiredChannelCount; + ma_format desiredFormat; + AudioStreamBasicDescription bestDeviceFormatSoFar; + ma_bool32 hasSupportedFormat; + UInt32 iFormat; + + result = ma_get_AudioObject_stream_descriptions(pContext, deviceObjectID, deviceType, &deviceFormatDescriptionCount, &pDeviceFormatDescriptions); + if (result != MA_SUCCESS) { + return result; + } + + desiredSampleRate = sampleRate; + if (desiredSampleRate == 0) { + desiredSampleRate = pOrigFormat->mSampleRate; + } + + desiredChannelCount = channels; + if (desiredChannelCount == 0) { + desiredChannelCount = pOrigFormat->mChannelsPerFrame; + } + + desiredFormat = format; + if (desiredFormat == ma_format_unknown) { + result = ma_format_from_AudioStreamBasicDescription(pOrigFormat, &desiredFormat); + if (result != MA_SUCCESS || desiredFormat == ma_format_unknown) { + desiredFormat = g_maFormatPriorities[0]; + } + } + + /* + If we get here it means we don't have an exact match to what the client is asking for. We'll need to find the closest one. The next + loop will check for formats that have the same sample rate to what we're asking for. If there is, we prefer that one in all cases. 
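In practice the loop below is roughly hierarchical: a matching sample rate beats everything else, then a matching channel count, and only then the sample format with the better priority according to ma_get_format_priority_index(). As an illustrative example, if the client asks for f32/stereo/48000 and the device reports both s16/stereo/48000 and f32/stereo/44100, the s16/stereo/48000 description wins because the sample rate match outranks the sample format match (the s16 data will simply be converted to the client's format later).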
+ */ + MA_ZERO_OBJECT(&bestDeviceFormatSoFar); + + hasSupportedFormat = MA_FALSE; + for (iFormat = 0; iFormat < deviceFormatDescriptionCount; ++iFormat) { + ma_format format; + ma_result formatResult = ma_format_from_AudioStreamBasicDescription(&pDeviceFormatDescriptions[iFormat].mFormat, &format); + if (formatResult == MA_SUCCESS && format != ma_format_unknown) { + hasSupportedFormat = MA_TRUE; + bestDeviceFormatSoFar = pDeviceFormatDescriptions[iFormat].mFormat; + break; + } + } + + if (!hasSupportedFormat) { + ma_free(pDeviceFormatDescriptions, &pContext->allocationCallbacks); + return MA_FORMAT_NOT_SUPPORTED; + } + + + for (iFormat = 0; iFormat < deviceFormatDescriptionCount; ++iFormat) { + AudioStreamBasicDescription thisDeviceFormat = pDeviceFormatDescriptions[iFormat].mFormat; + ma_format thisSampleFormat; + ma_result formatResult; + ma_format bestSampleFormatSoFar; + + /* If the format is not supported by miniaudio we need to skip this one entirely. */ + formatResult = ma_format_from_AudioStreamBasicDescription(&pDeviceFormatDescriptions[iFormat].mFormat, &thisSampleFormat); + if (formatResult != MA_SUCCESS || thisSampleFormat == ma_format_unknown) { + continue; /* The format is not supported by miniaudio. Skip. */ + } + + ma_format_from_AudioStreamBasicDescription(&bestDeviceFormatSoFar, &bestSampleFormatSoFar); + + /* Getting here means the format is supported by miniaudio which makes this format a candidate. */ + if (thisDeviceFormat.mSampleRate != desiredSampleRate) { + /* + The sample rate does not match, but this format could still be usable, although it's a very low priority. If the best format + so far has an equal sample rate we can just ignore this one. + */ + if (bestDeviceFormatSoFar.mSampleRate == desiredSampleRate) { + continue; /* The best sample rate so far has the same sample rate as what we requested which means it's still the best so far. Skip this format. */ + } else { + /* In this case, neither the best format so far nor this one have the same sample rate. Check the channel count next. */ + if (thisDeviceFormat.mChannelsPerFrame != desiredChannelCount) { + /* This format has a different sample rate _and_ a different channel count. */ + if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) { + continue; /* No change to the best format. */ + } else { + /* + Both this format and the best so far have different sample rates and different channel counts. Whichever has the + best format is the new best. + */ + if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + continue; /* No change to the best format. */ + } + } + } else { + /* This format has a different sample rate but the desired channel count. */ + if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) { + /* Both this format and the best so far have the desired channel count. Whichever has the best format is the new best. */ + if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + continue; /* No change to the best format for now. */ + } + } else { + /* This format has the desired channel count, but the best so far does not. We have a new best. */ + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } + } + } + } else { + /* + The sample rates match which makes this format a very high priority contender. 
If the best format so far has a different + sample rate it needs to be replaced with this one. + */ + if (bestDeviceFormatSoFar.mSampleRate != desiredSampleRate) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + /* In this case both this format and the best format so far have the same sample rate. Check the channel count next. */ + if (thisDeviceFormat.mChannelsPerFrame == desiredChannelCount) { + /* + In this case this format has the same channel count as what the client is requesting. If the best format so far has + a different count, this one becomes the new best. + */ + if (bestDeviceFormatSoFar.mChannelsPerFrame != desiredChannelCount) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + /* In this case both this format and the best so far have the ideal sample rate and channel count. Check the format. */ + if (thisSampleFormat == desiredFormat) { + bestDeviceFormatSoFar = thisDeviceFormat; + break; /* Found the exact match. */ + } else { + /* The formats are different. The new best format is the one with the highest priority format according to miniaudio. */ + if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + continue; /* No change to the best format for now. */ + } + } + } + } else { + /* + In this case the channel count is different to what the client has requested. If the best so far has the same channel + count as the requested count then it remains the best. + */ + if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) { + continue; + } else { + /* + This is the case where both have the same sample rate (good) but different channel counts. Right now both have about + the same priority, but we need to compare the format now. + */ + if (thisSampleFormat == bestSampleFormatSoFar) { + if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + continue; /* No change to the best format for now. 
*/ + } + } + } + } + } + } + } + + *pFormat = bestDeviceFormatSoFar; + + ma_free(pDeviceFormatDescriptions, &pContext->allocationCallbacks); + return MA_SUCCESS; +} + +static ma_result ma_get_AudioUnit_channel_map(ma_context* pContext, AudioUnit audioUnit, ma_device_type deviceType, ma_channel* pChannelMap, size_t channelMapCap) +{ + AudioUnitScope deviceScope; + AudioUnitElement deviceBus; + UInt32 channelLayoutSize; + OSStatus status; + AudioChannelLayout* pChannelLayout; + ma_result result; + + MA_ASSERT(pContext != NULL); + + if (deviceType == ma_device_type_playback) { + deviceScope = kAudioUnitScope_Input; + deviceBus = MA_COREAUDIO_OUTPUT_BUS; + } else { + deviceScope = kAudioUnitScope_Output; + deviceBus = MA_COREAUDIO_INPUT_BUS; + } + + status = ((ma_AudioUnitGetPropertyInfo_proc)pContext->coreaudio.AudioUnitGetPropertyInfo)(audioUnit, kAudioUnitProperty_AudioChannelLayout, deviceScope, deviceBus, &channelLayoutSize, NULL); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + pChannelLayout = (AudioChannelLayout*)ma_malloc(channelLayoutSize, &pContext->allocationCallbacks); + if (pChannelLayout == NULL) { + return MA_OUT_OF_MEMORY; + } + + status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioUnitProperty_AudioChannelLayout, deviceScope, deviceBus, pChannelLayout, &channelLayoutSize); + if (status != noErr) { + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return ma_result_from_OSStatus(status); + } + + result = ma_get_channel_map_from_AudioChannelLayout(pChannelLayout, pChannelMap, channelMapCap); + if (result != MA_SUCCESS) { + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return result; + } + + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return MA_SUCCESS; +} +#endif /* MA_APPLE_DESKTOP */ + + +#if !defined(MA_APPLE_DESKTOP) +static void ma_AVAudioSessionPortDescription_to_device_info(AVAudioSessionPortDescription* pPortDesc, ma_device_info* pInfo) +{ + MA_ZERO_OBJECT(pInfo); + ma_strncpy_s(pInfo->name, sizeof(pInfo->name), [pPortDesc.portName UTF8String], (size_t)-1); + ma_strncpy_s(pInfo->id.coreaudio, sizeof(pInfo->id.coreaudio), [pPortDesc.UID UTF8String], (size_t)-1); +} +#endif + +static ma_result ma_context_enumerate_devices__coreaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ +#if defined(MA_APPLE_DESKTOP) + UInt32 deviceCount; + AudioObjectID* pDeviceObjectIDs; + AudioObjectID defaultDeviceObjectIDPlayback; + AudioObjectID defaultDeviceObjectIDCapture; + ma_result result; + UInt32 iDevice; + + ma_find_default_AudioObjectID(pContext, ma_device_type_playback, &defaultDeviceObjectIDPlayback); /* OK if this fails. */ + ma_find_default_AudioObjectID(pContext, ma_device_type_capture, &defaultDeviceObjectIDCapture); /* OK if this fails. 
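The default IDs are only used for setting the isDefault flag in the loop below; if the lookup fails they're just left at 0 (set as a safety measure inside ma_find_default_AudioObjectID()) and will never match a real device ID.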
*/ + + result = ma_get_device_object_ids__coreaudio(pContext, &deviceCount, &pDeviceObjectIDs); + if (result != MA_SUCCESS) { + return result; + } + + for (iDevice = 0; iDevice < deviceCount; ++iDevice) { + AudioObjectID deviceObjectID = pDeviceObjectIDs[iDevice]; + ma_device_info info; + + MA_ZERO_OBJECT(&info); + if (ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(info.id.coreaudio), info.id.coreaudio) != MA_SUCCESS) { + continue; + } + if (ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(info.name), info.name) != MA_SUCCESS) { + continue; + } + + if (ma_does_AudioObject_support_playback(pContext, deviceObjectID)) { + if (deviceObjectID == defaultDeviceObjectIDPlayback) { + info.isDefault = MA_TRUE; + } + + if (!callback(pContext, ma_device_type_playback, &info, pUserData)) { + break; + } + } + if (ma_does_AudioObject_support_capture(pContext, deviceObjectID)) { + if (deviceObjectID == defaultDeviceObjectIDCapture) { + info.isDefault = MA_TRUE; + } + + if (!callback(pContext, ma_device_type_capture, &info, pUserData)) { + break; + } + } + } + + ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks); +#else + ma_device_info info; + NSArray *pInputs = [[[AVAudioSession sharedInstance] currentRoute] inputs]; + NSArray *pOutputs = [[[AVAudioSession sharedInstance] currentRoute] outputs]; + + for (AVAudioSessionPortDescription* pPortDesc in pOutputs) { + ma_AVAudioSessionPortDescription_to_device_info(pPortDesc, &info); + if (!callback(pContext, ma_device_type_playback, &info, pUserData)) { + return MA_SUCCESS; + } + } + + for (AVAudioSessionPortDescription* pPortDesc in pInputs) { + ma_AVAudioSessionPortDescription_to_device_info(pPortDesc, &info); + if (!callback(pContext, ma_device_type_capture, &info, pUserData)) { + return MA_SUCCESS; + } + } +#endif + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__coreaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + ma_result result; + + MA_ASSERT(pContext != NULL); + +#if defined(MA_APPLE_DESKTOP) + /* Desktop */ + { + AudioObjectID deviceObjectID; + AudioObjectID defaultDeviceObjectID; + UInt32 streamDescriptionCount; + AudioStreamRangedDescription* pStreamDescriptions; + UInt32 iStreamDescription; + UInt32 sampleRateRangeCount; + AudioValueRange* pSampleRateRanges; + + ma_find_default_AudioObjectID(pContext, deviceType, &defaultDeviceObjectID); /* OK if this fails. */ + + result = ma_find_AudioObjectID(pContext, deviceType, pDeviceID, &deviceObjectID); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(pDeviceInfo->id.coreaudio), pDeviceInfo->id.coreaudio); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(pDeviceInfo->name), pDeviceInfo->name); + if (result != MA_SUCCESS) { + return result; + } + + if (deviceObjectID == defaultDeviceObjectID) { + pDeviceInfo->isDefault = MA_TRUE; + } + + /* + There could be a large number of permutations here. Fortunately there is only a single channel count + being reported which reduces this quite a bit. For sample rates we're only reporting those that are + one of miniaudio's recognized "standard" rates. If there are still more formats than can fit into + our fixed sized array we'll just need to truncate them. 
This is unlikely and will probably only happen + if some driver performs software data conversion and therefore reports every possible format and + sample rate. + */ + pDeviceInfo->nativeDataFormatCount = 0; + + /* Formats. */ + { + ma_format uniqueFormats[ma_format_count]; + ma_uint32 uniqueFormatCount = 0; + ma_uint32 channels; + + /* Channels. */ + result = ma_get_AudioObject_channel_count(pContext, deviceObjectID, deviceType, &channels); + if (result != MA_SUCCESS) { + return result; + } + + /* Formats. */ + result = ma_get_AudioObject_stream_descriptions(pContext, deviceObjectID, deviceType, &streamDescriptionCount, &pStreamDescriptions); + if (result != MA_SUCCESS) { + return result; + } + + for (iStreamDescription = 0; iStreamDescription < streamDescriptionCount; ++iStreamDescription) { + ma_format format; + ma_bool32 hasFormatBeenHandled = MA_FALSE; + ma_uint32 iOutputFormat; + ma_uint32 iSampleRate; + + result = ma_format_from_AudioStreamBasicDescription(&pStreamDescriptions[iStreamDescription].mFormat, &format); + if (result != MA_SUCCESS) { + continue; + } + + MA_ASSERT(format != ma_format_unknown); + + /* Make sure the format isn't already in the output list. */ + for (iOutputFormat = 0; iOutputFormat < uniqueFormatCount; ++iOutputFormat) { + if (uniqueFormats[iOutputFormat] == format) { + hasFormatBeenHandled = MA_TRUE; + break; + } + } + + /* If we've already handled this format just skip it. */ + if (hasFormatBeenHandled) { + continue; + } + + uniqueFormats[uniqueFormatCount] = format; + uniqueFormatCount += 1; + + /* Sample Rates */ + result = ma_get_AudioObject_sample_rates(pContext, deviceObjectID, deviceType, &sampleRateRangeCount, &pSampleRateRanges); + if (result != MA_SUCCESS) { + return result; + } + + /* + Annoyingly Core Audio reports a sample rate range. We just get all the standard rates that are + between this range. + */ + for (iSampleRate = 0; iSampleRate < sampleRateRangeCount; ++iSampleRate) { + ma_uint32 iStandardSampleRate; + for (iStandardSampleRate = 0; iStandardSampleRate < ma_countof(g_maStandardSampleRatePriorities); iStandardSampleRate += 1) { + ma_uint32 standardSampleRate = g_maStandardSampleRatePriorities[iStandardSampleRate]; + if (standardSampleRate >= pSampleRateRanges[iSampleRate].mMinimum && standardSampleRate <= pSampleRateRanges[iSampleRate].mMaximum) { + /* We have a new data format. Add it to the list. */ + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format = format; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels = channels; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = standardSampleRate; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags = 0; + pDeviceInfo->nativeDataFormatCount += 1; + + if (pDeviceInfo->nativeDataFormatCount >= ma_countof(pDeviceInfo->nativeDataFormats)) { + break; /* No more room for any more formats. */ + } + } + } + } + + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + + if (pDeviceInfo->nativeDataFormatCount >= ma_countof(pDeviceInfo->nativeDataFormats)) { + break; /* No more room for any more formats. 
*/ + } + } + + ma_free(pStreamDescriptions, &pContext->allocationCallbacks); + } + } +#else + /* Mobile */ + { + AudioComponentDescription desc; + AudioComponent component; + AudioUnit audioUnit; + OSStatus status; + AudioUnitScope formatScope; + AudioUnitElement formatElement; + AudioStreamBasicDescription bestFormat; + UInt32 propSize; + + /* We want to ensure we use a consistent device name to device enumeration. */ + if (pDeviceID != NULL && pDeviceID->coreaudio[0] != '\0') { + ma_bool32 found = MA_FALSE; + if (deviceType == ma_device_type_playback) { + NSArray *pOutputs = [[[AVAudioSession sharedInstance] currentRoute] outputs]; + for (AVAudioSessionPortDescription* pPortDesc in pOutputs) { + if (strcmp(pDeviceID->coreaudio, [pPortDesc.UID UTF8String]) == 0) { + ma_AVAudioSessionPortDescription_to_device_info(pPortDesc, pDeviceInfo); + found = MA_TRUE; + break; + } + } + } else { + NSArray *pInputs = [[[AVAudioSession sharedInstance] currentRoute] inputs]; + for (AVAudioSessionPortDescription* pPortDesc in pInputs) { + if (strcmp(pDeviceID->coreaudio, [pPortDesc.UID UTF8String]) == 0) { + ma_AVAudioSessionPortDescription_to_device_info(pPortDesc, pDeviceInfo); + found = MA_TRUE; + break; + } + } + } + + if (!found) { + return MA_DOES_NOT_EXIST; + } + } else { + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + } + + + /* + Retrieving device information is more annoying on mobile than desktop. For simplicity I'm locking this down to whatever format is + reported on a temporary I/O unit. The problem, however, is that this doesn't return a value for the sample rate which we need to + retrieve from the AVAudioSession shared instance. + */ + desc.componentType = kAudioUnitType_Output; + desc.componentSubType = kAudioUnitSubType_RemoteIO; + desc.componentManufacturer = kAudioUnitManufacturer_Apple; + desc.componentFlags = 0; + desc.componentFlagsMask = 0; + + component = ((ma_AudioComponentFindNext_proc)pContext->coreaudio.AudioComponentFindNext)(NULL, &desc); + if (component == NULL) { + return MA_FAILED_TO_INIT_BACKEND; + } + + status = ((ma_AudioComponentInstanceNew_proc)pContext->coreaudio.AudioComponentInstanceNew)(component, &audioUnit); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + formatScope = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output; + formatElement = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS; + + propSize = sizeof(bestFormat); + status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, &propSize); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(audioUnit); + return ma_result_from_OSStatus(status); + } + + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(audioUnit); + audioUnit = NULL; + + /* Only a single format is being reported for iOS. 
*/ + pDeviceInfo->nativeDataFormatCount = 1; + + result = ma_format_from_AudioStreamBasicDescription(&bestFormat, &pDeviceInfo->nativeDataFormats[0].format); + if (result != MA_SUCCESS) { + return result; + } + + pDeviceInfo->nativeDataFormats[0].channels = bestFormat.mChannelsPerFrame; + + /* + It looks like Apple are wanting to push the whole AVAudioSession thing. Thus, we need to use that to determine device settings. To do + this we just get the shared instance and inspect. + */ + @autoreleasepool { + AVAudioSession* pAudioSession = [AVAudioSession sharedInstance]; + MA_ASSERT(pAudioSession != NULL); + + pDeviceInfo->nativeDataFormats[0].sampleRate = (ma_uint32)pAudioSession.sampleRate; + } + } +#endif + + (void)pDeviceInfo; /* Unused. */ + return MA_SUCCESS; +} + +static AudioBufferList* ma_allocate_AudioBufferList__coreaudio(ma_uint32 sizeInFrames, ma_format format, ma_uint32 channels, ma_stream_layout layout, const ma_allocation_callbacks* pAllocationCallbacks) +{ + AudioBufferList* pBufferList; + UInt32 audioBufferSizeInBytes; + size_t allocationSize; + + MA_ASSERT(sizeInFrames > 0); + MA_ASSERT(format != ma_format_unknown); + MA_ASSERT(channels > 0); + + allocationSize = sizeof(AudioBufferList) - sizeof(AudioBuffer); /* Subtract sizeof(AudioBuffer) because that part is dynamically sized. */ + if (layout == ma_stream_layout_interleaved) { + /* Interleaved case. This is the simple case because we just have one buffer. */ + allocationSize += sizeof(AudioBuffer) * 1; + } else { + /* Non-interleaved case. This is the more complex case because there's more than one buffer. */ + allocationSize += sizeof(AudioBuffer) * channels; + } + + allocationSize += sizeInFrames * ma_get_bytes_per_frame(format, channels); + + pBufferList = (AudioBufferList*)ma_malloc(allocationSize, pAllocationCallbacks); + if (pBufferList == NULL) { + return NULL; + } + + audioBufferSizeInBytes = (UInt32)(sizeInFrames * ma_get_bytes_per_sample(format)); + + if (layout == ma_stream_layout_interleaved) { + pBufferList->mNumberBuffers = 1; + pBufferList->mBuffers[0].mNumberChannels = channels; + pBufferList->mBuffers[0].mDataByteSize = audioBufferSizeInBytes * channels; + pBufferList->mBuffers[0].mData = (ma_uint8*)pBufferList + sizeof(AudioBufferList); + } else { + ma_uint32 iBuffer; + pBufferList->mNumberBuffers = channels; + for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; ++iBuffer) { + pBufferList->mBuffers[iBuffer].mNumberChannels = 1; + pBufferList->mBuffers[iBuffer].mDataByteSize = audioBufferSizeInBytes; + pBufferList->mBuffers[iBuffer].mData = (ma_uint8*)pBufferList + ((sizeof(AudioBufferList) - sizeof(AudioBuffer)) + (sizeof(AudioBuffer) * channels)) + (audioBufferSizeInBytes * iBuffer); + } + } + + return pBufferList; +} + +static ma_result ma_device_realloc_AudioBufferList__coreaudio(ma_device* pDevice, ma_uint32 sizeInFrames, ma_format format, ma_uint32 channels, ma_stream_layout layout) +{ + MA_ASSERT(pDevice != NULL); + MA_ASSERT(format != ma_format_unknown); + MA_ASSERT(channels > 0); + + /* Only resize the buffer if necessary. */ + if (pDevice->coreaudio.audioBufferCapInFrames < sizeInFrames) { + AudioBufferList* pNewAudioBufferList; + + pNewAudioBufferList = ma_allocate_AudioBufferList__coreaudio(sizeInFrames, format, channels, layout, &pDevice->pContext->allocationCallbacks); + if (pNewAudioBufferList == NULL) { + return MA_OUT_OF_MEMORY; + } + + /* At this point we'll have a new AudioBufferList and we can free the old one. 
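There's no need to copy the old contents across; this buffer is only used as scratch space for the next AudioUnitRender() call in the capture callback, so the old data is never needed.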
*/ + ma_free(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks); + pDevice->coreaudio.pAudioBufferList = pNewAudioBufferList; + pDevice->coreaudio.audioBufferCapInFrames = sizeInFrames; + } + + /* Getting here means the capacity of the audio buffer is fine. */ + return MA_SUCCESS; +} + + +static OSStatus ma_on_output__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pActionFlags, const AudioTimeStamp* pTimeStamp, UInt32 busNumber, UInt32 frameCount, AudioBufferList* pBufferList) +{ + ma_device* pDevice = (ma_device*)pUserData; + ma_stream_layout layout; + + MA_ASSERT(pDevice != NULL); + + /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "INFO: Output Callback: busNumber=%d, frameCount=%d, mNumberBuffers=%d\n", (int)busNumber, (int)frameCount, (int)pBufferList->mNumberBuffers);*/ + + /* We need to check whether or not we are outputting interleaved or non-interleaved samples. The way we do this is slightly different for each type. */ + layout = ma_stream_layout_interleaved; + if (pBufferList->mBuffers[0].mNumberChannels != pDevice->playback.internalChannels) { + layout = ma_stream_layout_deinterleaved; + } + + if (layout == ma_stream_layout_interleaved) { + /* For now we can assume everything is interleaved. */ + UInt32 iBuffer; + for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; ++iBuffer) { + if (pBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->playback.internalChannels) { + ma_uint32 frameCountForThisBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + if (frameCountForThisBuffer > 0) { + ma_device_handle_backend_data_callback(pDevice, pBufferList->mBuffers[iBuffer].mData, NULL, frameCountForThisBuffer); + } + + /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, " frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", (int)frameCount, (int)pBufferList->mBuffers[iBuffer].mNumberChannels, (int)pBufferList->mBuffers[iBuffer].mDataByteSize);*/ + } else { + /* + This case is where the number of channels in the output buffer does not match our internal channels. It could mean that it's + not interleaved, in which case we can't handle it right now since miniaudio does not yet support non-interleaved streams. We just + output silence here. + */ + MA_ZERO_MEMORY(pBufferList->mBuffers[iBuffer].mData, pBufferList->mBuffers[iBuffer].mDataByteSize); + /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, " WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", (int)frameCount, (int)pBufferList->mBuffers[iBuffer].mNumberChannels, (int)pBufferList->mBuffers[iBuffer].mDataByteSize);*/ + } + } + } else { + /* This is the deinterleaved case. We need to update each buffer in groups of internalChannels. This assumes each buffer is the same size. */ + MA_ASSERT(pDevice->playback.internalChannels <= MA_MAX_CHANNELS); /* This should have been validated at initialization time. */ + + /* + For safety we'll check that the buffer count is a multiple of the internal channel count. If it's not it means something + very strange has happened and we're not going to support it. 
+ */ + if ((pBufferList->mNumberBuffers % pDevice->playback.internalChannels) == 0) { + ma_uint8 tempBuffer[4096]; + UInt32 iBuffer; + + for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; iBuffer += pDevice->playback.internalChannels) { + ma_uint32 frameCountPerBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / ma_get_bytes_per_sample(pDevice->playback.internalFormat); + ma_uint32 framesRemaining = frameCountPerBuffer; + + while (framesRemaining > 0) { + void* ppDeinterleavedBuffers[MA_MAX_CHANNELS]; + ma_uint32 iChannel; + ma_uint32 framesToRead = sizeof(tempBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + if (framesToRead > framesRemaining) { + framesToRead = framesRemaining; + } + + ma_device_handle_backend_data_callback(pDevice, tempBuffer, NULL, framesToRead); + + for (iChannel = 0; iChannel < pDevice->playback.internalChannels; ++iChannel) { + ppDeinterleavedBuffers[iChannel] = (void*)ma_offset_ptr(pBufferList->mBuffers[iBuffer+iChannel].mData, (frameCountPerBuffer - framesRemaining) * ma_get_bytes_per_sample(pDevice->playback.internalFormat)); + } + + ma_deinterleave_pcm_frames(pDevice->playback.internalFormat, pDevice->playback.internalChannels, framesToRead, tempBuffer, ppDeinterleavedBuffers); + + framesRemaining -= framesToRead; + } + } + } + } + + (void)pActionFlags; + (void)pTimeStamp; + (void)busNumber; + (void)frameCount; + + return noErr; +} + +static OSStatus ma_on_input__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pActionFlags, const AudioTimeStamp* pTimeStamp, UInt32 busNumber, UInt32 frameCount, AudioBufferList* pUnusedBufferList) +{ + ma_device* pDevice = (ma_device*)pUserData; + AudioBufferList* pRenderedBufferList; + ma_result result; + ma_stream_layout layout; + ma_uint32 iBuffer; + OSStatus status; + + MA_ASSERT(pDevice != NULL); + + pRenderedBufferList = (AudioBufferList*)pDevice->coreaudio.pAudioBufferList; + MA_ASSERT(pRenderedBufferList); + + /* We need to check whether or not we are outputting interleaved or non-interleaved samples. The way we do this is slightly different for each type. */ + layout = ma_stream_layout_interleaved; + if (pRenderedBufferList->mBuffers[0].mNumberChannels != pDevice->capture.internalChannels) { + layout = ma_stream_layout_deinterleaved; + } + + /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "INFO: Input Callback: busNumber=%d, frameCount=%d, mNumberBuffers=%d\n", (int)busNumber, (int)frameCount, (int)pRenderedBufferList->mNumberBuffers);*/ + + /* + There has been a situation reported where frame count passed into this function is greater than the capacity of + our capture buffer. There doesn't seem to be a reliable way to determine what the maximum frame count will be, + so we need to instead resort to dynamically reallocating our buffer to ensure it's large enough to capture the + number of frames requested by this callback. + */ + result = ma_device_realloc_AudioBufferList__coreaudio(pDevice, frameCount, pDevice->capture.internalFormat, pDevice->capture.internalChannels, layout); + if (result != MA_SUCCESS) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "Failed to allocate AudioBufferList for capture.\n"); + return noErr; + } + + pRenderedBufferList = (AudioBufferList*)pDevice->coreaudio.pAudioBufferList; + MA_ASSERT(pRenderedBufferList); + + /* + When you call AudioUnitRender(), Core Audio tries to be helpful by setting the mDataByteSize to the number of bytes + that were actually rendered. 
The problem with this is that the next call can fail with -50 due to the size no longer + being set to the capacity of the buffer, but instead the size in bytes of the previous render. This will cause a + problem when a future call to this callback specifies a larger number of frames. + + To work around this we need to explicitly set the size of each buffer to their respective size in bytes. + */ + for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; ++iBuffer) { + pRenderedBufferList->mBuffers[iBuffer].mDataByteSize = pDevice->coreaudio.audioBufferCapInFrames * ma_get_bytes_per_sample(pDevice->capture.internalFormat) * pRenderedBufferList->mBuffers[iBuffer].mNumberChannels; + } + + status = ((ma_AudioUnitRender_proc)pDevice->pContext->coreaudio.AudioUnitRender)((AudioUnit)pDevice->coreaudio.audioUnitCapture, pActionFlags, pTimeStamp, busNumber, frameCount, pRenderedBufferList); + if (status != noErr) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, " ERROR: AudioUnitRender() failed with %d.\n", (int)status); + return status; + } + + if (layout == ma_stream_layout_interleaved) { + for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; ++iBuffer) { + if (pRenderedBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->capture.internalChannels) { + ma_device_handle_backend_data_callback(pDevice, NULL, pRenderedBufferList->mBuffers[iBuffer].mData, frameCount); + /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, " mDataByteSize=%d.\n", (int)pRenderedBufferList->mBuffers[iBuffer].mDataByteSize);*/ + } else { + /* + This case is where the number of channels in the output buffer do not match our internal channels. It could mean that it's + not interleaved, in which case we can't handle right now since miniaudio does not yet support non-interleaved streams. + */ + ma_uint8 silentBuffer[4096]; + ma_uint32 framesRemaining; + + MA_ZERO_MEMORY(silentBuffer, sizeof(silentBuffer)); + + framesRemaining = frameCount; + while (framesRemaining > 0) { + ma_uint32 framesToSend = sizeof(silentBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + if (framesToSend > framesRemaining) { + framesToSend = framesRemaining; + } + + ma_device_handle_backend_data_callback(pDevice, NULL, silentBuffer, framesToSend); + + framesRemaining -= framesToSend; + } + + /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, " WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", (int)frameCount, (int)pRenderedBufferList->mBuffers[iBuffer].mNumberChannels, (int)pRenderedBufferList->mBuffers[iBuffer].mDataByteSize);*/ + } + } + } else { + /* This is the deinterleaved case. We need to interleave the audio data before sending it to the client. This assumes each buffer is the same size. */ + MA_ASSERT(pDevice->capture.internalChannels <= MA_MAX_CHANNELS); /* This should have been validated at initialization time. */ + + /* + For safety we'll check that the internal channels is a multiple of the buffer count. If it's not it means something + very strange has happened and we're not going to support it. 
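More precisely, the check below requires mNumberBuffers to be an exact multiple of internalChannels; each group of internalChannels single-channel buffers is then interleaved back into a single stream with ma_interleave_pcm_frames() before being handed to the client.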
+ */ + if ((pRenderedBufferList->mNumberBuffers % pDevice->capture.internalChannels) == 0) { + ma_uint8 tempBuffer[4096]; + for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; iBuffer += pDevice->capture.internalChannels) { + ma_uint32 framesRemaining = frameCount; + while (framesRemaining > 0) { + void* ppDeinterleavedBuffers[MA_MAX_CHANNELS]; + ma_uint32 iChannel; + ma_uint32 framesToSend = sizeof(tempBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + if (framesToSend > framesRemaining) { + framesToSend = framesRemaining; + } + + for (iChannel = 0; iChannel < pDevice->capture.internalChannels; ++iChannel) { + ppDeinterleavedBuffers[iChannel] = (void*)ma_offset_ptr(pRenderedBufferList->mBuffers[iBuffer+iChannel].mData, (frameCount - framesRemaining) * ma_get_bytes_per_sample(pDevice->capture.internalFormat)); + } + + ma_interleave_pcm_frames(pDevice->capture.internalFormat, pDevice->capture.internalChannels, framesToSend, (const void**)ppDeinterleavedBuffers, tempBuffer); + ma_device_handle_backend_data_callback(pDevice, NULL, tempBuffer, framesToSend); + + framesRemaining -= framesToSend; + } + } + } + } + + (void)pActionFlags; + (void)pTimeStamp; + (void)busNumber; + (void)frameCount; + (void)pUnusedBufferList; + + return noErr; +} + +static void on_start_stop__coreaudio(void* pUserData, AudioUnit audioUnit, AudioUnitPropertyID propertyID, AudioUnitScope scope, AudioUnitElement element) +{ + ma_device* pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + /* Don't do anything if it looks like we're just reinitializing due to a device switch. */ + if (((audioUnit == pDevice->coreaudio.audioUnitPlayback) && pDevice->coreaudio.isSwitchingPlaybackDevice) || + ((audioUnit == pDevice->coreaudio.audioUnitCapture) && pDevice->coreaudio.isSwitchingCaptureDevice)) { + return; + } + + /* + There's been a report of a deadlock here when triggered by ma_device_uninit(). It looks like + AudioUnitGetProprty (called below) and AudioComponentInstanceDispose (called in ma_device_uninit) + can try waiting on the same lock. I'm going to try working around this by not calling any Core + Audio APIs in the callback when the device has been stopped or uninitialized. + */ + if (ma_device_get_state(pDevice) == ma_device_state_uninitialized || ma_device_get_state(pDevice) == ma_device_state_stopping || ma_device_get_state(pDevice) == ma_device_state_stopped) { + ma_device__on_notification_stopped(pDevice); + } else { + UInt32 isRunning; + UInt32 isRunningSize = sizeof(isRunning); + OSStatus status = ((ma_AudioUnitGetProperty_proc)pDevice->pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioOutputUnitProperty_IsRunning, scope, element, &isRunning, &isRunningSize); + if (status != noErr) { + goto done; /* Don't really know what to do in this case... just ignore it, I suppose... */ + } + + if (!isRunning) { + /* + The stop event is a bit annoying in Core Audio because it will be called when we automatically switch the default device. Some scenarios to consider: + + 1) When the device is unplugged, this will be called _before_ the default device change notification. + 2) When the device is changed via the default device change notification, this will be called _after_ the switch. + + For case #1, we just check if there's a new default device available. If so, we just ignore the stop event. For case #2 we check a flag. 
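The flag in question is coreaudio.isSwitchingPlaybackDevice / coreaudio.isSwitchingCaptureDevice, which ma_default_device_changed__coreaudio() sets for the duration of the reinitialization.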
+ */ + if (((audioUnit == pDevice->coreaudio.audioUnitPlayback) && pDevice->coreaudio.isDefaultPlaybackDevice) || + ((audioUnit == pDevice->coreaudio.audioUnitCapture) && pDevice->coreaudio.isDefaultCaptureDevice)) { + /* + It looks like the device is switching through an external event, such as the user unplugging the device or changing the default device + via the operating system's sound settings. If we're re-initializing the device, we just terminate because we want the stopping of the + device to be seamless to the client (we don't want them receiving the stopped event and thinking that the device has stopped when it + hasn't!). + */ + if (((audioUnit == pDevice->coreaudio.audioUnitPlayback) && pDevice->coreaudio.isSwitchingPlaybackDevice) || + ((audioUnit == pDevice->coreaudio.audioUnitCapture) && pDevice->coreaudio.isSwitchingCaptureDevice)) { + goto done; + } + + /* + Getting here means the device is not reinitializing which means it may have been unplugged. From what I can see, it looks like Core Audio + will try switching to the new default device seamlessly. We need to somehow find a way to determine whether or not Core Audio will most + likely be successful in switching to the new device. + + TODO: Try to predict if Core Audio will switch devices. If not, the stopped callback needs to be posted. + */ + goto done; + } + + /* Getting here means we need to stop the device. */ + ma_device__on_notification_stopped(pDevice); + } + } + + (void)propertyID; /* Unused. */ + +done: + /* Always signal the stop event. It's possible for the "else" case to get hit which can happen during an interruption. */ + ma_event_signal(&pDevice->coreaudio.stopEvent); +} + +#if defined(MA_APPLE_DESKTOP) +static ma_spinlock g_DeviceTrackingInitLock_CoreAudio = 0; /* A spinlock for mutal exclusion of the init/uninit of the global tracking data. Initialization to 0 is what we need. */ +static ma_uint32 g_DeviceTrackingInitCounter_CoreAudio = 0; +static ma_mutex g_DeviceTrackingMutex_CoreAudio; +static ma_device** g_ppTrackedDevices_CoreAudio = NULL; +static ma_uint32 g_TrackedDeviceCap_CoreAudio = 0; +static ma_uint32 g_TrackedDeviceCount_CoreAudio = 0; + +static OSStatus ma_default_device_changed__coreaudio(AudioObjectID objectID, UInt32 addressCount, const AudioObjectPropertyAddress* pAddresses, void* pUserData) +{ + ma_device_type deviceType; + + /* Not sure if I really need to check this, but it makes me feel better. */ + if (addressCount == 0) { + return noErr; + } + + if (pAddresses[0].mSelector == kAudioHardwarePropertyDefaultOutputDevice) { + deviceType = ma_device_type_playback; + } else if (pAddresses[0].mSelector == kAudioHardwarePropertyDefaultInputDevice) { + deviceType = ma_device_type_capture; + } else { + return noErr; /* Should never hit this. 
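Only the default input and output selectors are registered in ma_context__init_device_tracking__coreaudio(), so any other selector landing here would be unexpected.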
*/ + } + + ma_mutex_lock(&g_DeviceTrackingMutex_CoreAudio); + { + ma_uint32 iDevice; + for (iDevice = 0; iDevice < g_TrackedDeviceCount_CoreAudio; iDevice += 1) { + ma_result reinitResult; + ma_device* pDevice; + + pDevice = g_ppTrackedDevices_CoreAudio[iDevice]; + if (pDevice->type == deviceType || pDevice->type == ma_device_type_duplex) { + if (deviceType == ma_device_type_playback) { + pDevice->coreaudio.isSwitchingPlaybackDevice = MA_TRUE; + reinitResult = ma_device_reinit_internal__coreaudio(pDevice, deviceType, MA_TRUE); + pDevice->coreaudio.isSwitchingPlaybackDevice = MA_FALSE; + } else { + pDevice->coreaudio.isSwitchingCaptureDevice = MA_TRUE; + reinitResult = ma_device_reinit_internal__coreaudio(pDevice, deviceType, MA_TRUE); + pDevice->coreaudio.isSwitchingCaptureDevice = MA_FALSE; + } + + if (reinitResult == MA_SUCCESS) { + ma_device__post_init_setup(pDevice, deviceType); + + /* Restart the device if required. If this fails we need to stop the device entirely. */ + if (ma_device_get_state(pDevice) == ma_device_state_started) { + OSStatus status; + if (deviceType == ma_device_type_playback) { + status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + if (status != noErr) { + if (pDevice->type == ma_device_type_duplex) { + ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + } + ma_device__set_state(pDevice, ma_device_state_stopped); + } + } else if (deviceType == ma_device_type_capture) { + status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + if (status != noErr) { + if (pDevice->type == ma_device_type_duplex) { + ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + } + ma_device__set_state(pDevice, ma_device_state_stopped); + } + } + } + + ma_device__on_notification_rerouted(pDevice); + } + } + } + } + ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio); + + /* Unused parameters. */ + (void)objectID; + (void)pUserData; + + return noErr; +} + +static ma_result ma_context__init_device_tracking__coreaudio(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + + ma_spinlock_lock(&g_DeviceTrackingInitLock_CoreAudio); + { + /* Don't do anything if we've already initializd device tracking. 
*/ + if (g_DeviceTrackingInitCounter_CoreAudio == 0) { + AudioObjectPropertyAddress propAddress; + propAddress.mScope = kAudioObjectPropertyScopeGlobal; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + ma_mutex_init(&g_DeviceTrackingMutex_CoreAudio); + + propAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice; + ((ma_AudioObjectAddPropertyListener_proc)pContext->coreaudio.AudioObjectAddPropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL); + + propAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice; + ((ma_AudioObjectAddPropertyListener_proc)pContext->coreaudio.AudioObjectAddPropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL); + + } + g_DeviceTrackingInitCounter_CoreAudio += 1; + } + ma_spinlock_unlock(&g_DeviceTrackingInitLock_CoreAudio); + + return MA_SUCCESS; +} + +static ma_result ma_context__uninit_device_tracking__coreaudio(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + + ma_spinlock_lock(&g_DeviceTrackingInitLock_CoreAudio); + { + if (g_DeviceTrackingInitCounter_CoreAudio > 0) + g_DeviceTrackingInitCounter_CoreAudio -= 1; + + if (g_DeviceTrackingInitCounter_CoreAudio == 0) { + AudioObjectPropertyAddress propAddress; + propAddress.mScope = kAudioObjectPropertyScopeGlobal; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + propAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice; + ((ma_AudioObjectRemovePropertyListener_proc)pContext->coreaudio.AudioObjectRemovePropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL); + + propAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice; + ((ma_AudioObjectRemovePropertyListener_proc)pContext->coreaudio.AudioObjectRemovePropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL); + + /* At this point there should be no tracked devices. If not there's an error somewhere. */ + if (g_ppTrackedDevices_CoreAudio != NULL) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "You have uninitialized all contexts while an associated device is still active."); + ma_spinlock_unlock(&g_DeviceTrackingInitLock_CoreAudio); + return MA_INVALID_OPERATION; + } + + ma_mutex_uninit(&g_DeviceTrackingMutex_CoreAudio); + } + } + ma_spinlock_unlock(&g_DeviceTrackingInitLock_CoreAudio); + + return MA_SUCCESS; +} + +static ma_result ma_device__track__coreaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + ma_mutex_lock(&g_DeviceTrackingMutex_CoreAudio); + { + /* Allocate memory if required. 
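The tracked device array grows by doubling, starting from a capacity of 1, and is freed again by ma_device__untrack__coreaudio() once the last device has been removed.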
*/ + if (g_TrackedDeviceCap_CoreAudio <= g_TrackedDeviceCount_CoreAudio) { + ma_uint32 newCap; + ma_device** ppNewDevices; + + newCap = g_TrackedDeviceCap_CoreAudio * 2; + if (newCap == 0) { + newCap = 1; + } + + ppNewDevices = (ma_device**)ma_realloc(g_ppTrackedDevices_CoreAudio, sizeof(*g_ppTrackedDevices_CoreAudio)*newCap, &pDevice->pContext->allocationCallbacks); + if (ppNewDevices == NULL) { + ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio); + return MA_OUT_OF_MEMORY; + } + + g_ppTrackedDevices_CoreAudio = ppNewDevices; + g_TrackedDeviceCap_CoreAudio = newCap; + } + + g_ppTrackedDevices_CoreAudio[g_TrackedDeviceCount_CoreAudio] = pDevice; + g_TrackedDeviceCount_CoreAudio += 1; + } + ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio); + + return MA_SUCCESS; +} + +static ma_result ma_device__untrack__coreaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + ma_mutex_lock(&g_DeviceTrackingMutex_CoreAudio); + { + ma_uint32 iDevice; + for (iDevice = 0; iDevice < g_TrackedDeviceCount_CoreAudio; iDevice += 1) { + if (g_ppTrackedDevices_CoreAudio[iDevice] == pDevice) { + /* We've found the device. We now need to remove it from the list. */ + ma_uint32 jDevice; + for (jDevice = iDevice; jDevice < g_TrackedDeviceCount_CoreAudio-1; jDevice += 1) { + g_ppTrackedDevices_CoreAudio[jDevice] = g_ppTrackedDevices_CoreAudio[jDevice+1]; + } + + g_TrackedDeviceCount_CoreAudio -= 1; + + /* If there's nothing else in the list we need to free memory. */ + if (g_TrackedDeviceCount_CoreAudio == 0) { + ma_free(g_ppTrackedDevices_CoreAudio, &pDevice->pContext->allocationCallbacks); + g_ppTrackedDevices_CoreAudio = NULL; + g_TrackedDeviceCap_CoreAudio = 0; + } + + break; + } + } + } + ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio); + + return MA_SUCCESS; +} +#endif + +#if defined(MA_APPLE_MOBILE) +@interface ma_ios_notification_handler:NSObject { + ma_device* m_pDevice; +} +@end + +@implementation ma_ios_notification_handler +-(id)init:(ma_device*)pDevice +{ + self = [super init]; + m_pDevice = pDevice; + + /* For route changes. */ + [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(handle_route_change:) name:AVAudioSessionRouteChangeNotification object:[AVAudioSession sharedInstance]]; + + /* For interruptions. */ + [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(handle_interruption:) name:AVAudioSessionInterruptionNotification object:[AVAudioSession sharedInstance]]; + + return self; +} + +-(void)dealloc +{ + [self remove_handler]; + +#if defined(__has_feature) +#if !__has_feature(objc_arc) + [super dealloc]; +#endif +#endif +} + +-(void)remove_handler +{ + [[NSNotificationCenter defaultCenter] removeObserver:self name:AVAudioSessionRouteChangeNotification object:nil]; + [[NSNotificationCenter defaultCenter] removeObserver:self name:AVAudioSessionInterruptionNotification object:nil]; +} + +-(void)handle_interruption:(NSNotification*)pNotification +{ + NSInteger type = [[[pNotification userInfo] objectForKey:AVAudioSessionInterruptionTypeKey] integerValue]; + switch (type) + { + case AVAudioSessionInterruptionTypeBegan: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Interruption: AVAudioSessionInterruptionTypeBegan\n"); + + /* + Core Audio will have stopped the internal device automatically, but we need explicitly + stop it at a higher level to ensure miniaudio-specific state is updated for consistency. 
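Note that when the interruption ends we only post the interruption-ended notification below; restarting the device is left to the application.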
+ */ + ma_device_stop(m_pDevice); + + /* + Fire the notification after the device has been stopped to ensure it's in the correct + state when the notification handler is invoked. + */ + ma_device__on_notification_interruption_began(m_pDevice); + } break; + + case AVAudioSessionInterruptionTypeEnded: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Interruption: AVAudioSessionInterruptionTypeEnded\n"); + ma_device__on_notification_interruption_ended(m_pDevice); + } break; + } +} + +-(void)handle_route_change:(NSNotification*)pNotification +{ + AVAudioSession* pSession = [AVAudioSession sharedInstance]; + + NSInteger reason = [[[pNotification userInfo] objectForKey:AVAudioSessionRouteChangeReasonKey] integerValue]; + switch (reason) + { + case AVAudioSessionRouteChangeReasonOldDeviceUnavailable: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonOldDeviceUnavailable\n"); + } break; + + case AVAudioSessionRouteChangeReasonNewDeviceAvailable: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonNewDeviceAvailable\n"); + } break; + + case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory\n"); + } break; + + case AVAudioSessionRouteChangeReasonWakeFromSleep: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonWakeFromSleep\n"); + } break; + + case AVAudioSessionRouteChangeReasonOverride: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonOverride\n"); + } break; + + case AVAudioSessionRouteChangeReasonCategoryChange: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonCategoryChange\n"); + } break; + + case AVAudioSessionRouteChangeReasonUnknown: + default: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonUnknown\n"); + } break; + } + + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_DEBUG, "[Core Audio] Changing Route. inputNumberChannels=%d; outputNumberOfChannels=%d\n", (int)pSession.inputNumberOfChannels, (int)pSession.outputNumberOfChannels); + + /* Let the application know about the route change. */ + ma_device__on_notification_rerouted(m_pDevice); +} +@end +#endif + +static ma_result ma_device_uninit__coreaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + MA_ASSERT(ma_device_get_state(pDevice) == ma_device_state_uninitialized); + +#if defined(MA_APPLE_DESKTOP) + /* + Make sure we're no longer tracking the device. It doesn't matter if we call this for a non-default device because it'll + just gracefully ignore it. 
+ */ + ma_device__untrack__coreaudio(pDevice); +#endif +#if defined(MA_APPLE_MOBILE) + if (pDevice->coreaudio.pNotificationHandler != NULL) { + ma_ios_notification_handler* pNotificationHandler = (MA_BRIDGE_TRANSFER ma_ios_notification_handler*)pDevice->coreaudio.pNotificationHandler; + [pNotificationHandler remove_handler]; + } +#endif + + if (pDevice->coreaudio.audioUnitCapture != NULL) { + ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + } + if (pDevice->coreaudio.audioUnitPlayback != NULL) { + ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + } + + if (pDevice->coreaudio.pAudioBufferList) { + ma_free(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks); + } + + return MA_SUCCESS; +} + +typedef struct +{ + ma_bool32 allowNominalSampleRateChange; + + /* Input. */ + ma_format formatIn; + ma_uint32 channelsIn; + ma_uint32 sampleRateIn; + ma_channel channelMapIn[MA_MAX_CHANNELS]; + ma_uint32 periodSizeInFramesIn; + ma_uint32 periodSizeInMillisecondsIn; + ma_uint32 periodsIn; + ma_share_mode shareMode; + ma_performance_profile performanceProfile; + ma_bool32 registerStopEvent; + + /* Output. */ +#if defined(MA_APPLE_DESKTOP) + AudioObjectID deviceObjectID; +#endif + AudioComponent component; + AudioUnit audioUnit; + AudioBufferList* pAudioBufferList; /* Only used for input devices. */ + ma_format formatOut; + ma_uint32 channelsOut; + ma_uint32 sampleRateOut; + ma_channel channelMapOut[MA_MAX_CHANNELS]; + ma_uint32 periodSizeInFramesOut; + ma_uint32 periodsOut; + char deviceName[256]; +} ma_device_init_internal_data__coreaudio; + +static ma_result ma_device_init_internal__coreaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_init_internal_data__coreaudio* pData, void* pDevice_DoNotReference) /* <-- pDevice is typed as void* intentionally so as to avoid accidentally referencing it. */ +{ + ma_result result; + OSStatus status; + UInt32 enableIOFlag; + AudioStreamBasicDescription bestFormat; + UInt32 actualPeriodSizeInFrames; + AURenderCallbackStruct callbackInfo; +#if defined(MA_APPLE_DESKTOP) + AudioObjectID deviceObjectID; +#endif + + /* This API should only be used for a single device type: playback or capture. No full-duplex mode. */ + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + MA_ASSERT(pContext != NULL); + MA_ASSERT(deviceType == ma_device_type_playback || deviceType == ma_device_type_capture); + +#if defined(MA_APPLE_DESKTOP) + pData->deviceObjectID = 0; +#endif + pData->component = NULL; + pData->audioUnit = NULL; + pData->pAudioBufferList = NULL; + +#if defined(MA_APPLE_DESKTOP) + result = ma_find_AudioObjectID(pContext, deviceType, pDeviceID, &deviceObjectID); + if (result != MA_SUCCESS) { + return result; + } + + pData->deviceObjectID = deviceObjectID; +#endif + + /* Core audio doesn't really use the notion of a period so we can leave this unmodified, but not too over the top. */ + pData->periodsOut = pData->periodsIn; + if (pData->periodsOut == 0) { + pData->periodsOut = MA_DEFAULT_PERIODS; + } + if (pData->periodsOut > 16) { + pData->periodsOut = 16; + } + + + /* Audio unit. 
*/
+    status = ((ma_AudioComponentInstanceNew_proc)pContext->coreaudio.AudioComponentInstanceNew)((AudioComponent)pContext->coreaudio.component, (AudioUnit*)&pData->audioUnit);
+    if (status != noErr) {
+        return ma_result_from_OSStatus(status);
+    }
+
+
+    /* The input/output buses need to be explicitly enabled and disabled. We set the flag based on the output unit first, then we just swap it for input. */
+    enableIOFlag = 1;
+    if (deviceType == ma_device_type_capture) {
+        enableIOFlag = 0;
+    }
+
+    status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, MA_COREAUDIO_OUTPUT_BUS, &enableIOFlag, sizeof(enableIOFlag));
+    if (status != noErr) {
+        ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+        return ma_result_from_OSStatus(status);
+    }
+
+    enableIOFlag = (enableIOFlag == 0) ? 1 : 0;
+    status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, MA_COREAUDIO_INPUT_BUS, &enableIOFlag, sizeof(enableIOFlag));
+    if (status != noErr) {
+        ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+        return ma_result_from_OSStatus(status);
+    }
+
+
+    /* Set the device to use with this audio unit. This is only used on desktop since we are using defaults on mobile. */
+#if defined(MA_APPLE_DESKTOP)
+    status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &deviceObjectID, sizeof(deviceObjectID));
+    if (status != noErr) {
+        ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+        return ma_result_from_OSStatus(status);
+    }
+#else
+    /*
+    For some reason it looks like Apple is only allowing selection of the input device. There does not appear to be any way to change
+    the default output route. I have no idea why this is like this, but for now we'll only be able to configure capture devices.
+    */
+    if (pDeviceID != NULL) {
+        if (deviceType == ma_device_type_capture) {
+            ma_bool32 found = MA_FALSE;
+            NSArray *pInputs = [[[AVAudioSession sharedInstance] currentRoute] inputs];
+            for (AVAudioSessionPortDescription* pPortDesc in pInputs) {
+                if (strcmp(pDeviceID->coreaudio, [pPortDesc.UID UTF8String]) == 0) {
+                    [[AVAudioSession sharedInstance] setPreferredInput:pPortDesc error:nil];
+                    found = MA_TRUE;
+                    break;
+                }
+            }
+
+            if (found == MA_FALSE) {
+                ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);    /* Make sure the audio unit is not leaked on this error path. */
+                return MA_DOES_NOT_EXIST;
+            }
+        }
+    }
+#endif
+
+    /*
+    Format. This is the hardest part of initialization because there are a few variables to take into account.
+      1) The format must be supported by the device.
+      2) The format must be supported by miniaudio.
+      3) There's a priority that miniaudio prefers.
+
+    Ideally we would like to use a format that's as close to the hardware as possible so we can get as close to a passthrough as possible. The
+    most important property is the sample rate. miniaudio can do format conversion for any sample rate and channel count, but cannot do the same
+    for the sample data format. If the sample data format is not supported by miniaudio it must be ignored completely.
+
+    On mobile platforms this is a bit different. We just force the use of whatever the audio unit's current format is set to.
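+
+    For reference, a hedged sketch of the kind of AudioStreamBasicDescription being negotiated below. The
+    values are illustrative (interleaved 32-bit float, stereo, 48 kHz) and are not taken from this code:
+
+        AudioStreamBasicDescription asbd;
+        asbd.mSampleRate       = 48000;
+        asbd.mFormatID         = kAudioFormatLinearPCM;
+        asbd.mFormatFlags      = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked;
+        asbd.mChannelsPerFrame = 2;
+        asbd.mBitsPerChannel   = 32;
+        asbd.mBytesPerFrame    = (asbd.mBitsPerChannel / 8) * asbd.mChannelsPerFrame;
+        asbd.mFramesPerPacket  = 1;
+        asbd.mBytesPerPacket   = asbd.mBytesPerFrame * asbd.mFramesPerPacket;
+        asbd.mReserved         = 0;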
+ */ + { + AudioStreamBasicDescription origFormat; + UInt32 origFormatSize = sizeof(origFormat); + AudioUnitScope formatScope = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output; + AudioUnitElement formatElement = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS; + + if (deviceType == ma_device_type_playback) { + status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, MA_COREAUDIO_OUTPUT_BUS, &origFormat, &origFormatSize); + } else { + status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, MA_COREAUDIO_INPUT_BUS, &origFormat, &origFormatSize); + } + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + +#if defined(MA_APPLE_DESKTOP) + result = ma_find_best_format__coreaudio(pContext, deviceObjectID, deviceType, pData->formatIn, pData->channelsIn, pData->sampleRateIn, &origFormat, &bestFormat); + if (result != MA_SUCCESS) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return result; + } + + /* + Technical Note TN2091: Device input using the HAL Output Audio Unit + https://developer.apple.com/library/archive/technotes/tn2091/_index.html + + This documentation says the following: + + The internal AudioConverter can handle any *simple* conversion. Typically, this means that a client can specify ANY + variant of the PCM formats. Consequently, the device's sample rate should match the desired sample rate. If sample rate + conversion is needed, it can be accomplished by buffering the input and converting the data on a separate thread with + another AudioConverter. + + The important part here is the mention that it can handle *simple* conversions, which does *not* include sample rate. We + therefore want to ensure the sample rate stays consistent. This document is specifically for input, but I'm going to play it + safe and apply the same rule to output as well. + + I have tried going against the documentation by setting the sample rate anyway, but this just results in AudioUnitRender() + returning a result code of -10863. I have also tried changing the format directly on the input scope on the input bus, but + this just results in `ca_require: IsStreamFormatWritable(inScope, inElement) NotWritable` when trying to set the format. + + Something that does seem to work, however, has been setting the nominal sample rate on the deivce object. The problem with + this, however, is that it actually changes the sample rate at the operating system level and not just the application. This + could be intrusive to the user, however, so I don't think it's wise to make this the default. Instead I'm making this a + configuration option. When the `coreaudio.allowNominalSampleRateChange` config option is set to true, changing the sample + rate will be allowed. Otherwise it'll be fixed to the current sample rate. To check the system-defined sample rate, run + the Audio MIDI Setup program that comes installed on macOS and observe how the sample rate changes as the sample rate is + changed by miniaudio. 
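+
+        As a rough illustration of how this option surfaces to applications (assuming the public
+        ma_device_config fields; the local variable names are illustrative):
+
+            ma_device device;
+            ma_device_config config = ma_device_config_init(ma_device_type_playback);
+            config.sampleRate                             = 96000;
+            config.coreaudio.allowNominalSampleRateChange = MA_TRUE;
+            if (ma_device_init(NULL, &config, &device) != MA_SUCCESS) {
+                return -1;
+            }
+
+        With the flag left at its default of MA_FALSE, the requested rate is simply converted to and
+        from the device's current nominal rate by miniaudio's resampler.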
+ */ + if (pData->allowNominalSampleRateChange) { + AudioValueRange sampleRateRange; + AudioObjectPropertyAddress propAddress; + + sampleRateRange.mMinimum = bestFormat.mSampleRate; + sampleRateRange.mMaximum = bestFormat.mSampleRate; + + propAddress.mSelector = kAudioDevicePropertyNominalSampleRate; + propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + status = ((ma_AudioObjectSetPropertyData_proc)pContext->coreaudio.AudioObjectSetPropertyData)(deviceObjectID, &propAddress, 0, NULL, sizeof(sampleRateRange), &sampleRateRange); + if (status != noErr) { + bestFormat.mSampleRate = origFormat.mSampleRate; + } + } else { + bestFormat.mSampleRate = origFormat.mSampleRate; + } + + status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, sizeof(bestFormat)); + if (status != noErr) { + /* We failed to set the format, so fall back to the current format of the audio unit. */ + bestFormat = origFormat; + } +#else + bestFormat = origFormat; + + /* + Sample rate is a little different here because for some reason kAudioUnitProperty_StreamFormat returns 0... Oh well. We need to instead try + setting the sample rate to what the user has requested and then just see the results of it. Need to use some Objective-C here for this since + it depends on Apple's AVAudioSession API. To do this we just get the shared AVAudioSession instance and then set it. Note that from what I + can tell, it looks like the sample rate is shared between playback and capture for everything. + */ + @autoreleasepool { + AVAudioSession* pAudioSession = [AVAudioSession sharedInstance]; + MA_ASSERT(pAudioSession != NULL); + + [pAudioSession setPreferredSampleRate:(double)pData->sampleRateIn error:nil]; + bestFormat.mSampleRate = pAudioSession.sampleRate; + + /* + I've had a report that the channel count returned by AudioUnitGetProperty above is inconsistent with + AVAudioSession outputNumberOfChannels. I'm going to try using the AVAudioSession values instead. + */ + if (deviceType == ma_device_type_playback) { + bestFormat.mChannelsPerFrame = (UInt32)pAudioSession.outputNumberOfChannels; + } + if (deviceType == ma_device_type_capture) { + bestFormat.mChannelsPerFrame = (UInt32)pAudioSession.inputNumberOfChannels; + } + } + + status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, sizeof(bestFormat)); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } +#endif + + result = ma_format_from_AudioStreamBasicDescription(&bestFormat, &pData->formatOut); + if (result != MA_SUCCESS) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return result; + } + + if (pData->formatOut == ma_format_unknown) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return MA_FORMAT_NOT_SUPPORTED; + } + + pData->channelsOut = bestFormat.mChannelsPerFrame; + pData->sampleRateOut = bestFormat.mSampleRate; + } + + /* Clamp the channel count for safety. 
*/ + if (pData->channelsOut > MA_MAX_CHANNELS) { + pData->channelsOut = MA_MAX_CHANNELS; + } + + /* + Internal channel map. This is weird in my testing. If I use the AudioObject to get the + channel map, the channel descriptions are set to "Unknown" for some reason. To work around + this it looks like retrieving it from the AudioUnit will work. However, and this is where + it gets weird, it doesn't seem to work with capture devices, nor at all on iOS... Therefore + I'm going to fall back to a default assumption in these cases. + */ +#if defined(MA_APPLE_DESKTOP) + result = ma_get_AudioUnit_channel_map(pContext, pData->audioUnit, deviceType, pData->channelMapOut, pData->channelsOut); + if (result != MA_SUCCESS) { +#if 0 + /* Try falling back to the channel map from the AudioObject. */ + result = ma_get_AudioObject_channel_map(pContext, deviceObjectID, deviceType, pData->channelMapOut, pData->channelsOut); + if (result != MA_SUCCESS) { + return result; + } +#else + /* Fall back to default assumptions. */ + ma_channel_map_init_standard(ma_standard_channel_map_default, pData->channelMapOut, ma_countof(pData->channelMapOut), pData->channelsOut); +#endif + } +#else + /* TODO: Figure out how to get the channel map using AVAudioSession. */ + ma_channel_map_init_standard(ma_standard_channel_map_default, pData->channelMapOut, ma_countof(pData->channelMapOut), pData->channelsOut); +#endif + + + /* Buffer size. Not allowing this to be configurable on iOS. */ + if (pData->periodSizeInFramesIn == 0) { + if (pData->periodSizeInMillisecondsIn == 0) { + if (pData->performanceProfile == ma_performance_profile_low_latency) { + actualPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY, pData->sampleRateOut); + } else { + actualPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE, pData->sampleRateOut); + } + } else { + actualPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pData->periodSizeInMillisecondsIn, pData->sampleRateOut); + } + } else { + actualPeriodSizeInFrames = pData->periodSizeInFramesIn; + } + +#if defined(MA_APPLE_DESKTOP) + result = ma_set_AudioObject_buffer_size_in_frames(pContext, deviceObjectID, deviceType, &actualPeriodSizeInFrames); + if (result != MA_SUCCESS) { + return result; + } +#else + /* + On iOS, the size of the IO buffer needs to be specified in seconds and is a floating point + number. I don't trust any potential truncation errors due to converting from float to integer + so I'm going to explicitly set the actual period size to the next power of 2. + */ + @autoreleasepool { + AVAudioSession* pAudioSession = [AVAudioSession sharedInstance]; + MA_ASSERT(pAudioSession != NULL); + + [pAudioSession setPreferredIOBufferDuration:((float)actualPeriodSizeInFrames / pAudioSession.sampleRate) error:nil]; + actualPeriodSizeInFrames = ma_next_power_of_2((ma_uint32)(pAudioSession.IOBufferDuration * pAudioSession.sampleRate)); + } +#endif + + + /* + During testing I discovered that the buffer size can be too big. You'll get an error like this: + + kAudioUnitErr_TooManyFramesToProcess : inFramesToProcess=4096, mMaxFramesPerSlice=512 + + Note how inFramesToProcess is smaller than mMaxFramesPerSlice. To fix, we need to set kAudioUnitProperty_MaximumFramesPerSlice to that + of the size of our buffer, or do it the other way around and set our buffer size to the kAudioUnitProperty_MaximumFramesPerSlice. 
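+
+    A hedged worked example of the numbers involved (values illustrative): with a 10ms period at a
+    48000Hz internal sample rate the calculation above gives
+
+        periodSizeInFrames = (10 * 48000) / 1000 = 480 frames
+
+    and on iOS the final value is then re-derived from the granted IOBufferDuration and rounded up to
+    the next power of two (512 in this example). Whatever value comes out of that is what gets written
+    to kAudioUnitProperty_MaximumFramesPerSlice below.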
+ */ + status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &actualPeriodSizeInFrames, sizeof(actualPeriodSizeInFrames)); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + + pData->periodSizeInFramesOut = (ma_uint32)actualPeriodSizeInFrames; + + /* We need a buffer list if this is an input device. We render into this in the input callback. */ + if (deviceType == ma_device_type_capture) { + ma_bool32 isInterleaved = (bestFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) == 0; + AudioBufferList* pBufferList; + + pBufferList = ma_allocate_AudioBufferList__coreaudio(pData->periodSizeInFramesOut, pData->formatOut, pData->channelsOut, (isInterleaved) ? ma_stream_layout_interleaved : ma_stream_layout_deinterleaved, &pContext->allocationCallbacks); + if (pBufferList == NULL) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return MA_OUT_OF_MEMORY; + } + + pData->pAudioBufferList = pBufferList; + } + + /* Callbacks. */ + callbackInfo.inputProcRefCon = pDevice_DoNotReference; + if (deviceType == ma_device_type_playback) { + callbackInfo.inputProc = ma_on_output__coreaudio; + status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, 0, &callbackInfo, sizeof(callbackInfo)); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + } else { + callbackInfo.inputProc = ma_on_input__coreaudio; + status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callbackInfo, sizeof(callbackInfo)); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + } + + /* We need to listen for stop events. */ + if (pData->registerStopEvent) { + status = ((ma_AudioUnitAddPropertyListener_proc)pContext->coreaudio.AudioUnitAddPropertyListener)(pData->audioUnit, kAudioOutputUnitProperty_IsRunning, on_start_stop__coreaudio, pDevice_DoNotReference); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + } + + /* Initialize the audio unit. */ + status = ((ma_AudioUnitInitialize_proc)pContext->coreaudio.AudioUnitInitialize)(pData->audioUnit); + if (status != noErr) { + ma_free(pData->pAudioBufferList, &pContext->allocationCallbacks); + pData->pAudioBufferList = NULL; + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + + /* Grab the name. 
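+
+    Applications don't read this internal field directly; they would normally go through the public
+    API instead. A rough sketch, assuming ma_device_get_info() is available in this form and that
+    `device` is an initialized ma_device:
+
+        ma_device_info info;
+        if (ma_device_get_info(&device, ma_device_type_playback, &info) == MA_SUCCESS) {
+            printf("Playback device: %s\n", info.name);
+        }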
*/ +#if defined(MA_APPLE_DESKTOP) + ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(pData->deviceName), pData->deviceName); +#else + if (deviceType == ma_device_type_playback) { + ma_strcpy_s(pData->deviceName, sizeof(pData->deviceName), MA_DEFAULT_PLAYBACK_DEVICE_NAME); + } else { + ma_strcpy_s(pData->deviceName, sizeof(pData->deviceName), MA_DEFAULT_CAPTURE_DEVICE_NAME); + } +#endif + + return result; +} + +#if defined(MA_APPLE_DESKTOP) +static ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit) +{ + ma_device_init_internal_data__coreaudio data; + ma_result result; + + /* This should only be called for playback or capture, not duplex. */ + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + data.allowNominalSampleRateChange = MA_FALSE; /* Don't change the nominal sample rate when switching devices. */ + + if (deviceType == ma_device_type_capture) { + data.formatIn = pDevice->capture.format; + data.channelsIn = pDevice->capture.channels; + data.sampleRateIn = pDevice->sampleRate; + MA_COPY_MEMORY(data.channelMapIn, pDevice->capture.channelMap, sizeof(pDevice->capture.channelMap)); + data.shareMode = pDevice->capture.shareMode; + data.performanceProfile = pDevice->coreaudio.originalPerformanceProfile; + data.registerStopEvent = MA_TRUE; + + if (disposePreviousAudioUnit) { + ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + } + if (pDevice->coreaudio.pAudioBufferList) { + ma_free(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks); + } + } else if (deviceType == ma_device_type_playback) { + data.formatIn = pDevice->playback.format; + data.channelsIn = pDevice->playback.channels; + data.sampleRateIn = pDevice->sampleRate; + MA_COPY_MEMORY(data.channelMapIn, pDevice->playback.channelMap, sizeof(pDevice->playback.channelMap)); + data.shareMode = pDevice->playback.shareMode; + data.performanceProfile = pDevice->coreaudio.originalPerformanceProfile; + data.registerStopEvent = (pDevice->type != ma_device_type_duplex); + + if (disposePreviousAudioUnit) { + ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + } + } + data.periodSizeInFramesIn = pDevice->coreaudio.originalPeriodSizeInFrames; + data.periodSizeInMillisecondsIn = pDevice->coreaudio.originalPeriodSizeInMilliseconds; + data.periodsIn = pDevice->coreaudio.originalPeriods; + + /* Need at least 3 periods for duplex. 
*/ + if (data.periodsIn < 3 && pDevice->type == ma_device_type_duplex) { + data.periodsIn = 3; + } + + result = ma_device_init_internal__coreaudio(pDevice->pContext, deviceType, NULL, &data, (void*)pDevice); + if (result != MA_SUCCESS) { + return result; + } + + if (deviceType == ma_device_type_capture) { +#if defined(MA_APPLE_DESKTOP) + pDevice->coreaudio.deviceObjectIDCapture = (ma_uint32)data.deviceObjectID; + ma_get_AudioObject_uid(pDevice->pContext, pDevice->coreaudio.deviceObjectIDCapture, sizeof(pDevice->capture.id.coreaudio), pDevice->capture.id.coreaudio); +#endif + pDevice->coreaudio.audioUnitCapture = (ma_ptr)data.audioUnit; + pDevice->coreaudio.pAudioBufferList = (ma_ptr)data.pAudioBufferList; + pDevice->coreaudio.audioBufferCapInFrames = data.periodSizeInFramesOut; + + pDevice->capture.internalFormat = data.formatOut; + pDevice->capture.internalChannels = data.channelsOut; + pDevice->capture.internalSampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDevice->capture.internalPeriodSizeInFrames = data.periodSizeInFramesOut; + pDevice->capture.internalPeriods = data.periodsOut; + } else if (deviceType == ma_device_type_playback) { +#if defined(MA_APPLE_DESKTOP) + pDevice->coreaudio.deviceObjectIDPlayback = (ma_uint32)data.deviceObjectID; + ma_get_AudioObject_uid(pDevice->pContext, pDevice->coreaudio.deviceObjectIDPlayback, sizeof(pDevice->playback.id.coreaudio), pDevice->playback.id.coreaudio); +#endif + pDevice->coreaudio.audioUnitPlayback = (ma_ptr)data.audioUnit; + + pDevice->playback.internalFormat = data.formatOut; + pDevice->playback.internalChannels = data.channelsOut; + pDevice->playback.internalSampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDevice->playback.internalPeriodSizeInFrames = data.periodSizeInFramesOut; + pDevice->playback.internalPeriods = data.periodsOut; + } + + return MA_SUCCESS; +} +#endif /* MA_APPLE_DESKTOP */ + +static ma_result ma_device_init__coreaudio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pConfig != NULL); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* No exclusive mode with the Core Audio backend for now. */ + if (((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + /* Capture needs to be initialized first. 
*/ + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_device_init_internal_data__coreaudio data; + data.allowNominalSampleRateChange = pConfig->coreaudio.allowNominalSampleRateChange; + data.formatIn = pDescriptorCapture->format; + data.channelsIn = pDescriptorCapture->channels; + data.sampleRateIn = pDescriptorCapture->sampleRate; + MA_COPY_MEMORY(data.channelMapIn, pDescriptorCapture->channelMap, sizeof(pDescriptorCapture->channelMap)); + data.periodSizeInFramesIn = pDescriptorCapture->periodSizeInFrames; + data.periodSizeInMillisecondsIn = pDescriptorCapture->periodSizeInMilliseconds; + data.periodsIn = pDescriptorCapture->periodCount; + data.shareMode = pDescriptorCapture->shareMode; + data.performanceProfile = pConfig->performanceProfile; + data.registerStopEvent = MA_TRUE; + + /* Need at least 3 periods for duplex. */ + if (data.periodsIn < 3 && pConfig->deviceType == ma_device_type_duplex) { + data.periodsIn = 3; + } + + result = ma_device_init_internal__coreaudio(pDevice->pContext, ma_device_type_capture, pDescriptorCapture->pDeviceID, &data, (void*)pDevice); + if (result != MA_SUCCESS) { + return result; + } + + pDevice->coreaudio.isDefaultCaptureDevice = (pConfig->capture.pDeviceID == NULL); +#if defined(MA_APPLE_DESKTOP) + pDevice->coreaudio.deviceObjectIDCapture = (ma_uint32)data.deviceObjectID; +#endif + pDevice->coreaudio.audioUnitCapture = (ma_ptr)data.audioUnit; + pDevice->coreaudio.pAudioBufferList = (ma_ptr)data.pAudioBufferList; + pDevice->coreaudio.audioBufferCapInFrames = data.periodSizeInFramesOut; + pDevice->coreaudio.originalPeriodSizeInFrames = pDescriptorCapture->periodSizeInFrames; + pDevice->coreaudio.originalPeriodSizeInMilliseconds = pDescriptorCapture->periodSizeInMilliseconds; + pDevice->coreaudio.originalPeriods = pDescriptorCapture->periodCount; + pDevice->coreaudio.originalPerformanceProfile = pConfig->performanceProfile; + + pDescriptorCapture->format = data.formatOut; + pDescriptorCapture->channels = data.channelsOut; + pDescriptorCapture->sampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDescriptorCapture->channelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDescriptorCapture->periodSizeInFrames = data.periodSizeInFramesOut; + pDescriptorCapture->periodCount = data.periodsOut; + +#if defined(MA_APPLE_DESKTOP) + ma_get_AudioObject_uid(pDevice->pContext, pDevice->coreaudio.deviceObjectIDCapture, sizeof(pDevice->capture.id.coreaudio), pDevice->capture.id.coreaudio); + + /* + If we are using the default device we'll need to listen for changes to the system's default device so we can seemlessly + switch the device in the background. + */ + if (pConfig->capture.pDeviceID == NULL) { + ma_device__track__coreaudio(pDevice); + } +#endif + } + + /* Playback. */ + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_device_init_internal_data__coreaudio data; + data.allowNominalSampleRateChange = pConfig->coreaudio.allowNominalSampleRateChange; + data.formatIn = pDescriptorPlayback->format; + data.channelsIn = pDescriptorPlayback->channels; + data.sampleRateIn = pDescriptorPlayback->sampleRate; + MA_COPY_MEMORY(data.channelMapIn, pDescriptorPlayback->channelMap, sizeof(pDescriptorPlayback->channelMap)); + data.shareMode = pDescriptorPlayback->shareMode; + data.performanceProfile = pConfig->performanceProfile; + + /* In full-duplex mode we want the playback buffer to be the same size as the capture buffer. 
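+
+        For context, a hedged sketch of the kind of duplex configuration that exercises this path
+        (public API; data_callback and device are assumed to be defined by the caller):
+
+            ma_device_config config = ma_device_config_init(ma_device_type_duplex);
+            config.capture.format    = ma_format_f32;
+            config.capture.channels  = 1;
+            config.playback.format   = ma_format_f32;
+            config.playback.channels = 2;
+            config.sampleRate        = 48000;
+            config.dataCallback      = data_callback;
+            ma_device_init(NULL, &config, &device);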
*/ + if (pConfig->deviceType == ma_device_type_duplex) { + data.periodSizeInFramesIn = pDescriptorCapture->periodSizeInFrames; + data.periodsIn = pDescriptorCapture->periodCount; + data.registerStopEvent = MA_FALSE; + } else { + data.periodSizeInFramesIn = pDescriptorPlayback->periodSizeInFrames; + data.periodSizeInMillisecondsIn = pDescriptorPlayback->periodSizeInMilliseconds; + data.periodsIn = pDescriptorPlayback->periodCount; + data.registerStopEvent = MA_TRUE; + } + + result = ma_device_init_internal__coreaudio(pDevice->pContext, ma_device_type_playback, pDescriptorPlayback->pDeviceID, &data, (void*)pDevice); + if (result != MA_SUCCESS) { + if (pConfig->deviceType == ma_device_type_duplex) { + ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + if (pDevice->coreaudio.pAudioBufferList) { + ma_free(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks); + } + } + return result; + } + + pDevice->coreaudio.isDefaultPlaybackDevice = (pConfig->playback.pDeviceID == NULL); +#if defined(MA_APPLE_DESKTOP) + pDevice->coreaudio.deviceObjectIDPlayback = (ma_uint32)data.deviceObjectID; +#endif + pDevice->coreaudio.audioUnitPlayback = (ma_ptr)data.audioUnit; + pDevice->coreaudio.originalPeriodSizeInFrames = pDescriptorPlayback->periodSizeInFrames; + pDevice->coreaudio.originalPeriodSizeInMilliseconds = pDescriptorPlayback->periodSizeInMilliseconds; + pDevice->coreaudio.originalPeriods = pDescriptorPlayback->periodCount; + pDevice->coreaudio.originalPerformanceProfile = pConfig->performanceProfile; + + pDescriptorPlayback->format = data.formatOut; + pDescriptorPlayback->channels = data.channelsOut; + pDescriptorPlayback->sampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDescriptorPlayback->channelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDescriptorPlayback->periodSizeInFrames = data.periodSizeInFramesOut; + pDescriptorPlayback->periodCount = data.periodsOut; + +#if defined(MA_APPLE_DESKTOP) + ma_get_AudioObject_uid(pDevice->pContext, pDevice->coreaudio.deviceObjectIDPlayback, sizeof(pDevice->playback.id.coreaudio), pDevice->playback.id.coreaudio); + + /* + If we are using the default device we'll need to listen for changes to the system's default device so we can seemlessly + switch the device in the background. + */ + if (pDescriptorPlayback->pDeviceID == NULL && (pConfig->deviceType != ma_device_type_duplex || pDescriptorCapture->pDeviceID != NULL)) { + ma_device__track__coreaudio(pDevice); + } +#endif + } + + + + /* + When stopping the device, a callback is called on another thread. We need to wait for this callback + before returning from ma_device_stop(). This event is used for this. + */ + ma_event_init(&pDevice->coreaudio.stopEvent); + + /* + We need to detect when a route has changed so we can update the data conversion pipeline accordingly. This is done + differently on non-Desktop Apple platforms. 
+ */ +#if defined(MA_APPLE_MOBILE) + pDevice->coreaudio.pNotificationHandler = (MA_BRIDGE_RETAINED void*)[[ma_ios_notification_handler alloc] init:pDevice]; +#endif + + return MA_SUCCESS; +} + + +static ma_result ma_device_start__coreaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + OSStatus status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + OSStatus status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + if (status != noErr) { + if (pDevice->type == ma_device_type_duplex) { + ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + } + return ma_result_from_OSStatus(status); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__coreaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + /* It's not clear from the documentation whether or not AudioOutputUnitStop() actually drains the device or not. */ + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + OSStatus status = ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + OSStatus status = ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + } + + /* We need to wait for the callback to finish before returning. */ + ma_event_wait(&pDevice->coreaudio.stopEvent); + return MA_SUCCESS; +} + + +static ma_result ma_context_uninit__coreaudio(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_coreaudio); + +#if defined(MA_APPLE_MOBILE) + if (!pContext->coreaudio.noAudioSessionDeactivate) { + if (![[AVAudioSession sharedInstance] setActive:false error:nil]) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "Failed to deactivate audio session."); + return MA_FAILED_TO_INIT_BACKEND; + } + } +#endif + +#if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE) + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit); + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio); + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreFoundation); +#endif + +#if !defined(MA_APPLE_MOBILE) + ma_context__uninit_device_tracking__coreaudio(pContext); +#endif + + (void)pContext; + return MA_SUCCESS; +} + +#if defined(MA_APPLE_MOBILE) && defined(__IPHONE_12_0) +static AVAudioSessionCategory ma_to_AVAudioSessionCategory(ma_ios_session_category category) +{ + /* The "default" and "none" categories are treated different and should not be used as an input into this function. 
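+
+    Applications select the category up front through the context config rather than calling this
+    function themselves. A hedged sketch; the option flag shown is assumed to be
+    ma_ios_session_category_option_mix_with_others and is optional:
+
+        ma_context context;
+        ma_context_config config = ma_context_config_init();
+        config.coreaudio.sessionCategory        = ma_ios_session_category_play_and_record;
+        config.coreaudio.sessionCategoryOptions = ma_ios_session_category_option_mix_with_others;
+        ma_context_init(NULL, 0, &config, &context);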
*/ + MA_ASSERT(category != ma_ios_session_category_default); + MA_ASSERT(category != ma_ios_session_category_none); + + switch (category) { + case ma_ios_session_category_ambient: return AVAudioSessionCategoryAmbient; + case ma_ios_session_category_solo_ambient: return AVAudioSessionCategorySoloAmbient; + case ma_ios_session_category_playback: return AVAudioSessionCategoryPlayback; + case ma_ios_session_category_record: return AVAudioSessionCategoryRecord; + case ma_ios_session_category_play_and_record: return AVAudioSessionCategoryPlayAndRecord; + case ma_ios_session_category_multi_route: return AVAudioSessionCategoryMultiRoute; + case ma_ios_session_category_none: return AVAudioSessionCategoryAmbient; + case ma_ios_session_category_default: return AVAudioSessionCategoryAmbient; + default: return AVAudioSessionCategoryAmbient; + } +} +#endif + +static ma_result ma_context_init__coreaudio(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ +#if !defined(MA_APPLE_MOBILE) + ma_result result; +#endif + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pContext != NULL); + +#if defined(MA_APPLE_MOBILE) + @autoreleasepool { + AVAudioSession* pAudioSession = [AVAudioSession sharedInstance]; + AVAudioSessionCategoryOptions options = pConfig->coreaudio.sessionCategoryOptions; + + MA_ASSERT(pAudioSession != NULL); + + if (pConfig->coreaudio.sessionCategory == ma_ios_session_category_default) { + /* + I'm going to use trial and error to determine our default session category. First we'll try PlayAndRecord. If that fails + we'll try Playback and if that fails we'll try record. If all of these fail we'll just not set the category. + */ +#if !defined(MA_APPLE_TV) && !defined(MA_APPLE_WATCH) + options |= AVAudioSessionCategoryOptionDefaultToSpeaker; +#endif + + if ([pAudioSession setCategory: AVAudioSessionCategoryPlayAndRecord withOptions:options error:nil]) { + /* Using PlayAndRecord */ + } else if ([pAudioSession setCategory: AVAudioSessionCategoryPlayback withOptions:options error:nil]) { + /* Using Playback */ + } else if ([pAudioSession setCategory: AVAudioSessionCategoryRecord withOptions:options error:nil]) { + /* Using Record */ + } else { + /* Leave as default? */ + } + } else { + if (pConfig->coreaudio.sessionCategory != ma_ios_session_category_none) { +#if defined(__IPHONE_12_0) + if (![pAudioSession setCategory: ma_to_AVAudioSessionCategory(pConfig->coreaudio.sessionCategory) withOptions:options error:nil]) { + return MA_INVALID_OPERATION; /* Failed to set session category. */ + } +#else + /* Ignore the session category on version 11 and older, but post a warning. 
*/ + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "Session category only supported in iOS 12 and newer."); +#endif + } + } + + if (!pConfig->coreaudio.noAudioSessionActivate) { + if (![pAudioSession setActive:true error:nil]) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "Failed to activate audio session."); + return MA_FAILED_TO_INIT_BACKEND; + } + } + } +#endif + +#if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE) + pContext->coreaudio.hCoreFoundation = ma_dlopen(ma_context_get_log(pContext), "CoreFoundation.framework/CoreFoundation"); + if (pContext->coreaudio.hCoreFoundation == NULL) { + return MA_API_NOT_FOUND; + } + + pContext->coreaudio.CFStringGetCString = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hCoreFoundation, "CFStringGetCString"); + pContext->coreaudio.CFRelease = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hCoreFoundation, "CFRelease"); + + + pContext->coreaudio.hCoreAudio = ma_dlopen(ma_context_get_log(pContext), "CoreAudio.framework/CoreAudio"); + if (pContext->coreaudio.hCoreAudio == NULL) { + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreFoundation); + return MA_API_NOT_FOUND; + } + + pContext->coreaudio.AudioObjectGetPropertyData = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio, "AudioObjectGetPropertyData"); + pContext->coreaudio.AudioObjectGetPropertyDataSize = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio, "AudioObjectGetPropertyDataSize"); + pContext->coreaudio.AudioObjectSetPropertyData = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio, "AudioObjectSetPropertyData"); + pContext->coreaudio.AudioObjectAddPropertyListener = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio, "AudioObjectAddPropertyListener"); + pContext->coreaudio.AudioObjectRemovePropertyListener = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio, "AudioObjectRemovePropertyListener"); + + /* + It looks like Apple has moved some APIs from AudioUnit into AudioToolbox on more recent versions of macOS. They are still + defined in AudioUnit, but just in case they decide to remove them from there entirely I'm going to implement a fallback. + The way it'll work is that it'll first try AudioUnit, and if the required symbols are not present there we'll fall back to + AudioToolbox. + */ + pContext->coreaudio.hAudioUnit = ma_dlopen(ma_context_get_log(pContext), "AudioUnit.framework/AudioUnit"); + if (pContext->coreaudio.hAudioUnit == NULL) { + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio); + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreFoundation); + return MA_API_NOT_FOUND; + } + + if (ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioComponentFindNext") == NULL) { + /* Couldn't find the required symbols in AudioUnit, so fall back to AudioToolbox. 
*/ + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit); + pContext->coreaudio.hAudioUnit = ma_dlopen(ma_context_get_log(pContext), "AudioToolbox.framework/AudioToolbox"); + if (pContext->coreaudio.hAudioUnit == NULL) { + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio); + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreFoundation); + return MA_API_NOT_FOUND; + } + } + + pContext->coreaudio.AudioComponentFindNext = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioComponentFindNext"); + pContext->coreaudio.AudioComponentInstanceDispose = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioComponentInstanceDispose"); + pContext->coreaudio.AudioComponentInstanceNew = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioComponentInstanceNew"); + pContext->coreaudio.AudioOutputUnitStart = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioOutputUnitStart"); + pContext->coreaudio.AudioOutputUnitStop = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioOutputUnitStop"); + pContext->coreaudio.AudioUnitAddPropertyListener = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioUnitAddPropertyListener"); + pContext->coreaudio.AudioUnitGetPropertyInfo = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioUnitGetPropertyInfo"); + pContext->coreaudio.AudioUnitGetProperty = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioUnitGetProperty"); + pContext->coreaudio.AudioUnitSetProperty = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioUnitSetProperty"); + pContext->coreaudio.AudioUnitInitialize = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioUnitInitialize"); + pContext->coreaudio.AudioUnitRender = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioUnitRender"); +#else + pContext->coreaudio.CFStringGetCString = (ma_proc)CFStringGetCString; + pContext->coreaudio.CFRelease = (ma_proc)CFRelease; + +#if defined(MA_APPLE_DESKTOP) + pContext->coreaudio.AudioObjectGetPropertyData = (ma_proc)AudioObjectGetPropertyData; + pContext->coreaudio.AudioObjectGetPropertyDataSize = (ma_proc)AudioObjectGetPropertyDataSize; + pContext->coreaudio.AudioObjectSetPropertyData = (ma_proc)AudioObjectSetPropertyData; + pContext->coreaudio.AudioObjectAddPropertyListener = (ma_proc)AudioObjectAddPropertyListener; + pContext->coreaudio.AudioObjectRemovePropertyListener = (ma_proc)AudioObjectRemovePropertyListener; +#endif + + pContext->coreaudio.AudioComponentFindNext = (ma_proc)AudioComponentFindNext; + pContext->coreaudio.AudioComponentInstanceDispose = (ma_proc)AudioComponentInstanceDispose; + pContext->coreaudio.AudioComponentInstanceNew = (ma_proc)AudioComponentInstanceNew; + pContext->coreaudio.AudioOutputUnitStart = (ma_proc)AudioOutputUnitStart; + pContext->coreaudio.AudioOutputUnitStop = (ma_proc)AudioOutputUnitStop; + pContext->coreaudio.AudioUnitAddPropertyListener = (ma_proc)AudioUnitAddPropertyListener; + pContext->coreaudio.AudioUnitGetPropertyInfo = (ma_proc)AudioUnitGetPropertyInfo; + pContext->coreaudio.AudioUnitGetProperty = (ma_proc)AudioUnitGetProperty; + pContext->coreaudio.AudioUnitSetProperty = (ma_proc)AudioUnitSetProperty; + pContext->coreaudio.AudioUnitInitialize = (ma_proc)AudioUnitInitialize; + pContext->coreaudio.AudioUnitRender = (ma_proc)AudioUnitRender; 
+#endif + + /* Audio component. */ + { + AudioComponentDescription desc; + desc.componentType = kAudioUnitType_Output; +#if defined(MA_APPLE_DESKTOP) + desc.componentSubType = kAudioUnitSubType_HALOutput; +#else + desc.componentSubType = kAudioUnitSubType_RemoteIO; +#endif + desc.componentManufacturer = kAudioUnitManufacturer_Apple; + desc.componentFlags = 0; + desc.componentFlagsMask = 0; + + pContext->coreaudio.component = ((ma_AudioComponentFindNext_proc)pContext->coreaudio.AudioComponentFindNext)(NULL, &desc); + if (pContext->coreaudio.component == NULL) { +#if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE) + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit); + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio); + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreFoundation); +#endif + return MA_FAILED_TO_INIT_BACKEND; + } + } + +#if !defined(MA_APPLE_MOBILE) + result = ma_context__init_device_tracking__coreaudio(pContext); + if (result != MA_SUCCESS) { +#if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE) + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit); + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio); + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreFoundation); +#endif + return result; + } +#endif + + pContext->coreaudio.noAudioSessionDeactivate = pConfig->coreaudio.noAudioSessionDeactivate; + + pCallbacks->onContextInit = ma_context_init__coreaudio; + pCallbacks->onContextUninit = ma_context_uninit__coreaudio; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__coreaudio; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__coreaudio; + pCallbacks->onDeviceInit = ma_device_init__coreaudio; + pCallbacks->onDeviceUninit = ma_device_uninit__coreaudio; + pCallbacks->onDeviceStart = ma_device_start__coreaudio; + pCallbacks->onDeviceStop = ma_device_stop__coreaudio; + pCallbacks->onDeviceRead = NULL; + pCallbacks->onDeviceWrite = NULL; + pCallbacks->onDeviceDataLoop = NULL; + + return MA_SUCCESS; +} +#endif /* Core Audio */ + + + +/****************************************************************************** + +sndio Backend + +******************************************************************************/ +#ifdef MA_HAS_SNDIO +#include + +/* +Only supporting OpenBSD. This did not work very well at all on FreeBSD when I tried it. Not sure if this is due +to miniaudio's implementation or if it's some kind of system configuration issue, but basically the default device +just doesn't emit any sound, or at times you'll hear tiny pieces. I will consider enabling this when there's +demand for it or if I can get it tested and debugged more thoroughly. 
+*/ +#if 0 +#if defined(__NetBSD__) || defined(__OpenBSD__) +#include +#endif +#if defined(__FreeBSD__) || defined(__DragonFly__) +#include +#endif +#endif + +#define MA_SIO_DEVANY "default" +#define MA_SIO_PLAY 1 +#define MA_SIO_REC 2 +#define MA_SIO_NENC 8 +#define MA_SIO_NCHAN 8 +#define MA_SIO_NRATE 16 +#define MA_SIO_NCONF 4 + +struct ma_sio_hdl; /* <-- Opaque */ + +struct ma_sio_par +{ + unsigned int bits; + unsigned int bps; + unsigned int sig; + unsigned int le; + unsigned int msb; + unsigned int rchan; + unsigned int pchan; + unsigned int rate; + unsigned int bufsz; + unsigned int xrun; + unsigned int round; + unsigned int appbufsz; + int __pad[3]; + unsigned int __magic; +}; + +struct ma_sio_enc +{ + unsigned int bits; + unsigned int bps; + unsigned int sig; + unsigned int le; + unsigned int msb; +}; + +struct ma_sio_conf +{ + unsigned int enc; + unsigned int rchan; + unsigned int pchan; + unsigned int rate; +}; + +struct ma_sio_cap +{ + struct ma_sio_enc enc[MA_SIO_NENC]; + unsigned int rchan[MA_SIO_NCHAN]; + unsigned int pchan[MA_SIO_NCHAN]; + unsigned int rate[MA_SIO_NRATE]; + int __pad[7]; + unsigned int nconf; + struct ma_sio_conf confs[MA_SIO_NCONF]; +}; + +typedef struct ma_sio_hdl* (* ma_sio_open_proc) (const char*, unsigned int, int); +typedef void (* ma_sio_close_proc) (struct ma_sio_hdl*); +typedef int (* ma_sio_setpar_proc) (struct ma_sio_hdl*, struct ma_sio_par*); +typedef int (* ma_sio_getpar_proc) (struct ma_sio_hdl*, struct ma_sio_par*); +typedef int (* ma_sio_getcap_proc) (struct ma_sio_hdl*, struct ma_sio_cap*); +typedef size_t (* ma_sio_write_proc) (struct ma_sio_hdl*, const void*, size_t); +typedef size_t (* ma_sio_read_proc) (struct ma_sio_hdl*, void*, size_t); +typedef int (* ma_sio_start_proc) (struct ma_sio_hdl*); +typedef int (* ma_sio_stop_proc) (struct ma_sio_hdl*); +typedef int (* ma_sio_initpar_proc)(struct ma_sio_par*); + +static ma_uint32 ma_get_standard_sample_rate_priority_index__sndio(ma_uint32 sampleRate) /* Lower = higher priority */ +{ + ma_uint32 i; + for (i = 0; i < ma_countof(g_maStandardSampleRatePriorities); ++i) { + if (g_maStandardSampleRatePriorities[i] == sampleRate) { + return i; + } + } + + return (ma_uint32)-1; +} + +static ma_format ma_format_from_sio_enc__sndio(unsigned int bits, unsigned int bps, unsigned int sig, unsigned int le, unsigned int msb) +{ + /* We only support native-endian right now. 
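+
+    A few hedged examples of how the mapping below plays out on a little-endian machine (parameters
+    are listed in the bits, bps, sig, le, msb order used by this backend):
+
+        bits=16 bps=2 sig=1 le=1  ->  ma_format_s16
+        bits=32 bps=4 sig=1 le=1  ->  ma_format_s32
+        bits=16 bps=2 sig=1 le=0  ->  ma_format_unknown (not native endian)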
*/ + if ((ma_is_little_endian() && le == 0) || (ma_is_big_endian() && le == 1)) { + return ma_format_unknown; + } + + if (bits == 8 && bps == 1 && sig == 0) { + return ma_format_u8; + } + if (bits == 16 && bps == 2 && sig == 1) { + return ma_format_s16; + } + if (bits == 24 && bps == 3 && sig == 1) { + return ma_format_s24; + } + if (bits == 24 && bps == 4 && sig == 1 && msb == 0) { + /*return ma_format_s24_32;*/ + } + if (bits == 32 && bps == 4 && sig == 1) { + return ma_format_s32; + } + + return ma_format_unknown; +} + +static ma_format ma_find_best_format_from_sio_cap__sndio(struct ma_sio_cap* caps) +{ + ma_format bestFormat; + unsigned int iConfig; + + MA_ASSERT(caps != NULL); + + bestFormat = ma_format_unknown; + for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) { + unsigned int iEncoding; + for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) { + unsigned int bits; + unsigned int bps; + unsigned int sig; + unsigned int le; + unsigned int msb; + ma_format format; + + if ((caps->confs[iConfig].enc & (1UL << iEncoding)) == 0) { + continue; + } + + bits = caps->enc[iEncoding].bits; + bps = caps->enc[iEncoding].bps; + sig = caps->enc[iEncoding].sig; + le = caps->enc[iEncoding].le; + msb = caps->enc[iEncoding].msb; + format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb); + if (format == ma_format_unknown) { + continue; /* Format not supported. */ + } + + if (bestFormat == ma_format_unknown) { + bestFormat = format; + } else { + if (ma_get_format_priority_index(bestFormat) > ma_get_format_priority_index(format)) { /* <-- Lower = better. */ + bestFormat = format; + } + } + } + } + + return bestFormat; +} + +static ma_uint32 ma_find_best_channels_from_sio_cap__sndio(struct ma_sio_cap* caps, ma_device_type deviceType, ma_format requiredFormat) +{ + ma_uint32 maxChannels; + unsigned int iConfig; + + MA_ASSERT(caps != NULL); + MA_ASSERT(requiredFormat != ma_format_unknown); + + /* Just pick whatever configuration has the most channels. */ + maxChannels = 0; + for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) { + /* The encoding should be of requiredFormat. */ + unsigned int iEncoding; + for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) { + unsigned int iChannel; + unsigned int bits; + unsigned int bps; + unsigned int sig; + unsigned int le; + unsigned int msb; + ma_format format; + + if ((caps->confs[iConfig].enc & (1UL << iEncoding)) == 0) { + continue; + } + + bits = caps->enc[iEncoding].bits; + bps = caps->enc[iEncoding].bps; + sig = caps->enc[iEncoding].sig; + le = caps->enc[iEncoding].le; + msb = caps->enc[iEncoding].msb; + format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb); + if (format != requiredFormat) { + continue; + } + + /* Getting here means the format is supported. Iterate over each channel count and grab the biggest one. 
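+
+            A hedged worked example of the bitmask convention used here (values illustrative): if
+            caps->confs[0].pchan is 0x0A then bits 1 and 3 are set, so caps->pchan[1] and
+            caps->pchan[3] hold real channel counts, e.g.
+
+                caps->pchan[1] = 2   (stereo supported)
+                caps->pchan[3] = 8   (8 channels supported, so maxChannels ends up as 8)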
*/ + for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) { + unsigned int chan = 0; + unsigned int channels; + + if (deviceType == ma_device_type_playback) { + chan = caps->confs[iConfig].pchan; + } else { + chan = caps->confs[iConfig].rchan; + } + + if ((chan & (1UL << iChannel)) == 0) { + continue; + } + + if (deviceType == ma_device_type_playback) { + channels = caps->pchan[iChannel]; + } else { + channels = caps->rchan[iChannel]; + } + + if (maxChannels < channels) { + maxChannels = channels; + } + } + } + } + + return maxChannels; +} + +static ma_uint32 ma_find_best_sample_rate_from_sio_cap__sndio(struct ma_sio_cap* caps, ma_device_type deviceType, ma_format requiredFormat, ma_uint32 requiredChannels) +{ + ma_uint32 firstSampleRate; + ma_uint32 bestSampleRate; + unsigned int iConfig; + + MA_ASSERT(caps != NULL); + MA_ASSERT(requiredFormat != ma_format_unknown); + MA_ASSERT(requiredChannels > 0); + MA_ASSERT(requiredChannels <= MA_MAX_CHANNELS); + + firstSampleRate = 0; /* <-- If the device does not support a standard rate we'll fall back to the first one that's found. */ + bestSampleRate = 0; + + for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) { + /* The encoding should be of requiredFormat. */ + unsigned int iEncoding; + for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) { + unsigned int iChannel; + unsigned int bits; + unsigned int bps; + unsigned int sig; + unsigned int le; + unsigned int msb; + ma_format format; + + if ((caps->confs[iConfig].enc & (1UL << iEncoding)) == 0) { + continue; + } + + bits = caps->enc[iEncoding].bits; + bps = caps->enc[iEncoding].bps; + sig = caps->enc[iEncoding].sig; + le = caps->enc[iEncoding].le; + msb = caps->enc[iEncoding].msb; + format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb); + if (format != requiredFormat) { + continue; + } + + /* Getting here means the format is supported. Iterate over each channel count and grab the biggest one. */ + for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) { + unsigned int chan = 0; + unsigned int channels; + unsigned int iRate; + + if (deviceType == ma_device_type_playback) { + chan = caps->confs[iConfig].pchan; + } else { + chan = caps->confs[iConfig].rchan; + } + + if ((chan & (1UL << iChannel)) == 0) { + continue; + } + + if (deviceType == ma_device_type_playback) { + channels = caps->pchan[iChannel]; + } else { + channels = caps->rchan[iChannel]; + } + + if (channels != requiredChannels) { + continue; + } + + /* Getting here means we have found a compatible encoding/channel pair. */ + for (iRate = 0; iRate < MA_SIO_NRATE; iRate += 1) { + ma_uint32 rate = (ma_uint32)caps->rate[iRate]; + ma_uint32 ratePriority; + + if (firstSampleRate == 0) { + firstSampleRate = rate; + } + + /* Disregard this rate if it's not a standard one. */ + ratePriority = ma_get_standard_sample_rate_priority_index__sndio(rate); + if (ratePriority == (ma_uint32)-1) { + continue; + } + + if (ma_get_standard_sample_rate_priority_index__sndio(bestSampleRate) > ratePriority) { /* Lower = better. */ + bestSampleRate = rate; + } + } + } + } + } + + /* If a standard sample rate was not found just fall back to the first one that was iterated. 
*/ + if (bestSampleRate == 0) { + bestSampleRate = firstSampleRate; + } + + return bestSampleRate; +} + + +static ma_result ma_context_enumerate_devices__sndio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 isTerminating = MA_FALSE; + struct ma_sio_hdl* handle; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* sndio doesn't seem to have a good device enumeration API, so I'm therefore only enumerating over default devices for now. */ + + /* Playback. */ + if (!isTerminating) { + handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(MA_SIO_DEVANY, MA_SIO_PLAY, 0); + if (handle != NULL) { + /* Supports playback. */ + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strcpy_s(deviceInfo.id.sndio, sizeof(deviceInfo.id.sndio), MA_SIO_DEVANY); + ma_strcpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME); + + isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + + ((ma_sio_close_proc)pContext->sndio.sio_close)(handle); + } + } + + /* Capture. */ + if (!isTerminating) { + handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(MA_SIO_DEVANY, MA_SIO_REC, 0); + if (handle != NULL) { + /* Supports capture. */ + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strcpy_s(deviceInfo.id.sndio, sizeof(deviceInfo.id.sndio), "default"); + ma_strcpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME); + + isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + + ((ma_sio_close_proc)pContext->sndio.sio_close)(handle); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__sndio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + char devid[256]; + struct ma_sio_hdl* handle; + struct ma_sio_cap caps; + unsigned int iConfig; + + MA_ASSERT(pContext != NULL); + + /* We need to open the device before we can get information about it. */ + if (pDeviceID == NULL) { + ma_strcpy_s(devid, sizeof(devid), MA_SIO_DEVANY); + ma_strcpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (deviceType == ma_device_type_playback) ? MA_DEFAULT_PLAYBACK_DEVICE_NAME : MA_DEFAULT_CAPTURE_DEVICE_NAME); + } else { + ma_strcpy_s(devid, sizeof(devid), pDeviceID->sndio); + ma_strcpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), devid); + } + + handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(devid, (deviceType == ma_device_type_playback) ? MA_SIO_PLAY : MA_SIO_REC, 0); + if (handle == NULL) { + return MA_NO_DEVICE; + } + + if (((ma_sio_getcap_proc)pContext->sndio.sio_getcap)(handle, &caps) == 0) { + return MA_ERROR; + } + + pDeviceInfo->nativeDataFormatCount = 0; + + for (iConfig = 0; iConfig < caps.nconf; iConfig += 1) { + /* + The main thing we care about is that the encoding is supported by miniaudio. If it is, we want to give + preference to some formats over others. 
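+
+        For context, a rough sketch of how an application consumes the native data formats gathered
+        here (assuming the public ma_context_get_device_info() API; context, info and iFormat belong
+        to the caller and the output formatting is illustrative):
+
+            ma_device_info info;
+            ma_uint32 iFormat;
+            if (ma_context_get_device_info(&context, ma_device_type_playback, NULL, &info) == MA_SUCCESS) {
+                for (iFormat = 0; iFormat < info.nativeDataFormatCount; iFormat += 1) {
+                    printf("%s, %u channels, %u Hz\n",
+                        ma_get_format_name(info.nativeDataFormats[iFormat].format),
+                        info.nativeDataFormats[iFormat].channels,
+                        info.nativeDataFormats[iFormat].sampleRate);
+                }
+            }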
+ */ + unsigned int iEncoding; + unsigned int iChannel; + unsigned int iRate; + + for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) { + unsigned int bits; + unsigned int bps; + unsigned int sig; + unsigned int le; + unsigned int msb; + ma_format format; + + if ((caps.confs[iConfig].enc & (1UL << iEncoding)) == 0) { + continue; + } + + bits = caps.enc[iEncoding].bits; + bps = caps.enc[iEncoding].bps; + sig = caps.enc[iEncoding].sig; + le = caps.enc[iEncoding].le; + msb = caps.enc[iEncoding].msb; + format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb); + if (format == ma_format_unknown) { + continue; /* Format not supported. */ + } + + + /* Channels. */ + for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) { + unsigned int chan = 0; + unsigned int channels; + + if (deviceType == ma_device_type_playback) { + chan = caps.confs[iConfig].pchan; + } else { + chan = caps.confs[iConfig].rchan; + } + + if ((chan & (1UL << iChannel)) == 0) { + continue; + } + + if (deviceType == ma_device_type_playback) { + channels = caps.pchan[iChannel]; + } else { + channels = caps.rchan[iChannel]; + } + + + /* Sample Rates. */ + for (iRate = 0; iRate < MA_SIO_NRATE; iRate += 1) { + if ((caps.confs[iConfig].rate & (1UL << iRate)) != 0) { + ma_device_info_add_native_data_format(pDeviceInfo, format, channels, caps.rate[iRate], 0); + } + } + } + } + } + + ((ma_sio_close_proc)pContext->sndio.sio_close)(handle); + return MA_SUCCESS; +} + +static ma_result ma_device_uninit__sndio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)pDevice->sndio.handleCapture); + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_init_handle__sndio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptor, ma_device_type deviceType) +{ + const char* pDeviceName; + ma_ptr handle; + int openFlags = 0; + struct ma_sio_cap caps; + struct ma_sio_par par; + const ma_device_id* pDeviceID; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_uint32 internalPeriodSizeInFrames; + ma_uint32 internalPeriods; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); + MA_ASSERT(pDevice != NULL); + + if (deviceType == ma_device_type_capture) { + openFlags = MA_SIO_REC; + } else { + openFlags = MA_SIO_PLAY; + } + + pDeviceID = pDescriptor->pDeviceID; + format = pDescriptor->format; + channels = pDescriptor->channels; + sampleRate = pDescriptor->sampleRate; + + pDeviceName = MA_SIO_DEVANY; + if (pDeviceID != NULL) { + pDeviceName = pDeviceID->sndio; + } + + handle = (ma_ptr)((ma_sio_open_proc)pDevice->pContext->sndio.sio_open)(pDeviceName, openFlags, 0); + if (handle == NULL) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to open device."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + + /* We need to retrieve the device caps to determine the most appropriate format to use. 
*/ + if (((ma_sio_getcap_proc)pDevice->pContext->sndio.sio_getcap)((struct ma_sio_hdl*)handle, &caps) == 0) { + ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)handle); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to retrieve device caps."); + return MA_ERROR; + } + + /* + Note: sndio reports a huge range of available channels. This is inconvenient for us because there's no real + way, as far as I can tell, to get the _actual_ channel count of the device. I'm therefore restricting this + to the requested channels, regardless of whether or not the default channel count is requested. + + For hardware devices, I'm suspecting only a single channel count will be reported and we can safely use the + value returned by ma_find_best_channels_from_sio_cap__sndio(). + */ + if (deviceType == ma_device_type_capture) { + if (format == ma_format_unknown) { + format = ma_find_best_format_from_sio_cap__sndio(&caps); + } + + if (channels == 0) { + if (strlen(pDeviceName) > strlen("rsnd/") && strncmp(pDeviceName, "rsnd/", strlen("rsnd/")) == 0) { + channels = ma_find_best_channels_from_sio_cap__sndio(&caps, deviceType, format); + } else { + channels = MA_DEFAULT_CHANNELS; + } + } + } else { + if (format == ma_format_unknown) { + format = ma_find_best_format_from_sio_cap__sndio(&caps); + } + + if (channels == 0) { + if (strlen(pDeviceName) > strlen("rsnd/") && strncmp(pDeviceName, "rsnd/", strlen("rsnd/")) == 0) { + channels = ma_find_best_channels_from_sio_cap__sndio(&caps, deviceType, format); + } else { + channels = MA_DEFAULT_CHANNELS; + } + } + } + + if (sampleRate == 0) { + sampleRate = ma_find_best_sample_rate_from_sio_cap__sndio(&caps, pConfig->deviceType, format, channels); + } + + + ((ma_sio_initpar_proc)pDevice->pContext->sndio.sio_initpar)(&par); + par.msb = 0; + par.le = ma_is_little_endian(); + + switch (format) { + case ma_format_u8: + { + par.bits = 8; + par.bps = 1; + par.sig = 0; + } break; + + case ma_format_s24: + { + par.bits = 24; + par.bps = 3; + par.sig = 1; + } break; + + case ma_format_s32: + { + par.bits = 32; + par.bps = 4; + par.sig = 1; + } break; + + case ma_format_s16: + case ma_format_f32: + case ma_format_unknown: + default: + { + par.bits = 16; + par.bps = 2; + par.sig = 1; + } break; + } + + if (deviceType == ma_device_type_capture) { + par.rchan = channels; + } else { + par.pchan = channels; + } + + par.rate = sampleRate; + + internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, par.rate, pConfig->performanceProfile); + + par.round = internalPeriodSizeInFrames; + par.appbufsz = par.round * pDescriptor->periodCount; + + if (((ma_sio_setpar_proc)pDevice->pContext->sndio.sio_setpar)((struct ma_sio_hdl*)handle, &par) == 0) { + ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)handle); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to set buffer size."); + return MA_ERROR; + } + + if (((ma_sio_getpar_proc)pDevice->pContext->sndio.sio_getpar)((struct ma_sio_hdl*)handle, &par) == 0) { + ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)handle); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to retrieve buffer size."); + return MA_ERROR; + } + + internalFormat = ma_format_from_sio_enc__sndio(par.bits, par.bps, par.sig, par.le, par.msb); + internalChannels = (deviceType == ma_device_type_capture) ? 
par.rchan : par.pchan; + internalSampleRate = par.rate; + internalPeriods = par.appbufsz / par.round; + internalPeriodSizeInFrames = par.round; + + if (deviceType == ma_device_type_capture) { + pDevice->sndio.handleCapture = handle; + } else { + pDevice->sndio.handlePlayback = handle; + } + + pDescriptor->format = internalFormat; + pDescriptor->channels = internalChannels; + pDescriptor->sampleRate = internalSampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_sndio, pDescriptor->channelMap, ma_countof(pDescriptor->channelMap), internalChannels); + pDescriptor->periodSizeInFrames = internalPeriodSizeInFrames; + pDescriptor->periodCount = internalPeriods; + + return MA_SUCCESS; +} + +static ma_result ma_device_init__sndio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->sndio); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_handle__sndio(pDevice, pConfig, pDescriptorCapture, ma_device_type_capture); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_handle__sndio(pDevice, pConfig, pDescriptorPlayback, ma_device_type_playback); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_start__sndio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ((ma_sio_start_proc)pDevice->pContext->sndio.sio_start)((struct ma_sio_hdl*)pDevice->sndio.handleCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ((ma_sio_start_proc)pDevice->pContext->sndio.sio_start)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback); /* <-- Doesn't actually playback until data is written. */ + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__sndio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + /* + From the documentation: + + The sio_stop() function puts the audio subsystem in the same state as before sio_start() is called. It stops recording, drains the play buffer and then + stops playback. If samples to play are queued but playback hasn't started yet then playback is forced immediately; playback will actually stop once the + buffer is drained. In no case are samples in the play buffer discarded. + + Therefore, sio_stop() performs all of the necessary draining for us. 
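+
+    A minimal caller-side sketch of what that means in practice (public miniaudio API; device setup omitted):
+
+        ma_device_start(&device);    <-- sio_start() is called; playback begins once data is written.
+        ma_device_stop(&device);     <-- sio_stop() drains any queued playback data before stopping.
+
+    so no explicit draining step is required before stopping a sndio device.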
+ */ + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ((ma_sio_stop_proc)pDevice->pContext->sndio.sio_stop)((struct ma_sio_hdl*)pDevice->sndio.handleCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ((ma_sio_stop_proc)pDevice->pContext->sndio.sio_stop)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_write__sndio(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +{ + int result; + + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + result = ((ma_sio_write_proc)pDevice->pContext->sndio.sio_write)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + if (result == 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to send data from the client to the device."); + return MA_IO_ERROR; + } + + if (pFramesWritten != NULL) { + *pFramesWritten = frameCount; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_read__sndio(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + int result; + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + result = ((ma_sio_read_proc)pDevice->pContext->sndio.sio_read)((struct ma_sio_hdl*)pDevice->sndio.handleCapture, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + if (result == 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to read data from the device to be sent to the device."); + return MA_IO_ERROR; + } + + if (pFramesRead != NULL) { + *pFramesRead = frameCount; + } + + return MA_SUCCESS; +} + +static ma_result ma_context_uninit__sndio(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_sndio); + + (void)pContext; + return MA_SUCCESS; +} + +static ma_result ma_context_init__sndio(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ +#ifndef MA_NO_RUNTIME_LINKING + const char* libsndioNames[] = { + "libsndio.so" + }; + size_t i; + + for (i = 0; i < ma_countof(libsndioNames); ++i) { + pContext->sndio.sndioSO = ma_dlopen(ma_context_get_log(pContext), libsndioNames[i]); + if (pContext->sndio.sndioSO != NULL) { + break; + } + } + + if (pContext->sndio.sndioSO == NULL) { + return MA_NO_BACKEND; + } + + pContext->sndio.sio_open = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_open"); + pContext->sndio.sio_close = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_close"); + pContext->sndio.sio_setpar = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_setpar"); + pContext->sndio.sio_getpar = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_getpar"); + pContext->sndio.sio_getcap = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_getcap"); + pContext->sndio.sio_write = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_write"); + pContext->sndio.sio_read = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_read"); + pContext->sndio.sio_start = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_start"); + pContext->sndio.sio_stop = 
(ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_stop"); + pContext->sndio.sio_initpar = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_initpar"); +#else + pContext->sndio.sio_open = sio_open; + pContext->sndio.sio_close = sio_close; + pContext->sndio.sio_setpar = sio_setpar; + pContext->sndio.sio_getpar = sio_getpar; + pContext->sndio.sio_getcap = sio_getcap; + pContext->sndio.sio_write = sio_write; + pContext->sndio.sio_read = sio_read; + pContext->sndio.sio_start = sio_start; + pContext->sndio.sio_stop = sio_stop; + pContext->sndio.sio_initpar = sio_initpar; +#endif + + pCallbacks->onContextInit = ma_context_init__sndio; + pCallbacks->onContextUninit = ma_context_uninit__sndio; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__sndio; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__sndio; + pCallbacks->onDeviceInit = ma_device_init__sndio; + pCallbacks->onDeviceUninit = ma_device_uninit__sndio; + pCallbacks->onDeviceStart = ma_device_start__sndio; + pCallbacks->onDeviceStop = ma_device_stop__sndio; + pCallbacks->onDeviceRead = ma_device_read__sndio; + pCallbacks->onDeviceWrite = ma_device_write__sndio; + pCallbacks->onDeviceDataLoop = NULL; + + (void)pConfig; + return MA_SUCCESS; +} +#endif /* sndio */ + + + +/****************************************************************************** + +audio(4) Backend + +******************************************************************************/ +#ifdef MA_HAS_AUDIO4 +#include +#include +#include +#include +#include +#include +#include + +#if defined(__OpenBSD__) +#include +#if defined(OpenBSD) && OpenBSD >= 201709 +#define MA_AUDIO4_USE_NEW_API +#endif +#endif + +static void ma_construct_device_id__audio4(char* id, size_t idSize, const char* base, int deviceIndex) +{ + size_t baseLen; + + MA_ASSERT(id != NULL); + MA_ASSERT(idSize > 0); + MA_ASSERT(deviceIndex >= 0); + + baseLen = strlen(base); + MA_ASSERT(idSize > baseLen); + + ma_strcpy_s(id, idSize, base); + ma_itoa_s(deviceIndex, id+baseLen, idSize-baseLen, 10); +} + +static ma_result ma_extract_device_index_from_id__audio4(const char* id, const char* base, int* pIndexOut) +{ + size_t idLen; + size_t baseLen; + const char* deviceIndexStr; + + MA_ASSERT(id != NULL); + MA_ASSERT(base != NULL); + MA_ASSERT(pIndexOut != NULL); + + idLen = strlen(id); + baseLen = strlen(base); + if (idLen <= baseLen) { + return MA_ERROR; /* Doesn't look like the id starts with the base. */ + } + + if (strncmp(id, base, baseLen) != 0) { + return MA_ERROR; /* ID does not begin with base. */ + } + + deviceIndexStr = id + baseLen; + if (deviceIndexStr[0] == '\0') { + return MA_ERROR; /* No index specified in the ID. 
*/ + } + + if (pIndexOut) { + *pIndexOut = atoi(deviceIndexStr); + } + + return MA_SUCCESS; +} + + +#if !defined(MA_AUDIO4_USE_NEW_API) /* Old API */ +static ma_format ma_format_from_encoding__audio4(unsigned int encoding, unsigned int precision) +{ + if (precision == 8 && (encoding == AUDIO_ENCODING_ULINEAR || encoding == AUDIO_ENCODING_ULINEAR || encoding == AUDIO_ENCODING_ULINEAR_LE || encoding == AUDIO_ENCODING_ULINEAR_BE)) { + return ma_format_u8; + } else { + if (ma_is_little_endian() && encoding == AUDIO_ENCODING_SLINEAR_LE) { + if (precision == 16) { + return ma_format_s16; + } else if (precision == 24) { + return ma_format_s24; + } else if (precision == 32) { + return ma_format_s32; + } + } else if (ma_is_big_endian() && encoding == AUDIO_ENCODING_SLINEAR_BE) { + if (precision == 16) { + return ma_format_s16; + } else if (precision == 24) { + return ma_format_s24; + } else if (precision == 32) { + return ma_format_s32; + } + } + } + + return ma_format_unknown; /* Encoding not supported. */ +} + +static void ma_encoding_from_format__audio4(ma_format format, unsigned int* pEncoding, unsigned int* pPrecision) +{ + MA_ASSERT(pEncoding != NULL); + MA_ASSERT(pPrecision != NULL); + + switch (format) + { + case ma_format_u8: + { + *pEncoding = AUDIO_ENCODING_ULINEAR; + *pPrecision = 8; + } break; + + case ma_format_s24: + { + *pEncoding = (ma_is_little_endian()) ? AUDIO_ENCODING_SLINEAR_LE : AUDIO_ENCODING_SLINEAR_BE; + *pPrecision = 24; + } break; + + case ma_format_s32: + { + *pEncoding = (ma_is_little_endian()) ? AUDIO_ENCODING_SLINEAR_LE : AUDIO_ENCODING_SLINEAR_BE; + *pPrecision = 32; + } break; + + case ma_format_s16: + case ma_format_f32: + case ma_format_unknown: + default: + { + *pEncoding = (ma_is_little_endian()) ? AUDIO_ENCODING_SLINEAR_LE : AUDIO_ENCODING_SLINEAR_BE; + *pPrecision = 16; + } break; + } +} + +static ma_format ma_format_from_prinfo__audio4(struct audio_prinfo* prinfo) +{ + return ma_format_from_encoding__audio4(prinfo->encoding, prinfo->precision); +} + +static ma_format ma_best_format_from_fd__audio4(int fd, ma_format preferredFormat) +{ + audio_encoding_t encoding; + ma_uint32 iFormat; + int counter = 0; + + /* First check to see if the preferred format is supported. */ + if (preferredFormat != ma_format_unknown) { + counter = 0; + for (;;) { + MA_ZERO_OBJECT(&encoding); + encoding.index = counter; + if (ioctl(fd, AUDIO_GETENC, &encoding) < 0) { + break; + } + + if (preferredFormat == ma_format_from_encoding__audio4(encoding.encoding, encoding.precision)) { + return preferredFormat; /* Found the preferred format. */ + } + + /* Getting here means this encoding does not match our preferred format so we need to more on to the next encoding. */ + counter += 1; + } + } + + /* Getting here means our preferred format is not supported, so fall back to our standard priorities. */ + for (iFormat = 0; iFormat < ma_countof(g_maFormatPriorities); iFormat += 1) { + ma_format format = g_maFormatPriorities[iFormat]; + + counter = 0; + for (;;) { + MA_ZERO_OBJECT(&encoding); + encoding.index = counter; + if (ioctl(fd, AUDIO_GETENC, &encoding) < 0) { + break; + } + + if (format == ma_format_from_encoding__audio4(encoding.encoding, encoding.precision)) { + return format; /* Found a workable format. */ + } + + /* Getting here means this encoding does not match our preferred format so we need to more on to the next encoding. */ + counter += 1; + } + } + + /* Getting here means not appropriate format was found. 
*/ + return ma_format_unknown; +} +#else +static ma_format ma_format_from_swpar__audio4(struct audio_swpar* par) +{ + if (par->bits == 8 && par->bps == 1 && par->sig == 0) { + return ma_format_u8; + } + if (par->bits == 16 && par->bps == 2 && par->sig == 1 && par->le == ma_is_little_endian()) { + return ma_format_s16; + } + if (par->bits == 24 && par->bps == 3 && par->sig == 1 && par->le == ma_is_little_endian()) { + return ma_format_s24; + } + if (par->bits == 32 && par->bps == 4 && par->sig == 1 && par->le == ma_is_little_endian()) { + return ma_format_f32; + } + + /* Format not supported. */ + return ma_format_unknown; +} +#endif + +static ma_result ma_context_get_device_info_from_fd__audio4(ma_context* pContext, ma_device_type deviceType, int fd, ma_device_info* pDeviceInfo) +{ + audio_device_t fdDevice; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(fd >= 0); + MA_ASSERT(pDeviceInfo != NULL); + + (void)pContext; + (void)deviceType; + + if (ioctl(fd, AUDIO_GETDEV, &fdDevice) < 0) { + return MA_ERROR; /* Failed to retrieve device info. */ + } + + /* Name. */ + ma_strcpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), fdDevice.name); + +#if !defined(MA_AUDIO4_USE_NEW_API) + { + audio_info_t fdInfo; + int counter = 0; + ma_uint32 channels; + ma_uint32 sampleRate; + + if (ioctl(fd, AUDIO_GETINFO, &fdInfo) < 0) { + return MA_ERROR; + } + + if (deviceType == ma_device_type_playback) { + channels = fdInfo.play.channels; + sampleRate = fdInfo.play.sample_rate; + } else { + channels = fdInfo.record.channels; + sampleRate = fdInfo.record.sample_rate; + } + + /* Supported formats. We get this by looking at the encodings. */ + pDeviceInfo->nativeDataFormatCount = 0; + for (;;) { + audio_encoding_t encoding; + ma_format format; + + MA_ZERO_OBJECT(&encoding); + encoding.index = counter; + if (ioctl(fd, AUDIO_GETENC, &encoding) < 0) { + break; + } + + format = ma_format_from_encoding__audio4(encoding.encoding, encoding.precision); + if (format != ma_format_unknown) { + ma_device_info_add_native_data_format(pDeviceInfo, format, channels, sampleRate, 0); + } + + counter += 1; + } + } +#else + { + struct audio_swpar fdPar; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + + if (ioctl(fd, AUDIO_GETPAR, &fdPar) < 0) { + return MA_ERROR; + } + + format = ma_format_from_swpar__audio4(&fdPar); + if (format == ma_format_unknown) { + return MA_FORMAT_NOT_SUPPORTED; + } + + if (deviceType == ma_device_type_playback) { + channels = fdPar.pchan; + } else { + channels = fdPar.rchan; + } + + sampleRate = fdPar.rate; + + pDeviceInfo->nativeDataFormatCount = 0; + ma_device_info_add_native_data_format(pDeviceInfo, format, channels, sampleRate, 0); + } +#endif + + return MA_SUCCESS; +} + +static ma_result ma_context_enumerate_devices__audio4(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + const int maxDevices = 64; + char devpath[256]; + int iDevice; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* + Every device will be named "/dev/audioN", with a "/dev/audioctlN" equivalent. We use the "/dev/audioctlN" + version here since we can open it even when another process has control of the "/dev/audioN" device. 
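+
+    For example, iDevice == 2 yields the control path "/dev/audioctl2" (built below with ma_itoa_s()),
+    while the device ID reported to the callback is built as
+    ma_construct_device_id__audio4(id, sizeof(id), "/dev/audio", 2), i.e. "/dev/audio2".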
+ */ + for (iDevice = 0; iDevice < maxDevices; ++iDevice) { + struct stat st; + int fd; + ma_bool32 isTerminating = MA_FALSE; + + ma_strcpy_s(devpath, sizeof(devpath), "/dev/audioctl"); + ma_itoa_s(iDevice, devpath+strlen(devpath), sizeof(devpath)-strlen(devpath), 10); + + if (stat(devpath, &st) < 0) { + break; + } + + /* The device exists, but we need to check if it's usable as playback and/or capture. */ + + /* Playback. */ + if (!isTerminating) { + fd = open(devpath, O_RDONLY, 0); + if (fd >= 0) { + /* Supports playback. */ + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_construct_device_id__audio4(deviceInfo.id.audio4, sizeof(deviceInfo.id.audio4), "/dev/audio", iDevice); + if (ma_context_get_device_info_from_fd__audio4(pContext, ma_device_type_playback, fd, &deviceInfo) == MA_SUCCESS) { + isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + close(fd); + } + } + + /* Capture. */ + if (!isTerminating) { + fd = open(devpath, O_WRONLY, 0); + if (fd >= 0) { + /* Supports capture. */ + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_construct_device_id__audio4(deviceInfo.id.audio4, sizeof(deviceInfo.id.audio4), "/dev/audio", iDevice); + if (ma_context_get_device_info_from_fd__audio4(pContext, ma_device_type_capture, fd, &deviceInfo) == MA_SUCCESS) { + isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + + close(fd); + } + } + + if (isTerminating) { + break; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__audio4(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + int fd = -1; + int deviceIndex = -1; + char ctlid[256]; + ma_result result; + + MA_ASSERT(pContext != NULL); + + /* + We need to open the "/dev/audioctlN" device to get the info. To do this we need to extract the number + from the device ID which will be in "/dev/audioN" format. + */ + if (pDeviceID == NULL) { + /* Default device. */ + ma_strcpy_s(ctlid, sizeof(ctlid), "/dev/audioctl"); + } else { + /* Specific device. We need to convert from "/dev/audioN" to "/dev/audioctlN". */ + result = ma_extract_device_index_from_id__audio4(pDeviceID->audio4, "/dev/audio", &deviceIndex); + if (result != MA_SUCCESS) { + return result; + } + + ma_construct_device_id__audio4(ctlid, sizeof(ctlid), "/dev/audioctl", deviceIndex); + } + + fd = open(ctlid, (deviceType == ma_device_type_playback) ? 
O_WRONLY : O_RDONLY, 0); + if (fd == -1) { + return MA_NO_DEVICE; + } + + if (deviceIndex == -1) { + ma_strcpy_s(pDeviceInfo->id.audio4, sizeof(pDeviceInfo->id.audio4), "/dev/audio"); + } else { + ma_construct_device_id__audio4(pDeviceInfo->id.audio4, sizeof(pDeviceInfo->id.audio4), "/dev/audio", deviceIndex); + } + + result = ma_context_get_device_info_from_fd__audio4(pContext, deviceType, fd, pDeviceInfo); + + close(fd); + return result; +} + +static ma_result ma_device_uninit__audio4(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + close(pDevice->audio4.fdCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + close(pDevice->audio4.fdPlayback); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_init_fd__audio4(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptor, ma_device_type deviceType) +{ + const char* pDefaultDeviceNames[] = { + "/dev/audio", + "/dev/audio0" + }; + const char* pDefaultDeviceCtlNames[] = { + "/dev/audioctl", + "/dev/audioctl0" + }; + int fd; + int fdFlags = 0; + size_t iDefaultDevice = (size_t)-1; + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_uint32 internalPeriodSizeInFrames; + ma_uint32 internalPeriods; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); + MA_ASSERT(pDevice != NULL); + + /* The first thing to do is open the file. */ + if (deviceType == ma_device_type_capture) { + fdFlags = O_RDONLY; + } else { + fdFlags = O_WRONLY; + } + /*fdFlags |= O_NONBLOCK;*/ + + /* Find the index of the default device as a start. We'll use this index later. Set it to (size_t)-1 otherwise. */ + if (pDescriptor->pDeviceID == NULL) { + /* Default device. */ + for (iDefaultDevice = 0; iDefaultDevice < ma_countof(pDefaultDeviceNames); ++iDefaultDevice) { + fd = open(pDefaultDeviceNames[iDefaultDevice], fdFlags, 0); + if (fd != -1) { + break; + } + } + } else { + /* Specific device. */ + fd = open(pDescriptor->pDeviceID->audio4, fdFlags, 0); + + for (iDefaultDevice = 0; iDefaultDevice < ma_countof(pDefaultDeviceNames); iDefaultDevice += 1) { + if (ma_strcmp(pDefaultDeviceNames[iDefaultDevice], pDescriptor->pDeviceID->audio4) == 0) { + break; + } + } + + if (iDefaultDevice == ma_countof(pDefaultDeviceNames)) { + iDefaultDevice = (size_t)-1; + } + } + + if (fd == -1) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to open device."); + return ma_result_from_errno(errno); + } + +#if !defined(MA_AUDIO4_USE_NEW_API) /* Old API */ + { + audio_info_t fdInfo; + int fdInfoResult = -1; + + /* + The documentation is a little bit unclear to me as to how it handles formats. It says the + following: + + Regardless of formats supported by underlying driver, the audio driver accepts the + following formats. + + By then the next sentence says this: + + `encoding` and `precision` are one of the values obtained by AUDIO_GETENC. + + It sounds like a direct contradiction to me. I'm going to play this safe any only use the + best sample format returned by AUDIO_GETENC. If the requested format is supported we'll + use that, but otherwise we'll just use our standard format priorities to pick an + appropriate one. + */ + AUDIO_INITINFO(&fdInfo); + + /* + Get the default format from the audioctl file if we're asking for a default device. If we + retrieve it from /dev/audio it'll default to mono 8000Hz. 
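+
+        As an illustration of the mapping applied below (see ma_encoding_from_format__audio4() above):
+        requesting ma_format_s16 on a little-endian host produces encoding = AUDIO_ENCODING_SLINEAR_LE and
+        precision = 16, which is what ends up in fdInfo.record / fdInfo.play before AUDIO_SETINFO is issued.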
+ */ + if (iDefaultDevice != (size_t)-1) { + /* We're using a default device. Get the info from the /dev/audioctl file instead of /dev/audio. */ + int fdctl = open(pDefaultDeviceCtlNames[iDefaultDevice], fdFlags, 0); + if (fdctl != -1) { + fdInfoResult = ioctl(fdctl, AUDIO_GETINFO, &fdInfo); + close(fdctl); + } + } + + if (fdInfoResult == -1) { + /* We still don't have the default device info so just retrieve it from the main audio device. */ + if (ioctl(fd, AUDIO_GETINFO, &fdInfo) < 0) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] AUDIO_GETINFO failed."); + return ma_result_from_errno(errno); + } + } + + /* We get the driver to do as much of the data conversion as possible. */ + if (deviceType == ma_device_type_capture) { + fdInfo.mode = AUMODE_RECORD; + ma_encoding_from_format__audio4(ma_best_format_from_fd__audio4(fd, pDescriptor->format), &fdInfo.record.encoding, &fdInfo.record.precision); + + if (pDescriptor->channels != 0) { + fdInfo.record.channels = ma_clamp(pDescriptor->channels, 1, 12); /* From the documentation: `channels` ranges from 1 to 12. */ + } + + if (pDescriptor->sampleRate != 0) { + fdInfo.record.sample_rate = ma_clamp(pDescriptor->sampleRate, 1000, 192000); /* From the documentation: `frequency` ranges from 1000Hz to 192000Hz. (They mean `sample_rate` instead of `frequency`.) */ + } + } else { + fdInfo.mode = AUMODE_PLAY; + ma_encoding_from_format__audio4(ma_best_format_from_fd__audio4(fd, pDescriptor->format), &fdInfo.play.encoding, &fdInfo.play.precision); + + if (pDescriptor->channels != 0) { + fdInfo.play.channels = ma_clamp(pDescriptor->channels, 1, 12); /* From the documentation: `channels` ranges from 1 to 12. */ + } + + if (pDescriptor->sampleRate != 0) { + fdInfo.play.sample_rate = ma_clamp(pDescriptor->sampleRate, 1000, 192000); /* From the documentation: `frequency` ranges from 1000Hz to 192000Hz. (They mean `sample_rate` instead of `frequency`.) */ + } + } + + if (ioctl(fd, AUDIO_SETINFO, &fdInfo) < 0) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to set device format. AUDIO_SETINFO failed."); + return ma_result_from_errno(errno); + } + + if (ioctl(fd, AUDIO_GETINFO, &fdInfo) < 0) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] AUDIO_GETINFO failed."); + return ma_result_from_errno(errno); + } + + if (deviceType == ma_device_type_capture) { + internalFormat = ma_format_from_prinfo__audio4(&fdInfo.record); + internalChannels = fdInfo.record.channels; + internalSampleRate = fdInfo.record.sample_rate; + } else { + internalFormat = ma_format_from_prinfo__audio4(&fdInfo.play); + internalChannels = fdInfo.play.channels; + internalSampleRate = fdInfo.play.sample_rate; + } + + if (internalFormat == ma_format_unknown) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] The device's internal device format is not supported by miniaudio. The device is unusable."); + return MA_FORMAT_NOT_SUPPORTED; + } + + /* Buffer. 
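+
+        Worked example (numbers purely illustrative): a descriptor resolving to 3 periods of 4096 bytes each
+        results in fdInfo.hiwat = 3, fdInfo.lowat = 2 and fdInfo.blocksize = 4096 below; the values audio4
+        actually accepts are then read back and converted into frames.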
*/ + { + ma_uint32 internalPeriodSizeInBytes; + + internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, internalSampleRate, pConfig->performanceProfile); + + internalPeriodSizeInBytes = internalPeriodSizeInFrames * ma_get_bytes_per_frame(internalFormat, internalChannels); + if (internalPeriodSizeInBytes < 16) { + internalPeriodSizeInBytes = 16; + } + + internalPeriods = pDescriptor->periodCount; + if (internalPeriods < 2) { + internalPeriods = 2; + } + + /* What miniaudio calls a period, audio4 calls a block. */ + AUDIO_INITINFO(&fdInfo); + fdInfo.hiwat = internalPeriods; + fdInfo.lowat = internalPeriods-1; + fdInfo.blocksize = internalPeriodSizeInBytes; + if (ioctl(fd, AUDIO_SETINFO, &fdInfo) < 0) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to set internal buffer size. AUDIO_SETINFO failed."); + return ma_result_from_errno(errno); + } + + internalPeriods = fdInfo.hiwat; + internalPeriodSizeInFrames = fdInfo.blocksize / ma_get_bytes_per_frame(internalFormat, internalChannels); + } + } +#else + { + struct audio_swpar fdPar; + + /* We need to retrieve the format of the device so we can know the channel count and sample rate. Then we can calculate the buffer size. */ + if (ioctl(fd, AUDIO_GETPAR, &fdPar) < 0) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to retrieve initial device parameters."); + return ma_result_from_errno(errno); + } + + internalFormat = ma_format_from_swpar__audio4(&fdPar); + internalChannels = (deviceType == ma_device_type_capture) ? fdPar.rchan : fdPar.pchan; + internalSampleRate = fdPar.rate; + + if (internalFormat == ma_format_unknown) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] The device's internal device format is not supported by miniaudio. The device is unusable."); + return MA_FORMAT_NOT_SUPPORTED; + } + + /* Buffer. */ + { + ma_uint32 internalPeriodSizeInBytes; + + internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, internalSampleRate, pConfig->performanceProfile); + + /* What miniaudio calls a period, audio4 calls a block. */ + internalPeriodSizeInBytes = internalPeriodSizeInFrames * ma_get_bytes_per_frame(internalFormat, internalChannels); + if (internalPeriodSizeInBytes < 16) { + internalPeriodSizeInBytes = 16; + } + + fdPar.nblks = pDescriptor->periodCount; + fdPar.round = internalPeriodSizeInBytes; + + if (ioctl(fd, AUDIO_SETPAR, &fdPar) < 0) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to set device parameters."); + return ma_result_from_errno(errno); + } + + if (ioctl(fd, AUDIO_GETPAR, &fdPar) < 0) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to retrieve actual device parameters."); + return ma_result_from_errno(errno); + } + } + + internalFormat = ma_format_from_swpar__audio4(&fdPar); + internalChannels = (deviceType == ma_device_type_capture) ? fdPar.rchan : fdPar.pchan; + internalSampleRate = fdPar.rate; + internalPeriods = fdPar.nblks; + internalPeriodSizeInFrames = fdPar.round / ma_get_bytes_per_frame(internalFormat, internalChannels); + } +#endif + + if (internalFormat == ma_format_unknown) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] The device's internal device format is not supported by miniaudio. 
The device is unusable."); + return MA_FORMAT_NOT_SUPPORTED; + } + + if (deviceType == ma_device_type_capture) { + pDevice->audio4.fdCapture = fd; + } else { + pDevice->audio4.fdPlayback = fd; + } + + pDescriptor->format = internalFormat; + pDescriptor->channels = internalChannels; + pDescriptor->sampleRate = internalSampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_sound4, pDescriptor->channelMap, ma_countof(pDescriptor->channelMap), internalChannels); + pDescriptor->periodSizeInFrames = internalPeriodSizeInFrames; + pDescriptor->periodCount = internalPeriods; + + return MA_SUCCESS; +} + +static ma_result ma_device_init__audio4(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->audio4); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + pDevice->audio4.fdCapture = -1; + pDevice->audio4.fdPlayback = -1; + + /* + The version of the operating system dictates whether or not the device is exclusive or shared. NetBSD + introduced in-kernel mixing which means it's shared. All other BSD flavours are exclusive as far as + I'm aware. + */ +#if defined(__NetBSD_Version__) && __NetBSD_Version__ >= 800000000 + /* NetBSD 8.0+ */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } +#else + /* All other flavors. */ +#endif + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_fd__audio4(pDevice, pConfig, pDescriptorCapture, ma_device_type_capture); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_fd__audio4(pDevice, pConfig, pDescriptorPlayback, ma_device_type_playback); + if (result != MA_SUCCESS) { + if (pConfig->deviceType == ma_device_type_duplex) { + close(pDevice->audio4.fdCapture); + } + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_start__audio4(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + if (pDevice->audio4.fdCapture == -1) { + return MA_INVALID_ARGS; + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + if (pDevice->audio4.fdPlayback == -1) { + return MA_INVALID_ARGS; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop_fd__audio4(ma_device* pDevice, int fd) +{ + if (fd == -1) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_AUDIO4_USE_NEW_API) + if (ioctl(fd, AUDIO_FLUSH, 0) < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to stop device. AUDIO_FLUSH failed."); + return ma_result_from_errno(errno); + } +#else + if (ioctl(fd, AUDIO_STOP, 0) < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to stop device. 
AUDIO_STOP failed."); + return ma_result_from_errno(errno); + } +#endif + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__audio4(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma_result result; + + result = ma_device_stop_fd__audio4(pDevice, pDevice->audio4.fdCapture); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_result result; + + /* Drain the device first. If this fails we'll just need to flush without draining. Unfortunately draining isn't available on newer version of OpenBSD. */ +#if !defined(MA_AUDIO4_USE_NEW_API) + ioctl(pDevice->audio4.fdPlayback, AUDIO_DRAIN, 0); +#endif + + /* Here is where the device is stopped immediately. */ + result = ma_device_stop_fd__audio4(pDevice, pDevice->audio4.fdPlayback); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_write__audio4(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +{ + int result; + + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + result = write(pDevice->audio4.fdPlayback, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + if (result < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to write data to the device."); + return ma_result_from_errno(errno); + } + + if (pFramesWritten != NULL) { + *pFramesWritten = (ma_uint32)result / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_read__audio4(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + int result; + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + result = read(pDevice->audio4.fdCapture, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + if (result < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to read data from the device."); + return ma_result_from_errno(errno); + } + + if (pFramesRead != NULL) { + *pFramesRead = (ma_uint32)result / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_uninit__audio4(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_audio4); + + (void)pContext; + return MA_SUCCESS; +} + +static ma_result ma_context_init__audio4(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + MA_ASSERT(pContext != NULL); + + (void)pConfig; + + pCallbacks->onContextInit = ma_context_init__audio4; + pCallbacks->onContextUninit = ma_context_uninit__audio4; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__audio4; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__audio4; + pCallbacks->onDeviceInit = ma_device_init__audio4; + pCallbacks->onDeviceUninit = ma_device_uninit__audio4; + pCallbacks->onDeviceStart = ma_device_start__audio4; + pCallbacks->onDeviceStop = ma_device_stop__audio4; + pCallbacks->onDeviceRead = ma_device_read__audio4; + pCallbacks->onDeviceWrite = ma_device_write__audio4; + pCallbacks->onDeviceDataLoop = NULL; + + return MA_SUCCESS; +} 
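+
+/*
+Illustrative sketch (not part of the backend itself; uses only the public miniaudio API): the callbacks
+registered above are what get invoked when a context is initialized with the audio4 backend, e.g.:
+
+    ma_backend backends[] = { ma_backend_audio4 };
+    ma_context context;
+    if (ma_context_init(backends, 1, NULL, &context) == MA_SUCCESS) {
+        ma_device_info* pPlaybackInfos;
+        ma_uint32 playbackCount;
+        ma_context_get_devices(&context, &pPlaybackInfos, &playbackCount, NULL, NULL);   <-- routes into ma_context_enumerate_devices__audio4().
+        ma_context_uninit(&context);
+    }
+*/
+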
+#endif /* audio4 */ + + +/****************************************************************************** + +OSS Backend + +******************************************************************************/ +#ifdef MA_HAS_OSS +#include +#include +#include +#include + +#ifndef SNDCTL_DSP_HALT +#define SNDCTL_DSP_HALT SNDCTL_DSP_RESET +#endif + +#define MA_OSS_DEFAULT_DEVICE_NAME "/dev/dsp" + +static int ma_open_temp_device__oss() +{ + /* The OSS sample code uses "/dev/mixer" as the device for getting system properties so I'm going to do the same. */ + int fd = open("/dev/mixer", O_RDONLY, 0); + if (fd >= 0) { + return fd; + } + + return -1; +} + +static ma_result ma_context_open_device__oss(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, int* pfd) +{ + const char* deviceName; + int flags; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pfd != NULL); + (void)pContext; + + *pfd = -1; + + /* This function should only be called for playback or capture, not duplex. */ + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + deviceName = MA_OSS_DEFAULT_DEVICE_NAME; + if (pDeviceID != NULL) { + deviceName = pDeviceID->oss; + } + + flags = (deviceType == ma_device_type_playback) ? O_WRONLY : O_RDONLY; + if (shareMode == ma_share_mode_exclusive) { + flags |= O_EXCL; + } + + *pfd = open(deviceName, flags, 0); + if (*pfd == -1) { + return ma_result_from_errno(errno); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_enumerate_devices__oss(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + int fd; + oss_sysinfo si; + int result; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + fd = ma_open_temp_device__oss(); + if (fd == -1) { + ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[OSS] Failed to open a temporary device for retrieving system information used for device enumeration."); + return MA_NO_BACKEND; + } + + result = ioctl(fd, SNDCTL_SYSINFO, &si); + if (result != -1) { + int iAudioDevice; + for (iAudioDevice = 0; iAudioDevice < si.numaudios; ++iAudioDevice) { + oss_audioinfo ai; + ai.dev = iAudioDevice; + result = ioctl(fd, SNDCTL_AUDIOINFO, &ai); + if (result != -1) { + if (ai.devnode[0] != '\0') { /* <-- Can be blank, according to documentation. */ + ma_device_info deviceInfo; + ma_bool32 isTerminating = MA_FALSE; + + MA_ZERO_OBJECT(&deviceInfo); + + /* ID */ + ma_strncpy_s(deviceInfo.id.oss, sizeof(deviceInfo.id.oss), ai.devnode, (size_t)-1); + + /* + The human readable device name should be in the "ai.handle" variable, but it can + sometimes be empty in which case we just fall back to "ai.name" which is less user + friendly, but usually has a value. + */ + if (ai.handle[0] != '\0') { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ai.handle, (size_t)-1); + } else { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ai.name, (size_t)-1); + } + + /* The device can be both playback and capture. 
*/ + if (!isTerminating && (ai.caps & PCM_CAP_OUTPUT) != 0) { + isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + if (!isTerminating && (ai.caps & PCM_CAP_INPUT) != 0) { + isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + + if (isTerminating) { + break; + } + } + } + } + } else { + close(fd); + ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[OSS] Failed to retrieve system information for device enumeration."); + return MA_NO_BACKEND; + } + + close(fd); + return MA_SUCCESS; +} + +static void ma_context_add_native_data_format__oss(ma_context* pContext, oss_audioinfo* pAudioInfo, ma_format format, ma_device_info* pDeviceInfo) +{ + unsigned int minChannels; + unsigned int maxChannels; + unsigned int iRate; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pAudioInfo != NULL); + MA_ASSERT(pDeviceInfo != NULL); + + /* If we support all channels we just report 0. */ + minChannels = ma_clamp(pAudioInfo->min_channels, MA_MIN_CHANNELS, MA_MAX_CHANNELS); + maxChannels = ma_clamp(pAudioInfo->max_channels, MA_MIN_CHANNELS, MA_MAX_CHANNELS); + + /* + OSS has this annoying thing where sample rates can be reported in two ways. We prefer explicitness, + which OSS has in the form of nrates/rates, however there are times where nrates can be 0, in which + case we'll need to use min_rate and max_rate and report only standard rates. + */ + if (pAudioInfo->nrates > 0) { + for (iRate = 0; iRate < pAudioInfo->nrates; iRate += 1) { + unsigned int rate = pAudioInfo->rates[iRate]; + + if (minChannels == MA_MIN_CHANNELS && maxChannels == MA_MAX_CHANNELS) { + ma_device_info_add_native_data_format(pDeviceInfo, format, 0, rate, 0); /* Set the channel count to 0 to indicate that all channel counts are supported. */ + } else { + unsigned int iChannel; + for (iChannel = minChannels; iChannel <= maxChannels; iChannel += 1) { + ma_device_info_add_native_data_format(pDeviceInfo, format, iChannel, rate, 0); + } + } + } + } else { + for (iRate = 0; iRate < ma_countof(g_maStandardSampleRatePriorities); iRate += 1) { + ma_uint32 standardRate = g_maStandardSampleRatePriorities[iRate]; + + if (standardRate >= (ma_uint32)pAudioInfo->min_rate && standardRate <= (ma_uint32)pAudioInfo->max_rate) { + if (minChannels == MA_MIN_CHANNELS && maxChannels == MA_MAX_CHANNELS) { + ma_device_info_add_native_data_format(pDeviceInfo, format, 0, standardRate, 0); /* Set the channel count to 0 to indicate that all channel counts are supported. */ + } else { + unsigned int iChannel; + for (iChannel = minChannels; iChannel <= maxChannels; iChannel += 1) { + ma_device_info_add_native_data_format(pDeviceInfo, format, iChannel, standardRate, 0); + } + } + } + } + } +} + +static ma_result ma_context_get_device_info__oss(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + ma_bool32 foundDevice; + int fdTemp; + oss_sysinfo si; + int result; + + MA_ASSERT(pContext != NULL); + + /* Handle the default device a little differently. */ + if (pDeviceID == NULL) { + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + return MA_SUCCESS; + } + + + /* If we get here it means we are _not_ using the default device. 
*/ + foundDevice = MA_FALSE; + + fdTemp = ma_open_temp_device__oss(); + if (fdTemp == -1) { + ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[OSS] Failed to open a temporary device for retrieving system information used for device enumeration."); + return MA_NO_BACKEND; + } + + result = ioctl(fdTemp, SNDCTL_SYSINFO, &si); + if (result != -1) { + int iAudioDevice; + for (iAudioDevice = 0; iAudioDevice < si.numaudios; ++iAudioDevice) { + oss_audioinfo ai; + ai.dev = iAudioDevice; + result = ioctl(fdTemp, SNDCTL_AUDIOINFO, &ai); + if (result != -1) { + if (ma_strcmp(ai.devnode, pDeviceID->oss) == 0) { + /* It has the same name, so now just confirm the type. */ + if ((deviceType == ma_device_type_playback && ((ai.caps & PCM_CAP_OUTPUT) != 0)) || + (deviceType == ma_device_type_capture && ((ai.caps & PCM_CAP_INPUT) != 0))) { + unsigned int formatMask; + + /* ID */ + ma_strncpy_s(pDeviceInfo->id.oss, sizeof(pDeviceInfo->id.oss), ai.devnode, (size_t)-1); + + /* + The human readable device name should be in the "ai.handle" variable, but it can + sometimes be empty in which case we just fall back to "ai.name" which is less user + friendly, but usually has a value. + */ + if (ai.handle[0] != '\0') { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), ai.handle, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), ai.name, (size_t)-1); + } + + + pDeviceInfo->nativeDataFormatCount = 0; + + if (deviceType == ma_device_type_playback) { + formatMask = ai.oformats; + } else { + formatMask = ai.iformats; + } + + if (((formatMask & AFMT_S16_LE) != 0 && ma_is_little_endian()) || (AFMT_S16_BE && ma_is_big_endian())) { + ma_context_add_native_data_format__oss(pContext, &ai, ma_format_s16, pDeviceInfo); + } + if (((formatMask & AFMT_S32_LE) != 0 && ma_is_little_endian()) || (AFMT_S32_BE && ma_is_big_endian())) { + ma_context_add_native_data_format__oss(pContext, &ai, ma_format_s32, pDeviceInfo); + } + if ((formatMask & AFMT_U8) != 0) { + ma_context_add_native_data_format__oss(pContext, &ai, ma_format_u8, pDeviceInfo); + } + + foundDevice = MA_TRUE; + break; + } + } + } + } + } else { + close(fdTemp); + ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[OSS] Failed to retrieve system information for device enumeration."); + return MA_NO_BACKEND; + } + + + close(fdTemp); + + if (!foundDevice) { + return MA_NO_DEVICE; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_uninit__oss(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + close(pDevice->oss.fdCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + close(pDevice->oss.fdPlayback); + } + + return MA_SUCCESS; +} + +static int ma_format_to_oss(ma_format format) +{ + int ossFormat = AFMT_U8; + switch (format) { + case ma_format_s16: ossFormat = (ma_is_little_endian()) ? AFMT_S16_LE : AFMT_S16_BE; break; + case ma_format_s24: ossFormat = (ma_is_little_endian()) ? AFMT_S32_LE : AFMT_S32_BE; break; + case ma_format_s32: ossFormat = (ma_is_little_endian()) ? AFMT_S32_LE : AFMT_S32_BE; break; + case ma_format_f32: ossFormat = (ma_is_little_endian()) ? 
AFMT_S16_LE : AFMT_S16_BE; break; + case ma_format_u8: + default: ossFormat = AFMT_U8; break; + } + + return ossFormat; +} + +static ma_format ma_format_from_oss(int ossFormat) +{ + if (ossFormat == AFMT_U8) { + return ma_format_u8; + } else { + if (ma_is_little_endian()) { + switch (ossFormat) { + case AFMT_S16_LE: return ma_format_s16; + case AFMT_S32_LE: return ma_format_s32; + default: return ma_format_unknown; + } + } else { + switch (ossFormat) { + case AFMT_S16_BE: return ma_format_s16; + case AFMT_S32_BE: return ma_format_s32; + default: return ma_format_unknown; + } + } + } + + return ma_format_unknown; +} + +static ma_result ma_device_init_fd__oss(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptor, ma_device_type deviceType) +{ + ma_result result; + int ossResult; + int fd; + const ma_device_id* pDeviceID = NULL; + ma_share_mode shareMode; + int ossFormat; + int ossChannels; + int ossSampleRate; + int ossFragment; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); + + pDeviceID = pDescriptor->pDeviceID; + shareMode = pDescriptor->shareMode; + ossFormat = ma_format_to_oss((pDescriptor->format != ma_format_unknown) ? pDescriptor->format : ma_format_s16); /* Use s16 by default because OSS doesn't like floating point. */ + ossChannels = (int)(pDescriptor->channels > 0) ? pDescriptor->channels : MA_DEFAULT_CHANNELS; + ossSampleRate = (int)(pDescriptor->sampleRate > 0) ? pDescriptor->sampleRate : MA_DEFAULT_SAMPLE_RATE; + + result = ma_context_open_device__oss(pDevice->pContext, deviceType, pDeviceID, shareMode, &fd); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to open device."); + return result; + } + + /* + The OSS documantation is very clear about the order we should be initializing the device's properties: + 1) Format + 2) Channels + 3) Sample rate. + */ + + /* Format. */ + ossResult = ioctl(fd, SNDCTL_DSP_SETFMT, &ossFormat); + if (ossResult == -1) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to set format."); + return ma_result_from_errno(errno); + } + + /* Channels. */ + ossResult = ioctl(fd, SNDCTL_DSP_CHANNELS, &ossChannels); + if (ossResult == -1) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to set channel count."); + return ma_result_from_errno(errno); + } + + /* Sample Rate. */ + ossResult = ioctl(fd, SNDCTL_DSP_SPEED, &ossSampleRate); + if (ossResult == -1) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to set sample rate."); + return ma_result_from_errno(errno); + } + + /* + Buffer. + + The documentation says that the fragment settings should be set as soon as possible, but I'm not sure if + it should be done before or after format/channels/rate. + + OSS wants the fragment size in bytes and a power of 2. When setting, we specify the power, not the actual + value. 
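+
+    Worked example (numbers purely illustrative): a 1024 byte period gives ossFragmentSizePower = 10
+    (1 << 10 == 1024); with pConfig->periods == 4 the value handed to SNDCTL_DSP_SETFRAGMENT below is
+    (4 << 16) | 10, i.e. the period count in the upper 16 bits and the size exponent in the lower 16 bits.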
+ */ + { + ma_uint32 periodSizeInFrames; + ma_uint32 periodSizeInBytes; + ma_uint32 ossFragmentSizePower; + + periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, (ma_uint32)ossSampleRate, pConfig->performanceProfile); + + periodSizeInBytes = ma_round_to_power_of_2(periodSizeInFrames * ma_get_bytes_per_frame(ma_format_from_oss(ossFormat), ossChannels)); + if (periodSizeInBytes < 16) { + periodSizeInBytes = 16; + } + + ossFragmentSizePower = 4; + periodSizeInBytes >>= 4; + while (periodSizeInBytes >>= 1) { + ossFragmentSizePower += 1; + } + + ossFragment = (int)((pConfig->periods << 16) | ossFragmentSizePower); + ossResult = ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &ossFragment); + if (ossResult == -1) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to set fragment size and period count."); + return ma_result_from_errno(errno); + } + } + + /* Internal settings. */ + if (deviceType == ma_device_type_capture) { + pDevice->oss.fdCapture = fd; + } else { + pDevice->oss.fdPlayback = fd; + } + + pDescriptor->format = ma_format_from_oss(ossFormat); + pDescriptor->channels = ossChannels; + pDescriptor->sampleRate = ossSampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_sound4, pDescriptor->channelMap, ma_countof(pDescriptor->channelMap), pDescriptor->channels); + pDescriptor->periodCount = (ma_uint32)(ossFragment >> 16); + pDescriptor->periodSizeInFrames = (ma_uint32)(1 << (ossFragment & 0xFFFF)) / ma_get_bytes_per_frame(pDescriptor->format, pDescriptor->channels); + + if (pDescriptor->format == ma_format_unknown) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] The device's internal format is not supported by miniaudio."); + return MA_FORMAT_NOT_SUPPORTED; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_init__oss(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pConfig != NULL); + + MA_ZERO_OBJECT(&pDevice->oss); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_fd__oss(pDevice, pConfig, pDescriptorCapture, ma_device_type_capture); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to open device."); + return result; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_fd__oss(pDevice, pConfig, pDescriptorPlayback, ma_device_type_playback); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to open device."); + return result; + } + } + + return MA_SUCCESS; +} + +/* +Note on Starting and Stopping +============================= +In the past I was using SNDCTL_DSP_HALT to stop the device, however this results in issues when +trying to resume the device again. If we use SNDCTL_DSP_HALT, the next write() or read() will +fail. Instead what we need to do is just not write or read to and from the device when the +device is not running. + +As a result, both the start and stop functions for OSS are just empty stubs. The starting and +stopping logic is handled by ma_device_write__oss() and ma_device_read__oss(). 
These will check +the device state, and if the device is stopped they will simply not do any kind of processing. + +The downside to this technique is that I've noticed a fairly lengthy delay in stopping the +device, up to a second. This is on a virtual machine, and as such might just be due to the +virtual drivers, but I'm not fully sure. I am not sure how to work around this problem so for +the moment that's just how it's going to have to be. + +When starting the device, OSS will automatically start it when write() or read() is called. +*/ +static ma_result ma_device_start__oss(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + /* The device is automatically started with reading and writing. */ + (void)pDevice; + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__oss(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + /* See note above on why this is empty. */ + (void)pDevice; + + return MA_SUCCESS; +} + +static ma_result ma_device_write__oss(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +{ + int resultOSS; + ma_uint32 deviceState; + + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + /* Don't do any processing if the device is stopped. */ + deviceState = ma_device_get_state(pDevice); + if (deviceState != ma_device_state_started && deviceState != ma_device_state_starting) { + return MA_SUCCESS; + } + + resultOSS = write(pDevice->oss.fdPlayback, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + if (resultOSS < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to send data from the client to the device."); + return ma_result_from_errno(errno); + } + + if (pFramesWritten != NULL) { + *pFramesWritten = (ma_uint32)resultOSS / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_read__oss(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + int resultOSS; + ma_uint32 deviceState; + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + /* Don't do any processing if the device is stopped. */ + deviceState = ma_device_get_state(pDevice); + if (deviceState != ma_device_state_started && deviceState != ma_device_state_starting) { + return MA_SUCCESS; + } + + resultOSS = read(pDevice->oss.fdCapture, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + if (resultOSS < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to read data from the device to be sent to the client."); + return ma_result_from_errno(errno); + } + + if (pFramesRead != NULL) { + *pFramesRead = (ma_uint32)resultOSS / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_uninit__oss(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_oss); + + (void)pContext; + return MA_SUCCESS; +} + +static ma_result ma_context_init__oss(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + int fd; + int ossVersion; + int result; + + MA_ASSERT(pContext != NULL); + + (void)pConfig; + + /* Try opening a temporary device first so we can get version information. This is closed at the end. 
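+
+    The version comes back from OSS_GETVERSION as a packed integer. As a purely illustrative example, a value
+    of 0x040100 would decode to major version 4 and minor version 1 using the same shifts that are applied
+    further below:
+
+        versionMajor = (0x040100 & 0xFF0000) >> 16;    <-- 4
+        versionMinor = (0x040100 & 0x00FF00) >> 8;     <-- 1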
*/ + fd = ma_open_temp_device__oss(); + if (fd == -1) { + ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[OSS] Failed to open temporary device for retrieving system properties."); /* Looks liks OSS isn't installed, or there are no available devices. */ + return MA_NO_BACKEND; + } + + /* Grab the OSS version. */ + ossVersion = 0; + result = ioctl(fd, OSS_GETVERSION, &ossVersion); + if (result == -1) { + close(fd); + ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[OSS] Failed to retrieve OSS version."); + return MA_NO_BACKEND; + } + + /* The file handle to temp device is no longer needed. Close ASAP. */ + close(fd); + + pContext->oss.versionMajor = ((ossVersion & 0xFF0000) >> 16); + pContext->oss.versionMinor = ((ossVersion & 0x00FF00) >> 8); + + pCallbacks->onContextInit = ma_context_init__oss; + pCallbacks->onContextUninit = ma_context_uninit__oss; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__oss; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__oss; + pCallbacks->onDeviceInit = ma_device_init__oss; + pCallbacks->onDeviceUninit = ma_device_uninit__oss; + pCallbacks->onDeviceStart = ma_device_start__oss; + pCallbacks->onDeviceStop = ma_device_stop__oss; + pCallbacks->onDeviceRead = ma_device_read__oss; + pCallbacks->onDeviceWrite = ma_device_write__oss; + pCallbacks->onDeviceDataLoop = NULL; + + return MA_SUCCESS; +} +#endif /* OSS */ + + + + + +/****************************************************************************** + +AAudio Backend + +******************************************************************************/ +#ifdef MA_HAS_AAUDIO + +/*#include */ + +typedef int32_t ma_aaudio_result_t; +typedef int32_t ma_aaudio_direction_t; +typedef int32_t ma_aaudio_sharing_mode_t; +typedef int32_t ma_aaudio_format_t; +typedef int32_t ma_aaudio_stream_state_t; +typedef int32_t ma_aaudio_performance_mode_t; +typedef int32_t ma_aaudio_usage_t; +typedef int32_t ma_aaudio_content_type_t; +typedef int32_t ma_aaudio_input_preset_t; +typedef int32_t ma_aaudio_allowed_capture_policy_t; +typedef int32_t ma_aaudio_data_callback_result_t; +typedef struct ma_AAudioStreamBuilder_t* ma_AAudioStreamBuilder; +typedef struct ma_AAudioStream_t* ma_AAudioStream; + +#define MA_AAUDIO_UNSPECIFIED 0 + +/* Result codes. miniaudio only cares about the success code. */ +#define MA_AAUDIO_OK 0 + +/* Directions. */ +#define MA_AAUDIO_DIRECTION_OUTPUT 0 +#define MA_AAUDIO_DIRECTION_INPUT 1 + +/* Sharing modes. */ +#define MA_AAUDIO_SHARING_MODE_EXCLUSIVE 0 +#define MA_AAUDIO_SHARING_MODE_SHARED 1 + +/* Formats. */ +#define MA_AAUDIO_FORMAT_PCM_I16 1 +#define MA_AAUDIO_FORMAT_PCM_FLOAT 2 + +/* Stream states. */ +#define MA_AAUDIO_STREAM_STATE_UNINITIALIZED 0 +#define MA_AAUDIO_STREAM_STATE_UNKNOWN 1 +#define MA_AAUDIO_STREAM_STATE_OPEN 2 +#define MA_AAUDIO_STREAM_STATE_STARTING 3 +#define MA_AAUDIO_STREAM_STATE_STARTED 4 +#define MA_AAUDIO_STREAM_STATE_PAUSING 5 +#define MA_AAUDIO_STREAM_STATE_PAUSED 6 +#define MA_AAUDIO_STREAM_STATE_FLUSHING 7 +#define MA_AAUDIO_STREAM_STATE_FLUSHED 8 +#define MA_AAUDIO_STREAM_STATE_STOPPING 9 +#define MA_AAUDIO_STREAM_STATE_STOPPED 10 +#define MA_AAUDIO_STREAM_STATE_CLOSING 11 +#define MA_AAUDIO_STREAM_STATE_CLOSED 12 +#define MA_AAUDIO_STREAM_STATE_DISCONNECTED 13 + +/* Performance modes. */ +#define MA_AAUDIO_PERFORMANCE_MODE_NONE 10 +#define MA_AAUDIO_PERFORMANCE_MODE_POWER_SAVING 11 +#define MA_AAUDIO_PERFORMANCE_MODE_LOW_LATENCY 12 + +/* Usage types. 
*/ +#define MA_AAUDIO_USAGE_MEDIA 1 +#define MA_AAUDIO_USAGE_VOICE_COMMUNICATION 2 +#define MA_AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING 3 +#define MA_AAUDIO_USAGE_ALARM 4 +#define MA_AAUDIO_USAGE_NOTIFICATION 5 +#define MA_AAUDIO_USAGE_NOTIFICATION_RINGTONE 6 +#define MA_AAUDIO_USAGE_NOTIFICATION_EVENT 10 +#define MA_AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY 11 +#define MA_AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE 12 +#define MA_AAUDIO_USAGE_ASSISTANCE_SONIFICATION 13 +#define MA_AAUDIO_USAGE_GAME 14 +#define MA_AAUDIO_USAGE_ASSISTANT 16 +#define MA_AAUDIO_SYSTEM_USAGE_EMERGENCY 1000 +#define MA_AAUDIO_SYSTEM_USAGE_SAFETY 1001 +#define MA_AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS 1002 +#define MA_AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT 1003 + +/* Content types. */ +#define MA_AAUDIO_CONTENT_TYPE_SPEECH 1 +#define MA_AAUDIO_CONTENT_TYPE_MUSIC 2 +#define MA_AAUDIO_CONTENT_TYPE_MOVIE 3 +#define MA_AAUDIO_CONTENT_TYPE_SONIFICATION 4 + +/* Input presets. */ +#define MA_AAUDIO_INPUT_PRESET_GENERIC 1 +#define MA_AAUDIO_INPUT_PRESET_CAMCORDER 5 +#define MA_AAUDIO_INPUT_PRESET_VOICE_RECOGNITION 6 +#define MA_AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION 7 +#define MA_AAUDIO_INPUT_PRESET_UNPROCESSED 9 +#define MA_AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE 10 + +/* Allowed Capture Policies */ +#define MA_AAUDIO_ALLOW_CAPTURE_BY_ALL 1 +#define MA_AAUDIO_ALLOW_CAPTURE_BY_SYSTEM 2 +#define MA_AAUDIO_ALLOW_CAPTURE_BY_NONE 3 + +/* Callback results. */ +#define MA_AAUDIO_CALLBACK_RESULT_CONTINUE 0 +#define MA_AAUDIO_CALLBACK_RESULT_STOP 1 + + +typedef ma_aaudio_data_callback_result_t (* ma_AAudioStream_dataCallback) (ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t numFrames); +typedef void (* ma_AAudioStream_errorCallback)(ma_AAudioStream *pStream, void *pUserData, ma_aaudio_result_t error); + +typedef ma_aaudio_result_t (* MA_PFN_AAudio_createStreamBuilder) (ma_AAudioStreamBuilder** ppBuilder); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStreamBuilder_delete) (ma_AAudioStreamBuilder* pBuilder); +typedef void (* MA_PFN_AAudioStreamBuilder_setDeviceId) (ma_AAudioStreamBuilder* pBuilder, int32_t deviceId); +typedef void (* MA_PFN_AAudioStreamBuilder_setDirection) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_direction_t direction); +typedef void (* MA_PFN_AAudioStreamBuilder_setSharingMode) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_sharing_mode_t sharingMode); +typedef void (* MA_PFN_AAudioStreamBuilder_setFormat) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_format_t format); +typedef void (* MA_PFN_AAudioStreamBuilder_setChannelCount) (ma_AAudioStreamBuilder* pBuilder, int32_t channelCount); +typedef void (* MA_PFN_AAudioStreamBuilder_setSampleRate) (ma_AAudioStreamBuilder* pBuilder, int32_t sampleRate); +typedef void (* MA_PFN_AAudioStreamBuilder_setBufferCapacityInFrames)(ma_AAudioStreamBuilder* pBuilder, int32_t numFrames); +typedef void (* MA_PFN_AAudioStreamBuilder_setFramesPerDataCallback) (ma_AAudioStreamBuilder* pBuilder, int32_t numFrames); +typedef void (* MA_PFN_AAudioStreamBuilder_setDataCallback) (ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream_dataCallback callback, void* pUserData); +typedef void (* MA_PFN_AAudioStreamBuilder_setErrorCallback) (ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream_errorCallback callback, void* pUserData); +typedef void (* MA_PFN_AAudioStreamBuilder_setPerformanceMode) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_performance_mode_t mode); +typedef void (* MA_PFN_AAudioStreamBuilder_setUsage) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_usage_t contentType); 
+typedef void (* MA_PFN_AAudioStreamBuilder_setContentType) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_content_type_t contentType); +typedef void (* MA_PFN_AAudioStreamBuilder_setInputPreset) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_input_preset_t inputPreset); +typedef void (* MA_PFN_AAudioStreamBuilder_setAllowedCapturePolicy) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_allowed_capture_policy_t policy); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStreamBuilder_openStream) (ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream** ppStream); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_close) (ma_AAudioStream* pStream); +typedef ma_aaudio_stream_state_t (* MA_PFN_AAudioStream_getState) (ma_AAudioStream* pStream); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_waitForStateChange) (ma_AAudioStream* pStream, ma_aaudio_stream_state_t inputState, ma_aaudio_stream_state_t* pNextState, int64_t timeoutInNanoseconds); +typedef ma_aaudio_format_t (* MA_PFN_AAudioStream_getFormat) (ma_AAudioStream* pStream); +typedef int32_t (* MA_PFN_AAudioStream_getChannelCount) (ma_AAudioStream* pStream); +typedef int32_t (* MA_PFN_AAudioStream_getSampleRate) (ma_AAudioStream* pStream); +typedef int32_t (* MA_PFN_AAudioStream_getBufferCapacityInFrames) (ma_AAudioStream* pStream); +typedef int32_t (* MA_PFN_AAudioStream_getFramesPerDataCallback) (ma_AAudioStream* pStream); +typedef int32_t (* MA_PFN_AAudioStream_getFramesPerBurst) (ma_AAudioStream* pStream); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_requestStart) (ma_AAudioStream* pStream); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_requestStop) (ma_AAudioStream* pStream); + +static ma_result ma_result_from_aaudio(ma_aaudio_result_t resultAA) +{ + switch (resultAA) + { + case MA_AAUDIO_OK: return MA_SUCCESS; + default: break; + } + + return MA_ERROR; +} + +static ma_aaudio_usage_t ma_to_usage__aaudio(ma_aaudio_usage usage) +{ + switch (usage) { + case ma_aaudio_usage_media: return MA_AAUDIO_USAGE_MEDIA; + case ma_aaudio_usage_voice_communication: return MA_AAUDIO_USAGE_VOICE_COMMUNICATION; + case ma_aaudio_usage_voice_communication_signalling: return MA_AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING; + case ma_aaudio_usage_alarm: return MA_AAUDIO_USAGE_ALARM; + case ma_aaudio_usage_notification: return MA_AAUDIO_USAGE_NOTIFICATION; + case ma_aaudio_usage_notification_ringtone: return MA_AAUDIO_USAGE_NOTIFICATION_RINGTONE; + case ma_aaudio_usage_notification_event: return MA_AAUDIO_USAGE_NOTIFICATION_EVENT; + case ma_aaudio_usage_assistance_accessibility: return MA_AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY; + case ma_aaudio_usage_assistance_navigation_guidance: return MA_AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE; + case ma_aaudio_usage_assistance_sonification: return MA_AAUDIO_USAGE_ASSISTANCE_SONIFICATION; + case ma_aaudio_usage_game: return MA_AAUDIO_USAGE_GAME; + case ma_aaudio_usage_assitant: return MA_AAUDIO_USAGE_ASSISTANT; + case ma_aaudio_usage_emergency: return MA_AAUDIO_SYSTEM_USAGE_EMERGENCY; + case ma_aaudio_usage_safety: return MA_AAUDIO_SYSTEM_USAGE_SAFETY; + case ma_aaudio_usage_vehicle_status: return MA_AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS; + case ma_aaudio_usage_announcement: return MA_AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT; + default: break; + } + + return MA_AAUDIO_USAGE_MEDIA; +} + +static ma_aaudio_content_type_t ma_to_content_type__aaudio(ma_aaudio_content_type contentType) +{ + switch (contentType) { + case ma_aaudio_content_type_speech: return MA_AAUDIO_CONTENT_TYPE_SPEECH; + case ma_aaudio_content_type_music: return 
MA_AAUDIO_CONTENT_TYPE_MUSIC; + case ma_aaudio_content_type_movie: return MA_AAUDIO_CONTENT_TYPE_MOVIE; + case ma_aaudio_content_type_sonification: return MA_AAUDIO_CONTENT_TYPE_SONIFICATION; + default: break; + } + + return MA_AAUDIO_CONTENT_TYPE_SPEECH; +} + +static ma_aaudio_input_preset_t ma_to_input_preset__aaudio(ma_aaudio_input_preset inputPreset) +{ + switch (inputPreset) { + case ma_aaudio_input_preset_generic: return MA_AAUDIO_INPUT_PRESET_GENERIC; + case ma_aaudio_input_preset_camcorder: return MA_AAUDIO_INPUT_PRESET_CAMCORDER; + case ma_aaudio_input_preset_voice_recognition: return MA_AAUDIO_INPUT_PRESET_VOICE_RECOGNITION; + case ma_aaudio_input_preset_voice_communication: return MA_AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION; + case ma_aaudio_input_preset_unprocessed: return MA_AAUDIO_INPUT_PRESET_UNPROCESSED; + case ma_aaudio_input_preset_voice_performance: return MA_AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE; + default: break; + } + + return MA_AAUDIO_INPUT_PRESET_GENERIC; +} + +static ma_aaudio_allowed_capture_policy_t ma_to_allowed_capture_policy__aaudio(ma_aaudio_allowed_capture_policy allowedCapturePolicy) +{ + switch (allowedCapturePolicy) { + case ma_aaudio_allow_capture_by_all: return MA_AAUDIO_ALLOW_CAPTURE_BY_ALL; + case ma_aaudio_allow_capture_by_system: return MA_AAUDIO_ALLOW_CAPTURE_BY_SYSTEM; + case ma_aaudio_allow_capture_by_none: return MA_AAUDIO_ALLOW_CAPTURE_BY_NONE; + default: break; + } + + return MA_AAUDIO_ALLOW_CAPTURE_BY_ALL; +} + +static void ma_stream_error_callback__aaudio(ma_AAudioStream* pStream, void* pUserData, ma_aaudio_result_t error) +{ + ma_result result; + ma_job job; + ma_device* pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + (void)error; + + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[AAudio] ERROR CALLBACK: error=%d, AAudioStream_getState()=%d\n", error, ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream)); + + /* + When we get an error, we'll assume that the stream is in an erroneous state and needs to be restarted. From the documentation, + we cannot do this from the error callback. Therefore we are going to use an event thread for the AAudio backend to do this + cleanly and safely. + */ + job = ma_job_init(MA_JOB_TYPE_DEVICE_AAUDIO_REROUTE); + job.data.device.aaudio.reroute.pDevice = pDevice; + + if (pStream == pDevice->aaudio.pStreamCapture) { + job.data.device.aaudio.reroute.deviceType = ma_device_type_capture; + } + else { + job.data.device.aaudio.reroute.deviceType = ma_device_type_playback; + } + + result = ma_device_job_thread_post(&pDevice->pContext->aaudio.jobThread, &job); + if (result != MA_SUCCESS) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[AAudio] Device Disconnected. 
Failed to post job for rerouting.\n"); + return; + } +} + +static ma_aaudio_data_callback_result_t ma_stream_data_callback_capture__aaudio(ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t frameCount) +{ + ma_device* pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + ma_device_handle_backend_data_callback(pDevice, NULL, pAudioData, frameCount); + + (void)pStream; + return MA_AAUDIO_CALLBACK_RESULT_CONTINUE; +} + +static ma_aaudio_data_callback_result_t ma_stream_data_callback_playback__aaudio(ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t frameCount) +{ + ma_device* pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + ma_device_handle_backend_data_callback(pDevice, pAudioData, NULL, frameCount); + + (void)pStream; + return MA_AAUDIO_CALLBACK_RESULT_CONTINUE; +} + +static ma_result ma_create_and_configure_AAudioStreamBuilder__aaudio(ma_context* pContext, const ma_device_id* pDeviceID, ma_device_type deviceType, ma_share_mode shareMode, const ma_device_descriptor* pDescriptor, const ma_device_config* pConfig, ma_device* pDevice, ma_AAudioStreamBuilder** ppBuilder) +{ + ma_AAudioStreamBuilder* pBuilder; + ma_aaudio_result_t resultAA; + + /* Safety. */ + *ppBuilder = NULL; + + resultAA = ((MA_PFN_AAudio_createStreamBuilder)pContext->aaudio.AAudio_createStreamBuilder)(&pBuilder); + if (resultAA != MA_AAUDIO_OK) { + return ma_result_from_aaudio(resultAA); + } + + if (pDeviceID != NULL) { + ((MA_PFN_AAudioStreamBuilder_setDeviceId)pContext->aaudio.AAudioStreamBuilder_setDeviceId)(pBuilder, pDeviceID->aaudio); + } + + ((MA_PFN_AAudioStreamBuilder_setDirection)pContext->aaudio.AAudioStreamBuilder_setDirection)(pBuilder, (deviceType == ma_device_type_playback) ? MA_AAUDIO_DIRECTION_OUTPUT : MA_AAUDIO_DIRECTION_INPUT); + ((MA_PFN_AAudioStreamBuilder_setSharingMode)pContext->aaudio.AAudioStreamBuilder_setSharingMode)(pBuilder, (shareMode == ma_share_mode_shared) ? MA_AAUDIO_SHARING_MODE_SHARED : MA_AAUDIO_SHARING_MODE_EXCLUSIVE); + + + /* If we have a device descriptor make sure we configure the stream builder to take our requested parameters. */ + if (pDescriptor != NULL) { + MA_ASSERT(pConfig != NULL); /* We must have a device config if we also have a descriptor. The config is required for AAudio specific configuration options. */ + + if (pDescriptor->sampleRate != 0) { + ((MA_PFN_AAudioStreamBuilder_setSampleRate)pContext->aaudio.AAudioStreamBuilder_setSampleRate)(pBuilder, pDescriptor->sampleRate); + } + + if (deviceType == ma_device_type_capture) { + if (pDescriptor->channels != 0) { + ((MA_PFN_AAudioStreamBuilder_setChannelCount)pContext->aaudio.AAudioStreamBuilder_setChannelCount)(pBuilder, pDescriptor->channels); + } + if (pDescriptor->format != ma_format_unknown) { + ((MA_PFN_AAudioStreamBuilder_setFormat)pContext->aaudio.AAudioStreamBuilder_setFormat)(pBuilder, (pDescriptor->format == ma_format_s16) ? MA_AAUDIO_FORMAT_PCM_I16 : MA_AAUDIO_FORMAT_PCM_FLOAT); + } + } else { + if (pDescriptor->channels != 0) { + ((MA_PFN_AAudioStreamBuilder_setChannelCount)pContext->aaudio.AAudioStreamBuilder_setChannelCount)(pBuilder, pDescriptor->channels); + } + if (pDescriptor->format != ma_format_unknown) { + ((MA_PFN_AAudioStreamBuilder_setFormat)pContext->aaudio.AAudioStreamBuilder_setFormat)(pBuilder, (pDescriptor->format == ma_format_s16) ? MA_AAUDIO_FORMAT_PCM_I16 : MA_AAUDIO_FORMAT_PCM_FLOAT); + } + } + + + /* + There have been reports where setting the frames per data callback results in an error + later on from Android. 
To address this, I'm experimenting with simply not setting it on + anything from Android 11 and earlier. Suggestions welcome on how we might be able to make + this more targetted. + */ + if (!pConfig->aaudio.enableCompatibilityWorkarounds || ma_android_sdk_version() > 30) { + /* + AAudio is annoying when it comes to it's buffer calculation stuff because it doesn't let you + retrieve the actual sample rate until after you've opened the stream. But you need to configure + the buffer capacity before you open the stream... :/ + + To solve, we're just going to assume MA_DEFAULT_SAMPLE_RATE (48000) and move on. + */ + ma_uint32 bufferCapacityInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, pDescriptor->sampleRate, pConfig->performanceProfile) * pDescriptor->periodCount; + + ((MA_PFN_AAudioStreamBuilder_setBufferCapacityInFrames)pContext->aaudio.AAudioStreamBuilder_setBufferCapacityInFrames)(pBuilder, bufferCapacityInFrames); + ((MA_PFN_AAudioStreamBuilder_setFramesPerDataCallback)pContext->aaudio.AAudioStreamBuilder_setFramesPerDataCallback)(pBuilder, bufferCapacityInFrames / pDescriptor->periodCount); + } + + if (deviceType == ma_device_type_capture) { + if (pConfig->aaudio.inputPreset != ma_aaudio_input_preset_default && pContext->aaudio.AAudioStreamBuilder_setInputPreset != NULL) { + ((MA_PFN_AAudioStreamBuilder_setInputPreset)pContext->aaudio.AAudioStreamBuilder_setInputPreset)(pBuilder, ma_to_input_preset__aaudio(pConfig->aaudio.inputPreset)); + } + + ((MA_PFN_AAudioStreamBuilder_setDataCallback)pContext->aaudio.AAudioStreamBuilder_setDataCallback)(pBuilder, ma_stream_data_callback_capture__aaudio, (void*)pDevice); + } else { + if (pConfig->aaudio.usage != ma_aaudio_usage_default && pContext->aaudio.AAudioStreamBuilder_setUsage != NULL) { + ((MA_PFN_AAudioStreamBuilder_setUsage)pContext->aaudio.AAudioStreamBuilder_setUsage)(pBuilder, ma_to_usage__aaudio(pConfig->aaudio.usage)); + } + + if (pConfig->aaudio.contentType != ma_aaudio_content_type_default && pContext->aaudio.AAudioStreamBuilder_setContentType != NULL) { + ((MA_PFN_AAudioStreamBuilder_setContentType)pContext->aaudio.AAudioStreamBuilder_setContentType)(pBuilder, ma_to_content_type__aaudio(pConfig->aaudio.contentType)); + } + + if (pConfig->aaudio.allowedCapturePolicy != ma_aaudio_allow_capture_default && pContext->aaudio.AAudioStreamBuilder_setAllowedCapturePolicy != NULL) { + ((MA_PFN_AAudioStreamBuilder_setAllowedCapturePolicy)pContext->aaudio.AAudioStreamBuilder_setAllowedCapturePolicy)(pBuilder, ma_to_allowed_capture_policy__aaudio(pConfig->aaudio.allowedCapturePolicy)); + } + + ((MA_PFN_AAudioStreamBuilder_setDataCallback)pContext->aaudio.AAudioStreamBuilder_setDataCallback)(pBuilder, ma_stream_data_callback_playback__aaudio, (void*)pDevice); + } + + /* Not sure how this affects things, but since there's a mapping between miniaudio's performance profiles and AAudio's performance modes, let go ahead and set it. */ + ((MA_PFN_AAudioStreamBuilder_setPerformanceMode)pContext->aaudio.AAudioStreamBuilder_setPerformanceMode)(pBuilder, (pConfig->performanceProfile == ma_performance_profile_low_latency) ? MA_AAUDIO_PERFORMANCE_MODE_LOW_LATENCY : MA_AAUDIO_PERFORMANCE_MODE_NONE); + + /* We need to set an error callback to detect device changes. */ + if (pDevice != NULL) { /* <-- pDevice should never be null if pDescriptor is not null, which is always the case if we hit this branch. Check anyway for safety. 
*/ + ((MA_PFN_AAudioStreamBuilder_setErrorCallback)pContext->aaudio.AAudioStreamBuilder_setErrorCallback)(pBuilder, ma_stream_error_callback__aaudio, (void*)pDevice); + } + } + + *ppBuilder = pBuilder; + + return MA_SUCCESS; +} + +static ma_result ma_open_stream_and_close_builder__aaudio(ma_context* pContext, ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream** ppStream) +{ + ma_result result; + + result = ma_result_from_aaudio(((MA_PFN_AAudioStreamBuilder_openStream)pContext->aaudio.AAudioStreamBuilder_openStream)(pBuilder, ppStream)); + ((MA_PFN_AAudioStreamBuilder_delete)pContext->aaudio.AAudioStreamBuilder_delete)(pBuilder); + + return result; +} + +static ma_result ma_open_stream_basic__aaudio(ma_context* pContext, const ma_device_id* pDeviceID, ma_device_type deviceType, ma_share_mode shareMode, ma_AAudioStream** ppStream) +{ + ma_result result; + ma_AAudioStreamBuilder* pBuilder; + + *ppStream = NULL; + + result = ma_create_and_configure_AAudioStreamBuilder__aaudio(pContext, pDeviceID, deviceType, shareMode, NULL, NULL, NULL, &pBuilder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_open_stream_and_close_builder__aaudio(pContext, pBuilder, ppStream); +} + +static ma_result ma_open_stream__aaudio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_type deviceType, const ma_device_descriptor* pDescriptor, ma_AAudioStream** ppStream) +{ + ma_result result; + ma_AAudioStreamBuilder* pBuilder; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pDescriptor != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); /* This function should not be called for a full-duplex device type. */ + + *ppStream = NULL; + + result = ma_create_and_configure_AAudioStreamBuilder__aaudio(pDevice->pContext, pDescriptor->pDeviceID, deviceType, pDescriptor->shareMode, pDescriptor, pConfig, pDevice, &pBuilder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_open_stream_and_close_builder__aaudio(pDevice->pContext, pBuilder, ppStream); +} + +static ma_result ma_close_stream__aaudio(ma_context* pContext, ma_AAudioStream* pStream) +{ + return ma_result_from_aaudio(((MA_PFN_AAudioStream_close)pContext->aaudio.AAudioStream_close)(pStream)); +} + +static ma_bool32 ma_has_default_device__aaudio(ma_context* pContext, ma_device_type deviceType) +{ + /* The only way to know this is to try creating a stream. */ + ma_AAudioStream* pStream; + ma_result result = ma_open_stream_basic__aaudio(pContext, NULL, deviceType, ma_share_mode_shared, &pStream); + if (result != MA_SUCCESS) { + return MA_FALSE; + } + + ma_close_stream__aaudio(pContext, pStream); + return MA_TRUE; +} + +static ma_result ma_wait_for_simple_state_transition__aaudio(ma_context* pContext, ma_AAudioStream* pStream, ma_aaudio_stream_state_t oldState, ma_aaudio_stream_state_t newState) +{ + ma_aaudio_stream_state_t actualNewState; + ma_aaudio_result_t resultAA = ((MA_PFN_AAudioStream_waitForStateChange)pContext->aaudio.AAudioStream_waitForStateChange)(pStream, oldState, &actualNewState, 5000000000); /* 5 second timeout. */ + if (resultAA != MA_AAUDIO_OK) { + return ma_result_from_aaudio(resultAA); + } + + if (newState != actualNewState) { + return MA_ERROR; /* Failed to transition into the expected state. 
*/ + } + + return MA_SUCCESS; +} + + +static ma_result ma_context_enumerate_devices__aaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 cbResult = MA_TRUE; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* Unfortunately AAudio does not have an enumeration API. Therefore I'm only going to report default devices, but only if it can instantiate a stream. */ + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.aaudio = MA_AAUDIO_UNSPECIFIED; + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + + if (ma_has_default_device__aaudio(pContext, ma_device_type_playback)) { + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + } + + /* Capture. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.aaudio = MA_AAUDIO_UNSPECIFIED; + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + + if (ma_has_default_device__aaudio(pContext, ma_device_type_capture)) { + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + } + + return MA_SUCCESS; +} + +static void ma_context_add_native_data_format_from_AAudioStream_ex__aaudio(ma_context* pContext, ma_AAudioStream* pStream, ma_format format, ma_uint32 flags, ma_device_info* pDeviceInfo) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pStream != NULL); + MA_ASSERT(pDeviceInfo != NULL); + + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format = format; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels = ((MA_PFN_AAudioStream_getChannelCount)pContext->aaudio.AAudioStream_getChannelCount)(pStream); + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = ((MA_PFN_AAudioStream_getSampleRate)pContext->aaudio.AAudioStream_getSampleRate)(pStream); + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags = flags; + pDeviceInfo->nativeDataFormatCount += 1; +} + +static void ma_context_add_native_data_format_from_AAudioStream__aaudio(ma_context* pContext, ma_AAudioStream* pStream, ma_uint32 flags, ma_device_info* pDeviceInfo) +{ + /* AAudio supports s16 and f32. */ + ma_context_add_native_data_format_from_AAudioStream_ex__aaudio(pContext, pStream, ma_format_f32, flags, pDeviceInfo); + ma_context_add_native_data_format_from_AAudioStream_ex__aaudio(pContext, pStream, ma_format_s16, flags, pDeviceInfo); +} + +static ma_result ma_context_get_device_info__aaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + ma_AAudioStream* pStream; + ma_result result; + + MA_ASSERT(pContext != NULL); + + /* ID */ + if (pDeviceID != NULL) { + pDeviceInfo->id.aaudio = pDeviceID->aaudio; + } else { + pDeviceInfo->id.aaudio = MA_AAUDIO_UNSPECIFIED; + } + + /* Name */ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + + pDeviceInfo->nativeDataFormatCount = 0; + + /* We'll need to open the device to get accurate sample rate and channel count information. 
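+
+    Once that temporary stream is open, both f32 and s16 are reported as native data formats, each with the
+    channel count and sample rate the stream was actually given. From the caller's side this surfaces through
+    the public API roughly like this (illustrative sketch only; assumes an already-initialized ma_context
+    called context and omits error handling):
+
+        ma_device_info info;
+        if (ma_context_get_device_info(&context, ma_device_type_playback, NULL, &info) == MA_SUCCESS) {
+            printf("%s: %u channels @ %uHz\n", info.name, info.nativeDataFormats[0].channels, info.nativeDataFormats[0].sampleRate);
+        }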
*/ + result = ma_open_stream_basic__aaudio(pContext, pDeviceID, deviceType, ma_share_mode_shared, &pStream); + if (result != MA_SUCCESS) { + return result; + } + + ma_context_add_native_data_format_from_AAudioStream__aaudio(pContext, pStream, 0, pDeviceInfo); + + ma_close_stream__aaudio(pContext, pStream); + pStream = NULL; + + return MA_SUCCESS; +} + + +static ma_result ma_device_uninit__aaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture); + pDevice->aaudio.pStreamCapture = NULL; + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback); + pDevice->aaudio.pStreamPlayback = NULL; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_init_by_type__aaudio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_type deviceType, ma_device_descriptor* pDescriptor, ma_AAudioStream** ppStream) +{ + ma_result result; + int32_t bufferCapacityInFrames; + int32_t framesPerDataCallback; + ma_AAudioStream* pStream; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDescriptor != NULL); + + *ppStream = NULL; /* Safety. */ + + /* First step is to open the stream. From there we'll be able to extract the internal configuration. */ + result = ma_open_stream__aaudio(pDevice, pConfig, deviceType, pDescriptor, &pStream); + if (result != MA_SUCCESS) { + return result; /* Failed to open the AAudio stream. */ + } + + /* Now extract the internal configuration. */ + pDescriptor->format = (((MA_PFN_AAudioStream_getFormat)pDevice->pContext->aaudio.AAudioStream_getFormat)(pStream) == MA_AAUDIO_FORMAT_PCM_I16) ? ma_format_s16 : ma_format_f32; + pDescriptor->channels = ((MA_PFN_AAudioStream_getChannelCount)pDevice->pContext->aaudio.AAudioStream_getChannelCount)(pStream); + pDescriptor->sampleRate = ((MA_PFN_AAudioStream_getSampleRate)pDevice->pContext->aaudio.AAudioStream_getSampleRate)(pStream); + + /* For the channel map we need to be sure we don't overflow any buffers. */ + if (pDescriptor->channels <= MA_MAX_CHANNELS) { + ma_channel_map_init_standard(ma_standard_channel_map_default, pDescriptor->channelMap, ma_countof(pDescriptor->channelMap), pDescriptor->channels); /* <-- Cannot find info on channel order, so assuming a default. */ + } else { + ma_channel_map_init_blank(pDescriptor->channelMap, MA_MAX_CHANNELS); /* Too many channels. Use a blank channel map. 
*/ + } + + bufferCapacityInFrames = ((MA_PFN_AAudioStream_getBufferCapacityInFrames)pDevice->pContext->aaudio.AAudioStream_getBufferCapacityInFrames)(pStream); + framesPerDataCallback = ((MA_PFN_AAudioStream_getFramesPerDataCallback)pDevice->pContext->aaudio.AAudioStream_getFramesPerDataCallback)(pStream); + + if (framesPerDataCallback > 0) { + pDescriptor->periodSizeInFrames = framesPerDataCallback; + pDescriptor->periodCount = bufferCapacityInFrames / framesPerDataCallback; + } else { + pDescriptor->periodSizeInFrames = bufferCapacityInFrames; + pDescriptor->periodCount = 1; + } + + *ppStream = pStream; + + return MA_SUCCESS; +} + +static ma_result ma_device_init__aaudio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + pDevice->aaudio.usage = pConfig->aaudio.usage; + pDevice->aaudio.contentType = pConfig->aaudio.contentType; + pDevice->aaudio.inputPreset = pConfig->aaudio.inputPreset; + pDevice->aaudio.allowedCapturePolicy = pConfig->aaudio.allowedCapturePolicy; + pDevice->aaudio.noAutoStartAfterReroute = pConfig->aaudio.noAutoStartAfterReroute; + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + result = ma_device_init_by_type__aaudio(pDevice, pConfig, ma_device_type_capture, pDescriptorCapture, (ma_AAudioStream**)&pDevice->aaudio.pStreamCapture); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + result = ma_device_init_by_type__aaudio(pDevice, pConfig, ma_device_type_playback, pDescriptorPlayback, (ma_AAudioStream**)&pDevice->aaudio.pStreamPlayback); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_start_stream__aaudio(ma_device* pDevice, ma_AAudioStream* pStream) +{ + ma_aaudio_result_t resultAA; + ma_aaudio_stream_state_t currentState; + + MA_ASSERT(pDevice != NULL); + + resultAA = ((MA_PFN_AAudioStream_requestStart)pDevice->pContext->aaudio.AAudioStream_requestStart)(pStream); + if (resultAA != MA_AAUDIO_OK) { + return ma_result_from_aaudio(resultAA); + } + + /* Do we actually need to wait for the device to transition into it's started state? */ + + /* The device should be in either a starting or started state. If it's not set to started we need to wait for it to transition. It should go from starting to started. */ + currentState = ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream); + if (currentState != MA_AAUDIO_STREAM_STATE_STARTED) { + ma_result result; + + if (currentState != MA_AAUDIO_STREAM_STATE_STARTING) { + return MA_ERROR; /* Expecting the stream to be a starting or started state. */ + } + + result = ma_wait_for_simple_state_transition__aaudio(pDevice->pContext, pStream, currentState, MA_AAUDIO_STREAM_STATE_STARTED); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop_stream__aaudio(ma_device* pDevice, ma_AAudioStream* pStream) +{ + ma_aaudio_result_t resultAA; + ma_aaudio_stream_state_t currentState; + + MA_ASSERT(pDevice != NULL); + + /* + From the AAudio documentation: + + The stream will stop after all of the data currently buffered has been played. 
+
+    This maps to miniaudio's requirement that devices be drained, which means we don't need to implement any draining logic.
+    */
+    currentState = ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream);
+    if (currentState == MA_AAUDIO_STREAM_STATE_DISCONNECTED) {
+        return MA_SUCCESS;  /* The device is disconnected. Don't try stopping it. */
+    }
+
+    resultAA = ((MA_PFN_AAudioStream_requestStop)pDevice->pContext->aaudio.AAudioStream_requestStop)(pStream);
+    if (resultAA != MA_AAUDIO_OK) {
+        return ma_result_from_aaudio(resultAA);
+    }
+
+    /* The device should be in either a stopping or stopped state. If it's not in the stopped state we need to wait for it to transition. It should go from stopping to stopped. */
+    currentState = ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream);
+    if (currentState != MA_AAUDIO_STREAM_STATE_STOPPED) {
+        ma_result result;
+
+        if (currentState != MA_AAUDIO_STREAM_STATE_STOPPING) {
+            return MA_ERROR;   /* Expecting the stream to be in a stopping or stopped state. */
+        }
+
+        result = ma_wait_for_simple_state_transition__aaudio(pDevice->pContext, pStream, currentState, MA_AAUDIO_STREAM_STATE_STOPPED);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_start__aaudio(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        ma_result result = ma_device_start_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        ma_result result = ma_device_start_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
+        if (result != MA_SUCCESS) {
+            if (pDevice->type == ma_device_type_duplex) {
+                ma_device_stop_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
+            }
+            return result;
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_stop__aaudio(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        ma_result result = ma_device_stop_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        ma_result result = ma_device_stop_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    ma_device__on_notification_stopped(pDevice);
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_reinit__aaudio(ma_device* pDevice, ma_device_type deviceType)
+{
+    ma_result result;
+
+    MA_ASSERT(pDevice != NULL);
+
+    /* The first thing to do is close the streams. */
+    if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex) {
+        ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
+        pDevice->aaudio.pStreamCapture = NULL;
+    }
+
+    if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) {
+        ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
+        pDevice->aaudio.pStreamPlayback = NULL;
+    }
+
+    /* Now we need to reinitialize each stream. The hardest part of this is just filling out the config and descriptors. 
*/ + { + ma_device_config deviceConfig; + ma_device_descriptor descriptorPlayback; + ma_device_descriptor descriptorCapture; + + deviceConfig = ma_device_config_init(deviceType); + deviceConfig.playback.pDeviceID = NULL; /* Only doing rerouting with default devices. */ + deviceConfig.playback.shareMode = pDevice->playback.shareMode; + deviceConfig.playback.format = pDevice->playback.format; + deviceConfig.playback.channels = pDevice->playback.channels; + deviceConfig.capture.pDeviceID = NULL; /* Only doing rerouting with default devices. */ + deviceConfig.capture.shareMode = pDevice->capture.shareMode; + deviceConfig.capture.format = pDevice->capture.format; + deviceConfig.capture.channels = pDevice->capture.channels; + deviceConfig.sampleRate = pDevice->sampleRate; + deviceConfig.aaudio.usage = pDevice->aaudio.usage; + deviceConfig.aaudio.contentType = pDevice->aaudio.contentType; + deviceConfig.aaudio.inputPreset = pDevice->aaudio.inputPreset; + deviceConfig.aaudio.allowedCapturePolicy = pDevice->aaudio.allowedCapturePolicy; + deviceConfig.aaudio.noAutoStartAfterReroute = pDevice->aaudio.noAutoStartAfterReroute; + deviceConfig.periods = 1; + + /* Try to get an accurate period size. */ + if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) { + deviceConfig.periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames; + } else { + deviceConfig.periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames; + } + + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) { + descriptorCapture.pDeviceID = deviceConfig.capture.pDeviceID; + descriptorCapture.shareMode = deviceConfig.capture.shareMode; + descriptorCapture.format = deviceConfig.capture.format; + descriptorCapture.channels = deviceConfig.capture.channels; + descriptorCapture.sampleRate = deviceConfig.sampleRate; + descriptorCapture.periodSizeInFrames = deviceConfig.periodSizeInFrames; + descriptorCapture.periodCount = deviceConfig.periods; + } + + if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) { + descriptorPlayback.pDeviceID = deviceConfig.playback.pDeviceID; + descriptorPlayback.shareMode = deviceConfig.playback.shareMode; + descriptorPlayback.format = deviceConfig.playback.format; + descriptorPlayback.channels = deviceConfig.playback.channels; + descriptorPlayback.sampleRate = deviceConfig.sampleRate; + descriptorPlayback.periodSizeInFrames = deviceConfig.periodSizeInFrames; + descriptorPlayback.periodCount = deviceConfig.periods; + } + + result = ma_device_init__aaudio(pDevice, &deviceConfig, &descriptorPlayback, &descriptorCapture); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_device_post_init(pDevice, deviceType, &descriptorPlayback, &descriptorCapture); + if (result != MA_SUCCESS) { + ma_device_uninit__aaudio(pDevice); + return result; + } + + /* We'll only ever do this in response to a reroute. */ + ma_device__on_notification_rerouted(pDevice); + + /* If the device is started, start the streams. Maybe make this configurable? */ + if (ma_device_get_state(pDevice) == ma_device_state_started) { + if (pDevice->aaudio.noAutoStartAfterReroute == MA_FALSE) { + ma_device_start__aaudio(pDevice); + } else { + ma_device_stop(pDevice); /* Do a full device stop so we set internal state correctly. 
*/
+            }
+        }
+
+        return MA_SUCCESS;
+    }
+}
+
+static ma_result ma_device_get_info__aaudio(ma_device* pDevice, ma_device_type type, ma_device_info* pDeviceInfo)
+{
+    ma_AAudioStream* pStream = NULL;
+
+    MA_ASSERT(pDevice != NULL);
+    MA_ASSERT(type != ma_device_type_duplex);
+    MA_ASSERT(pDeviceInfo != NULL);
+
+    if (type == ma_device_type_playback) {
+        pStream = (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback;
+        pDeviceInfo->id.aaudio = pDevice->playback.id.aaudio;
+        ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);   /* Only supporting default devices. */
+    }
+    if (type == ma_device_type_capture) {
+        pStream = (ma_AAudioStream*)pDevice->aaudio.pStreamCapture;
+        pDeviceInfo->id.aaudio = pDevice->capture.id.aaudio;
+        ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);   /* Only supporting default devices. */
+    }
+
+    /* Safety. Should never happen. */
+    if (pStream == NULL) {
+        return MA_INVALID_OPERATION;
+    }
+
+    pDeviceInfo->nativeDataFormatCount = 0;
+    ma_context_add_native_data_format_from_AAudioStream__aaudio(pDevice->pContext, pStream, 0, pDeviceInfo);
+
+    return MA_SUCCESS;
+}
+
+
+static ma_result ma_context_uninit__aaudio(ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pContext->backend == ma_backend_aaudio);
+
+    ma_device_job_thread_uninit(&pContext->aaudio.jobThread, &pContext->allocationCallbacks);
+
+    ma_dlclose(ma_context_get_log(pContext), pContext->aaudio.hAAudio);
+    pContext->aaudio.hAAudio = NULL;
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_init__aaudio(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks)
+{
+    size_t i;
+    const char* libNames[] = {
+        "libaaudio.so"
+    };
+
+    for (i = 0; i < ma_countof(libNames); ++i) {
+        pContext->aaudio.hAAudio = ma_dlopen(ma_context_get_log(pContext), libNames[i]);
+        if (pContext->aaudio.hAAudio != NULL) {
+            break;
+        }
+    }
+
+    if (pContext->aaudio.hAAudio == NULL) {
+        return MA_FAILED_TO_INIT_BACKEND;
+    }
+
+    pContext->aaudio.AAudio_createStreamBuilder = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudio_createStreamBuilder");
+    pContext->aaudio.AAudioStreamBuilder_delete = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_delete");
+    pContext->aaudio.AAudioStreamBuilder_setDeviceId = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setDeviceId");
+    pContext->aaudio.AAudioStreamBuilder_setDirection = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setDirection");
+    pContext->aaudio.AAudioStreamBuilder_setSharingMode = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setSharingMode");
+    pContext->aaudio.AAudioStreamBuilder_setFormat = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setFormat");
+    pContext->aaudio.AAudioStreamBuilder_setChannelCount = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setChannelCount");
+    pContext->aaudio.AAudioStreamBuilder_setSampleRate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setSampleRate");
+    pContext->aaudio.AAudioStreamBuilder_setBufferCapacityInFrames = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setBufferCapacityInFrames");
+    
pContext->aaudio.AAudioStreamBuilder_setFramesPerDataCallback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setFramesPerDataCallback"); + pContext->aaudio.AAudioStreamBuilder_setDataCallback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setDataCallback"); + pContext->aaudio.AAudioStreamBuilder_setErrorCallback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setErrorCallback"); + pContext->aaudio.AAudioStreamBuilder_setPerformanceMode = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setPerformanceMode"); + pContext->aaudio.AAudioStreamBuilder_setUsage = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setUsage"); + pContext->aaudio.AAudioStreamBuilder_setContentType = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setContentType"); + pContext->aaudio.AAudioStreamBuilder_setInputPreset = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setInputPreset"); + pContext->aaudio.AAudioStreamBuilder_setAllowedCapturePolicy = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setAllowedCapturePolicy"); + pContext->aaudio.AAudioStreamBuilder_openStream = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_openStream"); + pContext->aaudio.AAudioStream_close = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_close"); + pContext->aaudio.AAudioStream_getState = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_getState"); + pContext->aaudio.AAudioStream_waitForStateChange = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_waitForStateChange"); + pContext->aaudio.AAudioStream_getFormat = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_getFormat"); + pContext->aaudio.AAudioStream_getChannelCount = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_getChannelCount"); + pContext->aaudio.AAudioStream_getSampleRate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_getSampleRate"); + pContext->aaudio.AAudioStream_getBufferCapacityInFrames = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_getBufferCapacityInFrames"); + pContext->aaudio.AAudioStream_getFramesPerDataCallback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_getFramesPerDataCallback"); + pContext->aaudio.AAudioStream_getFramesPerBurst = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_getFramesPerBurst"); + pContext->aaudio.AAudioStream_requestStart = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_requestStart"); + pContext->aaudio.AAudioStream_requestStop = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_requestStop"); + + + pCallbacks->onContextInit = ma_context_init__aaudio; + pCallbacks->onContextUninit = ma_context_uninit__aaudio; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__aaudio; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__aaudio; + 
pCallbacks->onDeviceInit = ma_device_init__aaudio; + pCallbacks->onDeviceUninit = ma_device_uninit__aaudio; + pCallbacks->onDeviceStart = ma_device_start__aaudio; + pCallbacks->onDeviceStop = ma_device_stop__aaudio; + pCallbacks->onDeviceRead = NULL; /* Not used because AAudio is asynchronous. */ + pCallbacks->onDeviceWrite = NULL; /* Not used because AAudio is asynchronous. */ + pCallbacks->onDeviceDataLoop = NULL; /* Not used because AAudio is asynchronous. */ + pCallbacks->onDeviceGetInfo = ma_device_get_info__aaudio; + + + /* We need a job thread so we can deal with rerouting. */ + { + ma_result result; + ma_device_job_thread_config jobThreadConfig; + + jobThreadConfig = ma_device_job_thread_config_init(); + + result = ma_device_job_thread_init(&jobThreadConfig, &pContext->allocationCallbacks, &pContext->aaudio.jobThread); + if (result != MA_SUCCESS) { + ma_dlclose(ma_context_get_log(pContext), pContext->aaudio.hAAudio); + pContext->aaudio.hAAudio = NULL; + return result; + } + } + + + (void)pConfig; + return MA_SUCCESS; +} + +static ma_result ma_job_process__device__aaudio_reroute(ma_job* pJob) +{ + ma_device* pDevice; + + MA_ASSERT(pJob != NULL); + + pDevice = (ma_device*)pJob->data.device.aaudio.reroute.pDevice; + MA_ASSERT(pDevice != NULL); + + /* Here is where we need to reroute the device. To do this we need to uninitialize the stream and reinitialize it. */ + return ma_device_reinit__aaudio(pDevice, (ma_device_type)pJob->data.device.aaudio.reroute.deviceType); +} +#else +/* Getting here means there is no AAudio backend so we need a no-op job implementation. */ +static ma_result ma_job_process__device__aaudio_reroute(ma_job* pJob) +{ + return ma_job_process__noop(pJob); +} +#endif /* AAudio */ + + +/****************************************************************************** + +OpenSL|ES Backend + +******************************************************************************/ +#ifdef MA_HAS_OPENSL +#include +#ifdef MA_ANDROID +#include +#endif + +typedef SLresult (SLAPIENTRY * ma_slCreateEngine_proc)(SLObjectItf* pEngine, SLuint32 numOptions, SLEngineOption* pEngineOptions, SLuint32 numInterfaces, SLInterfaceID* pInterfaceIds, SLboolean* pInterfaceRequired); + +/* OpenSL|ES has one-per-application objects :( */ +static SLObjectItf g_maEngineObjectSL = NULL; +static SLEngineItf g_maEngineSL = NULL; +static ma_uint32 g_maOpenSLInitCounter = 0; +static ma_spinlock g_maOpenSLSpinlock = 0; /* For init/uninit. 
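+
+Engine creation and teardown are expected to be reference counted behind this lock, along these general lines
+(a sketch of the pattern only, not the actual init code):
+
+    ma_spinlock_lock(&g_maOpenSLSpinlock);
+    {
+        if (g_maOpenSLInitCounter == 0) {
+            ... create g_maEngineObjectSL and realize g_maEngineSL ...
+        }
+        g_maOpenSLInitCounter += 1;
+    }
+    ma_spinlock_unlock(&g_maOpenSLSpinlock);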
*/ + +#define MA_OPENSL_OBJ(p) (*((SLObjectItf)(p))) +#define MA_OPENSL_OUTPUTMIX(p) (*((SLOutputMixItf)(p))) +#define MA_OPENSL_PLAY(p) (*((SLPlayItf)(p))) +#define MA_OPENSL_RECORD(p) (*((SLRecordItf)(p))) + +#ifdef MA_ANDROID +#define MA_OPENSL_BUFFERQUEUE(p) (*((SLAndroidSimpleBufferQueueItf)(p))) +#else +#define MA_OPENSL_BUFFERQUEUE(p) (*((SLBufferQueueItf)(p))) +#endif + +static ma_result ma_result_from_OpenSL(SLuint32 result) +{ + switch (result) + { + case SL_RESULT_SUCCESS: return MA_SUCCESS; + case SL_RESULT_PRECONDITIONS_VIOLATED: return MA_ERROR; + case SL_RESULT_PARAMETER_INVALID: return MA_INVALID_ARGS; + case SL_RESULT_MEMORY_FAILURE: return MA_OUT_OF_MEMORY; + case SL_RESULT_RESOURCE_ERROR: return MA_INVALID_DATA; + case SL_RESULT_RESOURCE_LOST: return MA_ERROR; + case SL_RESULT_IO_ERROR: return MA_IO_ERROR; + case SL_RESULT_BUFFER_INSUFFICIENT: return MA_NO_SPACE; + case SL_RESULT_CONTENT_CORRUPTED: return MA_INVALID_DATA; + case SL_RESULT_CONTENT_UNSUPPORTED: return MA_FORMAT_NOT_SUPPORTED; + case SL_RESULT_CONTENT_NOT_FOUND: return MA_ERROR; + case SL_RESULT_PERMISSION_DENIED: return MA_ACCESS_DENIED; + case SL_RESULT_FEATURE_UNSUPPORTED: return MA_NOT_IMPLEMENTED; + case SL_RESULT_INTERNAL_ERROR: return MA_ERROR; + case SL_RESULT_UNKNOWN_ERROR: return MA_ERROR; + case SL_RESULT_OPERATION_ABORTED: return MA_ERROR; + case SL_RESULT_CONTROL_LOST: return MA_ERROR; + default: return MA_ERROR; + } +} + +/* Converts an individual OpenSL-style channel identifier (SL_SPEAKER_FRONT_LEFT, etc.) to miniaudio. */ +static ma_uint8 ma_channel_id_to_ma__opensl(SLuint32 id) +{ + switch (id) + { + case SL_SPEAKER_FRONT_LEFT: return MA_CHANNEL_FRONT_LEFT; + case SL_SPEAKER_FRONT_RIGHT: return MA_CHANNEL_FRONT_RIGHT; + case SL_SPEAKER_FRONT_CENTER: return MA_CHANNEL_FRONT_CENTER; + case SL_SPEAKER_LOW_FREQUENCY: return MA_CHANNEL_LFE; + case SL_SPEAKER_BACK_LEFT: return MA_CHANNEL_BACK_LEFT; + case SL_SPEAKER_BACK_RIGHT: return MA_CHANNEL_BACK_RIGHT; + case SL_SPEAKER_FRONT_LEFT_OF_CENTER: return MA_CHANNEL_FRONT_LEFT_CENTER; + case SL_SPEAKER_FRONT_RIGHT_OF_CENTER: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case SL_SPEAKER_BACK_CENTER: return MA_CHANNEL_BACK_CENTER; + case SL_SPEAKER_SIDE_LEFT: return MA_CHANNEL_SIDE_LEFT; + case SL_SPEAKER_SIDE_RIGHT: return MA_CHANNEL_SIDE_RIGHT; + case SL_SPEAKER_TOP_CENTER: return MA_CHANNEL_TOP_CENTER; + case SL_SPEAKER_TOP_FRONT_LEFT: return MA_CHANNEL_TOP_FRONT_LEFT; + case SL_SPEAKER_TOP_FRONT_CENTER: return MA_CHANNEL_TOP_FRONT_CENTER; + case SL_SPEAKER_TOP_FRONT_RIGHT: return MA_CHANNEL_TOP_FRONT_RIGHT; + case SL_SPEAKER_TOP_BACK_LEFT: return MA_CHANNEL_TOP_BACK_LEFT; + case SL_SPEAKER_TOP_BACK_CENTER: return MA_CHANNEL_TOP_BACK_CENTER; + case SL_SPEAKER_TOP_BACK_RIGHT: return MA_CHANNEL_TOP_BACK_RIGHT; + default: return 0; + } +} + +/* Converts an individual miniaudio channel identifier (MA_CHANNEL_FRONT_LEFT, etc.) to OpenSL-style. 
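+Identifiers with no OpenSL equivalent fall through to 0 so they simply don't contribute to a channel mask. As
+an illustration, a plain stereo map run through ma_channel_map_to_channel_mask__opensl() below ends up as the
+two front speaker bits OR'd together:
+
+    ma_channel stereo[2] = { MA_CHANNEL_FRONT_LEFT, MA_CHANNEL_FRONT_RIGHT };
+    SLuint32 mask = ma_channel_map_to_channel_mask__opensl(stereo, 2);   <-- SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT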
*/ +static SLuint32 ma_channel_id_to_opensl(ma_uint8 id) +{ + switch (id) + { + case MA_CHANNEL_MONO: return SL_SPEAKER_FRONT_CENTER; + case MA_CHANNEL_FRONT_LEFT: return SL_SPEAKER_FRONT_LEFT; + case MA_CHANNEL_FRONT_RIGHT: return SL_SPEAKER_FRONT_RIGHT; + case MA_CHANNEL_FRONT_CENTER: return SL_SPEAKER_FRONT_CENTER; + case MA_CHANNEL_LFE: return SL_SPEAKER_LOW_FREQUENCY; + case MA_CHANNEL_BACK_LEFT: return SL_SPEAKER_BACK_LEFT; + case MA_CHANNEL_BACK_RIGHT: return SL_SPEAKER_BACK_RIGHT; + case MA_CHANNEL_FRONT_LEFT_CENTER: return SL_SPEAKER_FRONT_LEFT_OF_CENTER; + case MA_CHANNEL_FRONT_RIGHT_CENTER: return SL_SPEAKER_FRONT_RIGHT_OF_CENTER; + case MA_CHANNEL_BACK_CENTER: return SL_SPEAKER_BACK_CENTER; + case MA_CHANNEL_SIDE_LEFT: return SL_SPEAKER_SIDE_LEFT; + case MA_CHANNEL_SIDE_RIGHT: return SL_SPEAKER_SIDE_RIGHT; + case MA_CHANNEL_TOP_CENTER: return SL_SPEAKER_TOP_CENTER; + case MA_CHANNEL_TOP_FRONT_LEFT: return SL_SPEAKER_TOP_FRONT_LEFT; + case MA_CHANNEL_TOP_FRONT_CENTER: return SL_SPEAKER_TOP_FRONT_CENTER; + case MA_CHANNEL_TOP_FRONT_RIGHT: return SL_SPEAKER_TOP_FRONT_RIGHT; + case MA_CHANNEL_TOP_BACK_LEFT: return SL_SPEAKER_TOP_BACK_LEFT; + case MA_CHANNEL_TOP_BACK_CENTER: return SL_SPEAKER_TOP_BACK_CENTER; + case MA_CHANNEL_TOP_BACK_RIGHT: return SL_SPEAKER_TOP_BACK_RIGHT; + default: return 0; + } +} + +/* Converts a channel mapping to an OpenSL-style channel mask. */ +static SLuint32 ma_channel_map_to_channel_mask__opensl(const ma_channel* pChannelMap, ma_uint32 channels) +{ + SLuint32 channelMask = 0; + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + channelMask |= ma_channel_id_to_opensl(pChannelMap[iChannel]); + } + + return channelMask; +} + +/* Converts an OpenSL-style channel mask to a miniaudio channel map. */ +static void ma_channel_mask_to_channel_map__opensl(SLuint32 channelMask, ma_uint32 channels, ma_channel* pChannelMap) +{ + if (channels == 1 && channelMask == 0) { + pChannelMap[0] = MA_CHANNEL_MONO; + } else if (channels == 2 && channelMask == 0) { + pChannelMap[0] = MA_CHANNEL_FRONT_LEFT; + pChannelMap[1] = MA_CHANNEL_FRONT_RIGHT; + } else { + if (channels == 1 && (channelMask & SL_SPEAKER_FRONT_CENTER) != 0) { + pChannelMap[0] = MA_CHANNEL_MONO; + } else { + /* Just iterate over each bit. */ + ma_uint32 iChannel = 0; + ma_uint32 iBit; + for (iBit = 0; iBit < 32 && iChannel < channels; ++iBit) { + SLuint32 bitValue = (channelMask & (1UL << iBit)); + if (bitValue != 0) { + /* The bit is set. */ + pChannelMap[iChannel] = ma_channel_id_to_ma__opensl(bitValue); + iChannel += 1; + } + } + } + } +} + +static SLuint32 ma_round_to_standard_sample_rate__opensl(SLuint32 samplesPerSec) +{ + if (samplesPerSec <= SL_SAMPLINGRATE_8) { + return SL_SAMPLINGRATE_8; + } + if (samplesPerSec <= SL_SAMPLINGRATE_11_025) { + return SL_SAMPLINGRATE_11_025; + } + if (samplesPerSec <= SL_SAMPLINGRATE_12) { + return SL_SAMPLINGRATE_12; + } + if (samplesPerSec <= SL_SAMPLINGRATE_16) { + return SL_SAMPLINGRATE_16; + } + if (samplesPerSec <= SL_SAMPLINGRATE_22_05) { + return SL_SAMPLINGRATE_22_05; + } + if (samplesPerSec <= SL_SAMPLINGRATE_24) { + return SL_SAMPLINGRATE_24; + } + if (samplesPerSec <= SL_SAMPLINGRATE_32) { + return SL_SAMPLINGRATE_32; + } + if (samplesPerSec <= SL_SAMPLINGRATE_44_1) { + return SL_SAMPLINGRATE_44_1; + } + if (samplesPerSec <= SL_SAMPLINGRATE_48) { + return SL_SAMPLINGRATE_48; + } + + /* Android doesn't support more than 48000. 
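+
+    For illustration: OpenSL expresses these rate constants in milliHertz, so SL_SAMPLINGRATE_44_1 is
+    44100000. A request of 44100000 passes straight through as SL_SAMPLINGRATE_44_1, while something like
+    22500000 (22.5kHz) gets rounded up to SL_SAMPLINGRATE_24 by the chain above. Anything larger than the
+    highest rate handled in this function falls through to the final return at the bottom.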
*/ +#ifndef MA_ANDROID + if (samplesPerSec <= SL_SAMPLINGRATE_64) { + return SL_SAMPLINGRATE_64; + } + if (samplesPerSec <= SL_SAMPLINGRATE_88_2) { + return SL_SAMPLINGRATE_88_2; + } + if (samplesPerSec <= SL_SAMPLINGRATE_96) { + return SL_SAMPLINGRATE_96; + } + if (samplesPerSec <= SL_SAMPLINGRATE_192) { + return SL_SAMPLINGRATE_192; + } +#endif + + return SL_SAMPLINGRATE_16; +} + + +static SLint32 ma_to_stream_type__opensl(ma_opensl_stream_type streamType) +{ + switch (streamType) { + case ma_opensl_stream_type_voice: return SL_ANDROID_STREAM_VOICE; + case ma_opensl_stream_type_system: return SL_ANDROID_STREAM_SYSTEM; + case ma_opensl_stream_type_ring: return SL_ANDROID_STREAM_RING; + case ma_opensl_stream_type_media: return SL_ANDROID_STREAM_MEDIA; + case ma_opensl_stream_type_alarm: return SL_ANDROID_STREAM_ALARM; + case ma_opensl_stream_type_notification: return SL_ANDROID_STREAM_NOTIFICATION; + default: break; + } + + return SL_ANDROID_STREAM_VOICE; +} + +static SLint32 ma_to_recording_preset__opensl(ma_opensl_recording_preset recordingPreset) +{ + switch (recordingPreset) { + case ma_opensl_recording_preset_generic: return SL_ANDROID_RECORDING_PRESET_GENERIC; + case ma_opensl_recording_preset_camcorder: return SL_ANDROID_RECORDING_PRESET_CAMCORDER; + case ma_opensl_recording_preset_voice_recognition: return SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION; + case ma_opensl_recording_preset_voice_communication: return SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION; + case ma_opensl_recording_preset_voice_unprocessed: return SL_ANDROID_RECORDING_PRESET_UNPROCESSED; + default: break; + } + + return SL_ANDROID_RECORDING_PRESET_NONE; +} + + +static ma_result ma_context_enumerate_devices__opensl(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 cbResult; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to enumerate devices. */ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + /* + TODO: Test Me. + + This is currently untested, so for now we are just returning default devices. + */ +#if 0 && !defined(MA_ANDROID) + ma_bool32 isTerminated = MA_FALSE; + + SLuint32 pDeviceIDs[128]; + SLint32 deviceCount = sizeof(pDeviceIDs) / sizeof(pDeviceIDs[0]); + + SLAudioIODeviceCapabilitiesItf deviceCaps; + SLresult resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, (SLInterfaceID)pContext->opensl.SL_IID_AUDIOIODEVICECAPABILITIES, &deviceCaps); + if (resultSL != SL_RESULT_SUCCESS) { + /* The interface may not be supported so just report a default device. 
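+   In particular, Android's implementation does not appear to expose SL_IID_AUDIOIODEVICECAPABILITIES at all, which is why this block is also excluded when MA_ANDROID is defined.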
*/ + goto return_default_device; + } + + /* Playback */ + if (!isTerminated) { + resultSL = (*deviceCaps)->GetAvailableAudioOutputs(deviceCaps, &deviceCount, pDeviceIDs); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_result_from_OpenSL(resultSL); + } + + for (SLint32 iDevice = 0; iDevice < deviceCount; ++iDevice) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.opensl = pDeviceIDs[iDevice]; + + SLAudioOutputDescriptor desc; + resultSL = (*deviceCaps)->QueryAudioOutputCapabilities(deviceCaps, deviceInfo.id.opensl, &desc); + if (resultSL == SL_RESULT_SUCCESS) { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), (const char*)desc.pDeviceName, (size_t)-1); + + ma_bool32 cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + if (cbResult == MA_FALSE) { + isTerminated = MA_TRUE; + break; + } + } + } + } + + /* Capture */ + if (!isTerminated) { + resultSL = (*deviceCaps)->GetAvailableAudioInputs(deviceCaps, &deviceCount, pDeviceIDs); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_result_from_OpenSL(resultSL); + } + + for (SLint32 iDevice = 0; iDevice < deviceCount; ++iDevice) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.opensl = pDeviceIDs[iDevice]; + + SLAudioInputDescriptor desc; + resultSL = (*deviceCaps)->QueryAudioInputCapabilities(deviceCaps, deviceInfo.id.opensl, &desc); + if (resultSL == SL_RESULT_SUCCESS) { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), (const char*)desc.deviceName, (size_t)-1); + + ma_bool32 cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + if (cbResult == MA_FALSE) { + isTerminated = MA_TRUE; + break; + } + } + } + } + + return MA_SUCCESS; +#else + goto return_default_device; +#endif + +return_default_device:; + cbResult = MA_TRUE; + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.opensl = SL_DEFAULTDEVICEID_AUDIOOUTPUT; + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + /* Capture. 
*/ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.opensl = SL_DEFAULTDEVICEID_AUDIOINPUT; + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + + return MA_SUCCESS; +} + +static void ma_context_add_data_format_ex__opensl(ma_context* pContext, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_device_info* pDeviceInfo) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDeviceInfo != NULL); + + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format = format; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels = channels; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = sampleRate; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags = 0; + pDeviceInfo->nativeDataFormatCount += 1; +} + +static void ma_context_add_data_format__opensl(ma_context* pContext, ma_format format, ma_device_info* pDeviceInfo) +{ + ma_uint32 minChannels = 1; + ma_uint32 maxChannels = 2; + ma_uint32 minSampleRate = (ma_uint32)ma_standard_sample_rate_8000; + ma_uint32 maxSampleRate = (ma_uint32)ma_standard_sample_rate_48000; + ma_uint32 iChannel; + ma_uint32 iSampleRate; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDeviceInfo != NULL); + + /* + Each sample format can support mono and stereo, and we'll support a small subset of standard + rates (up to 48000). A better solution would be to somehow find a native sample rate. + */ + for (iChannel = minChannels; iChannel < maxChannels; iChannel += 1) { + for (iSampleRate = 0; iSampleRate < ma_countof(g_maStandardSampleRatePriorities); iSampleRate += 1) { + ma_uint32 standardSampleRate = g_maStandardSampleRatePriorities[iSampleRate]; + if (standardSampleRate >= minSampleRate && standardSampleRate <= maxSampleRate) { + ma_context_add_data_format_ex__opensl(pContext, format, iChannel, standardSampleRate, pDeviceInfo); + } + } + } +} + +static ma_result ma_context_get_device_info__opensl(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + MA_ASSERT(pContext != NULL); + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to get device info. */ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + /* + TODO: Test Me. + + This is currently untested, so for now we are just returning default devices. + */ +#if 0 && !defined(MA_ANDROID) + SLAudioIODeviceCapabilitiesItf deviceCaps; + SLresult resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, (SLInterfaceID)pContext->opensl.SL_IID_AUDIOIODEVICECAPABILITIES, &deviceCaps); + if (resultSL != SL_RESULT_SUCCESS) { + /* The interface may not be supported so just report a default device. 
*/ + goto return_default_device; + } + + if (deviceType == ma_device_type_playback) { + SLAudioOutputDescriptor desc; + resultSL = (*deviceCaps)->QueryAudioOutputCapabilities(deviceCaps, pDeviceID->opensl, &desc); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_result_from_OpenSL(resultSL); + } + + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (const char*)desc.pDeviceName, (size_t)-1); + } else { + SLAudioInputDescriptor desc; + resultSL = (*deviceCaps)->QueryAudioInputCapabilities(deviceCaps, pDeviceID->opensl, &desc); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_result_from_OpenSL(resultSL); + } + + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (const char*)desc.deviceName, (size_t)-1); + } + + goto return_detailed_info; +#else + goto return_default_device; +#endif + +return_default_device: + if (pDeviceID != NULL) { + if ((deviceType == ma_device_type_playback && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOOUTPUT) || + (deviceType == ma_device_type_capture && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOINPUT)) { + return MA_NO_DEVICE; /* Don't know the device. */ + } + } + + /* ID and Name / Description */ + if (deviceType == ma_device_type_playback) { + pDeviceInfo->id.opensl = SL_DEFAULTDEVICEID_AUDIOOUTPUT; + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + pDeviceInfo->id.opensl = SL_DEFAULTDEVICEID_AUDIOINPUT; + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + pDeviceInfo->isDefault = MA_TRUE; + + goto return_detailed_info; + + +return_detailed_info: + + /* + For now we're just outputting a set of values that are supported by the API but not necessarily supported + by the device natively. Later on we should work on this so that it more closely reflects the device's + actual native format. + */ + pDeviceInfo->nativeDataFormatCount = 0; +#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 + ma_context_add_data_format__opensl(pContext, ma_format_f32, pDeviceInfo); +#endif + ma_context_add_data_format__opensl(pContext, ma_format_s16, pDeviceInfo); + ma_context_add_data_format__opensl(pContext, ma_format_u8, pDeviceInfo); + + return MA_SUCCESS; +} + + +#ifdef MA_ANDROID +/*void ma_buffer_queue_callback_capture__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, SLuint32 eventFlags, const void* pBuffer, SLuint32 bufferSize, SLuint32 dataUsed, void* pContext)*/ +static void ma_buffer_queue_callback_capture__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, void* pUserData) +{ + ma_device* pDevice = (ma_device*)pUserData; + size_t periodSizeInBytes; + ma_uint8* pBuffer; + SLresult resultSL; + + MA_ASSERT(pDevice != NULL); + + (void)pBufferQueue; + + /* + For now, don't do anything unless the buffer was fully processed. From what I can tell, it looks like + OpenSL|ES 1.1 improves on buffer queues to the point that we could much more intelligently handle this, + but unfortunately it looks like Android is only supporting OpenSL|ES 1.0.1 for now :( + */ + + /* Don't do anything if the device is not started. */ + if (ma_device_get_state(pDevice) != ma_device_state_started) { + return; + } + + /* Don't do anything if the device is being drained. 
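+   The isDrainingCapture flag is set by ma_device_drain__opensl() while it waits for the queue to empty; skipping the re-enqueue here is what allows the queue's count to eventually reach zero.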
*/ + if (pDevice->opensl.isDrainingCapture) { + return; + } + + periodSizeInBytes = pDevice->capture.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + pBuffer = pDevice->opensl.pBufferCapture + (pDevice->opensl.currentBufferIndexCapture * periodSizeInBytes); + + ma_device_handle_backend_data_callback(pDevice, NULL, pBuffer, pDevice->capture.internalPeriodSizeInFrames); + + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, pBuffer, periodSizeInBytes); + if (resultSL != SL_RESULT_SUCCESS) { + return; + } + + pDevice->opensl.currentBufferIndexCapture = (pDevice->opensl.currentBufferIndexCapture + 1) % pDevice->capture.internalPeriods; +} + +static void ma_buffer_queue_callback_playback__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, void* pUserData) +{ + ma_device* pDevice = (ma_device*)pUserData; + size_t periodSizeInBytes; + ma_uint8* pBuffer; + SLresult resultSL; + + MA_ASSERT(pDevice != NULL); + + (void)pBufferQueue; + + /* Don't do anything if the device is not started. */ + if (ma_device_get_state(pDevice) != ma_device_state_started) { + return; + } + + /* Don't do anything if the device is being drained. */ + if (pDevice->opensl.isDrainingPlayback) { + return; + } + + periodSizeInBytes = pDevice->playback.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + pBuffer = pDevice->opensl.pBufferPlayback + (pDevice->opensl.currentBufferIndexPlayback * periodSizeInBytes); + + ma_device_handle_backend_data_callback(pDevice, pBuffer, NULL, pDevice->playback.internalPeriodSizeInFrames); + + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, pBuffer, periodSizeInBytes); + if (resultSL != SL_RESULT_SUCCESS) { + return; + } + + pDevice->opensl.currentBufferIndexPlayback = (pDevice->opensl.currentBufferIndexPlayback + 1) % pDevice->playback.internalPeriods; +} +#endif + +static ma_result ma_device_uninit__opensl(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it before uninitializing the device. 
*/ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + if (pDevice->opensl.pAudioRecorderObj) { + MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioRecorderObj); + } + + ma_free(pDevice->opensl.pBufferCapture, &pDevice->pContext->allocationCallbacks); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + if (pDevice->opensl.pAudioPlayerObj) { + MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioPlayerObj); + } + if (pDevice->opensl.pOutputMixObj) { + MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Destroy((SLObjectItf)pDevice->opensl.pOutputMixObj); + } + + ma_free(pDevice->opensl.pBufferPlayback, &pDevice->pContext->allocationCallbacks); + } + + return MA_SUCCESS; +} + +#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 +typedef SLAndroidDataFormat_PCM_EX ma_SLDataFormat_PCM; +#else +typedef SLDataFormat_PCM ma_SLDataFormat_PCM; +#endif + +static ma_result ma_SLDataFormat_PCM_init__opensl(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const ma_channel* channelMap, ma_SLDataFormat_PCM* pDataFormat) +{ + /* We need to convert our format/channels/rate so that they aren't set to default. */ + if (format == ma_format_unknown) { + format = MA_DEFAULT_FORMAT; + } + if (channels == 0) { + channels = MA_DEFAULT_CHANNELS; + } + if (sampleRate == 0) { + sampleRate = MA_DEFAULT_SAMPLE_RATE; + } + +#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 + if (format == ma_format_f32) { + pDataFormat->formatType = SL_ANDROID_DATAFORMAT_PCM_EX; + pDataFormat->representation = SL_ANDROID_PCM_REPRESENTATION_FLOAT; + } else { + pDataFormat->formatType = SL_DATAFORMAT_PCM; + } +#else + pDataFormat->formatType = SL_DATAFORMAT_PCM; +#endif + + pDataFormat->numChannels = channels; + ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec = ma_round_to_standard_sample_rate__opensl(sampleRate * 1000); /* In millihertz. Annoyingly, the sample rate variable is named differently between SLAndroidDataFormat_PCM_EX and SLDataFormat_PCM */ + pDataFormat->bitsPerSample = ma_get_bytes_per_sample(format) * 8; + pDataFormat->channelMask = ma_channel_map_to_channel_mask__opensl(channelMap, channels); + pDataFormat->endianness = (ma_is_little_endian()) ? SL_BYTEORDER_LITTLEENDIAN : SL_BYTEORDER_BIGENDIAN; + + /* + Android has a few restrictions on the format as documented here: https://developer.android.com/ndk/guides/audio/opensl-for-android.html + - Only mono and stereo is supported. + - Only u8 and s16 formats are supported. + - Maximum sample rate of 48000. + */ +#ifdef MA_ANDROID + if (pDataFormat->numChannels > 2) { + pDataFormat->numChannels = 2; + } +#if __ANDROID_API__ >= 21 + if (pDataFormat->formatType == SL_ANDROID_DATAFORMAT_PCM_EX) { + /* It's floating point. */ + MA_ASSERT(pDataFormat->representation == SL_ANDROID_PCM_REPRESENTATION_FLOAT); + if (pDataFormat->bitsPerSample > 32) { + pDataFormat->bitsPerSample = 32; + } + } else { + if (pDataFormat->bitsPerSample > 16) { + pDataFormat->bitsPerSample = 16; + } + } +#else + if (pDataFormat->bitsPerSample > 16) { + pDataFormat->bitsPerSample = 16; + } +#endif + if (((SLDataFormat_PCM*)pDataFormat)->samplesPerSec > SL_SAMPLINGRATE_48) { + ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec = SL_SAMPLINGRATE_48; + } +#endif + + pDataFormat->containerSize = pDataFormat->bitsPerSample; /* Always tightly packed for now. 
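+   That is, containerSize always equals bitsPerSample, so samples are never padded out to a larger container.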
*/ + + return MA_SUCCESS; +} + +static ma_result ma_deconstruct_SLDataFormat_PCM__opensl(ma_SLDataFormat_PCM* pDataFormat, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + ma_bool32 isFloatingPoint = MA_FALSE; +#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 + if (pDataFormat->formatType == SL_ANDROID_DATAFORMAT_PCM_EX) { + MA_ASSERT(pDataFormat->representation == SL_ANDROID_PCM_REPRESENTATION_FLOAT); + isFloatingPoint = MA_TRUE; + } +#endif + if (isFloatingPoint) { + if (pDataFormat->bitsPerSample == 32) { + *pFormat = ma_format_f32; + } + } else { + if (pDataFormat->bitsPerSample == 8) { + *pFormat = ma_format_u8; + } else if (pDataFormat->bitsPerSample == 16) { + *pFormat = ma_format_s16; + } else if (pDataFormat->bitsPerSample == 24) { + *pFormat = ma_format_s24; + } else if (pDataFormat->bitsPerSample == 32) { + *pFormat = ma_format_s32; + } + } + + *pChannels = pDataFormat->numChannels; + *pSampleRate = ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec / 1000; + ma_channel_mask_to_channel_map__opensl(pDataFormat->channelMask, ma_min(pDataFormat->numChannels, channelMapCap), pChannelMap); + + return MA_SUCCESS; +} + +static ma_result ma_device_init__opensl(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ +#ifdef MA_ANDROID + SLDataLocator_AndroidSimpleBufferQueue queue; + SLresult resultSL; + size_t bufferSizeInBytes; + SLInterfaceID itfIDs[2]; + const SLboolean itfIDsRequired[] = { + SL_BOOLEAN_TRUE, /* SL_IID_ANDROIDSIMPLEBUFFERQUEUE */ + SL_BOOLEAN_FALSE /* SL_IID_ANDROIDCONFIGURATION */ + }; +#endif + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to initialize a new device. */ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* + For now, only supporting Android implementations of OpenSL|ES since that's the only one I've + been able to test with and I currently depend on Android-specific extensions (simple buffer + queues). + */ +#ifdef MA_ANDROID + itfIDs[0] = (SLInterfaceID)pDevice->pContext->opensl.SL_IID_ANDROIDSIMPLEBUFFERQUEUE; + itfIDs[1] = (SLInterfaceID)pDevice->pContext->opensl.SL_IID_ANDROIDCONFIGURATION; + + /* No exclusive mode with OpenSL|ES. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + /* Now we can start initializing the device properly. 
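+   The same SLDataLocator_AndroidSimpleBufferQueue structure is reused for both the capture sink and the playback source below; only numBuffers changes between the two. */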
*/ + MA_ASSERT(pDevice != NULL); + MA_ZERO_OBJECT(&pDevice->opensl); + + queue.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE; + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_SLDataFormat_PCM pcm; + SLDataLocator_IODevice locatorDevice; + SLDataSource source; + SLDataSink sink; + SLAndroidConfigurationItf pRecorderConfig; + + ma_SLDataFormat_PCM_init__opensl(pDescriptorCapture->format, pDescriptorCapture->channels, pDescriptorCapture->sampleRate, pDescriptorCapture->channelMap, &pcm); + + locatorDevice.locatorType = SL_DATALOCATOR_IODEVICE; + locatorDevice.deviceType = SL_IODEVICE_AUDIOINPUT; + locatorDevice.deviceID = SL_DEFAULTDEVICEID_AUDIOINPUT; /* Must always use the default device with Android. */ + locatorDevice.device = NULL; + + source.pLocator = &locatorDevice; + source.pFormat = NULL; + + queue.numBuffers = pDescriptorCapture->periodCount; + + sink.pLocator = &queue; + sink.pFormat = (SLDataFormat_PCM*)&pcm; + + resultSL = (*g_maEngineSL)->CreateAudioRecorder(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioRecorderObj, &source, &sink, ma_countof(itfIDs), itfIDs, itfIDsRequired); + if (resultSL == SL_RESULT_CONTENT_UNSUPPORTED || resultSL == SL_RESULT_PARAMETER_INVALID) { + /* Unsupported format. Fall back to something safer and try again. If this fails, just abort. */ + pcm.formatType = SL_DATAFORMAT_PCM; + pcm.numChannels = 1; + ((SLDataFormat_PCM*)&pcm)->samplesPerSec = SL_SAMPLINGRATE_16; /* The name of the sample rate variable is different between SLAndroidDataFormat_PCM_EX and SLDataFormat_PCM. */ + pcm.bitsPerSample = 16; + pcm.containerSize = pcm.bitsPerSample; /* Always tightly packed for now. */ + pcm.channelMask = 0; + resultSL = (*g_maEngineSL)->CreateAudioRecorder(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioRecorderObj, &source, &sink, ma_countof(itfIDs), itfIDs, itfIDsRequired); + } + + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create audio recorder."); + return ma_result_from_OpenSL(resultSL); + } + + + /* Set the recording preset before realizing the player. */ + if (pConfig->opensl.recordingPreset != ma_opensl_recording_preset_default) { + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioRecorderObj, (SLInterfaceID)pDevice->pContext->opensl.SL_IID_ANDROIDCONFIGURATION, &pRecorderConfig); + if (resultSL == SL_RESULT_SUCCESS) { + SLint32 recordingPreset = ma_to_recording_preset__opensl(pConfig->opensl.recordingPreset); + resultSL = (*pRecorderConfig)->SetConfiguration(pRecorderConfig, SL_ANDROID_KEY_RECORDING_PRESET, &recordingPreset, sizeof(SLint32)); + if (resultSL != SL_RESULT_SUCCESS) { + /* Failed to set the configuration. Just keep going. 
*/ + } + } + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->Realize((SLObjectItf)pDevice->opensl.pAudioRecorderObj, SL_BOOLEAN_FALSE); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize audio recorder."); + return ma_result_from_OpenSL(resultSL); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioRecorderObj, (SLInterfaceID)pDevice->pContext->opensl.SL_IID_RECORD, &pDevice->opensl.pAudioRecorder); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_RECORD interface."); + return ma_result_from_OpenSL(resultSL); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioRecorderObj, (SLInterfaceID)pDevice->pContext->opensl.SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &pDevice->opensl.pBufferQueueCapture); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_ANDROIDSIMPLEBUFFERQUEUE interface."); + return ma_result_from_OpenSL(resultSL); + } + + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->RegisterCallback((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, ma_buffer_queue_callback_capture__opensl_android, pDevice); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to register buffer queue callback."); + return ma_result_from_OpenSL(resultSL); + } + + /* The internal format is determined by the "pcm" object. */ + ma_deconstruct_SLDataFormat_PCM__opensl(&pcm, &pDescriptorCapture->format, &pDescriptorCapture->channels, &pDescriptorCapture->sampleRate, pDescriptorCapture->channelMap, ma_countof(pDescriptorCapture->channelMap)); + + /* Buffer. 
*/ + pDescriptorCapture->periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptorCapture, pDescriptorCapture->sampleRate, pConfig->performanceProfile); + pDevice->opensl.currentBufferIndexCapture = 0; + + bufferSizeInBytes = pDescriptorCapture->periodSizeInFrames * ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels) * pDescriptorCapture->periodCount; + pDevice->opensl.pBufferCapture = (ma_uint8*)ma_calloc(bufferSizeInBytes, &pDevice->pContext->allocationCallbacks); + if (pDevice->opensl.pBufferCapture == NULL) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to allocate memory for data buffer."); + return MA_OUT_OF_MEMORY; + } + MA_ZERO_MEMORY(pDevice->opensl.pBufferCapture, bufferSizeInBytes); + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_SLDataFormat_PCM pcm; + SLDataSource source; + SLDataLocator_OutputMix outmixLocator; + SLDataSink sink; + SLAndroidConfigurationItf pPlayerConfig; + + ma_SLDataFormat_PCM_init__opensl(pDescriptorPlayback->format, pDescriptorPlayback->channels, pDescriptorPlayback->sampleRate, pDescriptorPlayback->channelMap, &pcm); + + resultSL = (*g_maEngineSL)->CreateOutputMix(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pOutputMixObj, 0, NULL, NULL); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create output mix."); + return ma_result_from_OpenSL(resultSL); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Realize((SLObjectItf)pDevice->opensl.pOutputMixObj, SL_BOOLEAN_FALSE); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize output mix object."); + return ma_result_from_OpenSL(resultSL); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->GetInterface((SLObjectItf)pDevice->opensl.pOutputMixObj, (SLInterfaceID)pDevice->pContext->opensl.SL_IID_OUTPUTMIX, &pDevice->opensl.pOutputMix); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_OUTPUTMIX interface."); + return ma_result_from_OpenSL(resultSL); + } + + /* Set the output device. */ + if (pDescriptorPlayback->pDeviceID != NULL) { + SLuint32 deviceID_OpenSL = pDescriptorPlayback->pDeviceID->opensl; + MA_OPENSL_OUTPUTMIX(pDevice->opensl.pOutputMix)->ReRoute((SLOutputMixItf)pDevice->opensl.pOutputMix, 1, &deviceID_OpenSL); + } + + queue.numBuffers = pDescriptorPlayback->periodCount; + + source.pLocator = &queue; + source.pFormat = (SLDataFormat_PCM*)&pcm; + + outmixLocator.locatorType = SL_DATALOCATOR_OUTPUTMIX; + outmixLocator.outputMix = (SLObjectItf)pDevice->opensl.pOutputMixObj; + + sink.pLocator = &outmixLocator; + sink.pFormat = NULL; + + resultSL = (*g_maEngineSL)->CreateAudioPlayer(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioPlayerObj, &source, &sink, ma_countof(itfIDs), itfIDs, itfIDsRequired); + if (resultSL == SL_RESULT_CONTENT_UNSUPPORTED || resultSL == SL_RESULT_PARAMETER_INVALID) { + /* Unsupported format. Fall back to something safer and try again. If this fails, just abort. 
*/ + pcm.formatType = SL_DATAFORMAT_PCM; + pcm.numChannels = 2; + ((SLDataFormat_PCM*)&pcm)->samplesPerSec = SL_SAMPLINGRATE_16; + pcm.bitsPerSample = 16; + pcm.containerSize = pcm.bitsPerSample; /* Always tightly packed for now. */ + pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT; + resultSL = (*g_maEngineSL)->CreateAudioPlayer(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioPlayerObj, &source, &sink, ma_countof(itfIDs), itfIDs, itfIDsRequired); + } + + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create audio player."); + return ma_result_from_OpenSL(resultSL); + } + + + /* Set the stream type before realizing the player. */ + if (pConfig->opensl.streamType != ma_opensl_stream_type_default) { + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioPlayerObj, (SLInterfaceID)pDevice->pContext->opensl.SL_IID_ANDROIDCONFIGURATION, &pPlayerConfig); + if (resultSL == SL_RESULT_SUCCESS) { + SLint32 streamType = ma_to_stream_type__opensl(pConfig->opensl.streamType); + resultSL = (*pPlayerConfig)->SetConfiguration(pPlayerConfig, SL_ANDROID_KEY_STREAM_TYPE, &streamType, sizeof(SLint32)); + if (resultSL != SL_RESULT_SUCCESS) { + /* Failed to set the configuration. Just keep going. */ + } + } + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->Realize((SLObjectItf)pDevice->opensl.pAudioPlayerObj, SL_BOOLEAN_FALSE); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize audio player."); + return ma_result_from_OpenSL(resultSL); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioPlayerObj, (SLInterfaceID)pDevice->pContext->opensl.SL_IID_PLAY, &pDevice->opensl.pAudioPlayer); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_PLAY interface."); + return ma_result_from_OpenSL(resultSL); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioPlayerObj, (SLInterfaceID)pDevice->pContext->opensl.SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &pDevice->opensl.pBufferQueuePlayback); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_ANDROIDSIMPLEBUFFERQUEUE interface."); + return ma_result_from_OpenSL(resultSL); + } + + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->RegisterCallback((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, ma_buffer_queue_callback_playback__opensl_android, pDevice); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to register buffer queue callback."); + return ma_result_from_OpenSL(resultSL); + } + + /* The internal format is determined by the "pcm" object. */ + ma_deconstruct_SLDataFormat_PCM__opensl(&pcm, &pDescriptorPlayback->format, &pDescriptorPlayback->channels, &pDescriptorPlayback->sampleRate, pDescriptorPlayback->channelMap, ma_countof(pDescriptorPlayback->channelMap)); + + /* Buffer. 
*/ + pDescriptorPlayback->periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptorPlayback, pDescriptorPlayback->sampleRate, pConfig->performanceProfile); + pDevice->opensl.currentBufferIndexPlayback = 0; + + bufferSizeInBytes = pDescriptorPlayback->periodSizeInFrames * ma_get_bytes_per_frame(pDescriptorPlayback->format, pDescriptorPlayback->channels) * pDescriptorPlayback->periodCount; + pDevice->opensl.pBufferPlayback = (ma_uint8*)ma_calloc(bufferSizeInBytes, &pDevice->pContext->allocationCallbacks); + if (pDevice->opensl.pBufferPlayback == NULL) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to allocate memory for data buffer."); + return MA_OUT_OF_MEMORY; + } + MA_ZERO_MEMORY(pDevice->opensl.pBufferPlayback, bufferSizeInBytes); + } + + return MA_SUCCESS; +#else + return MA_NO_BACKEND; /* Non-Android implementations are not supported. */ +#endif +} + +static ma_result ma_device_start__opensl(ma_device* pDevice) +{ + SLresult resultSL; + size_t periodSizeInBytes; + ma_uint32 iPeriod; + + MA_ASSERT(pDevice != NULL); + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to start the device. */ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + resultSL = MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_RECORDING); + if (resultSL != SL_RESULT_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to start internal capture device."); + return ma_result_from_OpenSL(resultSL); + } + + periodSizeInBytes = pDevice->capture.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) { + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, pDevice->opensl.pBufferCapture + (periodSizeInBytes * iPeriod), periodSizeInBytes); + if (resultSL != SL_RESULT_SUCCESS) { + MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_STOPPED); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to enqueue buffer for capture device."); + return ma_result_from_OpenSL(resultSL); + } + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + resultSL = MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_PLAYING); + if (resultSL != SL_RESULT_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to start internal playback device."); + return ma_result_from_OpenSL(resultSL); + } + + /* In playback mode (no duplex) we need to load some initial buffers. In duplex mode we need to enqueue silent buffers. 
*/ + if (pDevice->type == ma_device_type_duplex) { + MA_ZERO_MEMORY(pDevice->opensl.pBufferPlayback, pDevice->playback.internalPeriodSizeInFrames * pDevice->playback.internalPeriods * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + } else { + ma_device__read_frames_from_client(pDevice, pDevice->playback.internalPeriodSizeInFrames * pDevice->playback.internalPeriods, pDevice->opensl.pBufferPlayback); + } + + periodSizeInBytes = pDevice->playback.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; ++iPeriod) { + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, pDevice->opensl.pBufferPlayback + (periodSizeInBytes * iPeriod), periodSizeInBytes); + if (resultSL != SL_RESULT_SUCCESS) { + MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_STOPPED); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to enqueue buffer for playback device."); + return ma_result_from_OpenSL(resultSL); + } + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_drain__opensl(ma_device* pDevice, ma_device_type deviceType) +{ + SLAndroidSimpleBufferQueueItf pBufferQueue; + + MA_ASSERT(deviceType == ma_device_type_capture || deviceType == ma_device_type_playback); + + if (pDevice->type == ma_device_type_capture) { + pBufferQueue = (SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture; + pDevice->opensl.isDrainingCapture = MA_TRUE; + } else { + pBufferQueue = (SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback; + pDevice->opensl.isDrainingPlayback = MA_TRUE; + } + + for (;;) { + SLAndroidSimpleBufferQueueState state; + + MA_OPENSL_BUFFERQUEUE(pBufferQueue)->GetState(pBufferQueue, &state); + if (state.count == 0) { + break; + } + + ma_sleep(10); + } + + if (pDevice->type == ma_device_type_capture) { + pDevice->opensl.isDrainingCapture = MA_FALSE; + } else { + pDevice->opensl.isDrainingPlayback = MA_FALSE; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__opensl(ma_device* pDevice) +{ + SLresult resultSL; + + MA_ASSERT(pDevice != NULL); + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it before stopping/uninitializing the device. 
*/ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma_device_drain__opensl(pDevice, ma_device_type_capture); + + resultSL = MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_STOPPED); + if (resultSL != SL_RESULT_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to stop internal capture device."); + return ma_result_from_OpenSL(resultSL); + } + + MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Clear((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_device_drain__opensl(pDevice, ma_device_type_playback); + + resultSL = MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_STOPPED); + if (resultSL != SL_RESULT_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to stop internal playback device."); + return ma_result_from_OpenSL(resultSL); + } + + MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Clear((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback); + } + + /* Make sure the client is aware that the device has stopped. There may be an OpenSL|ES callback for this, but I haven't found it. */ + ma_device__on_notification_stopped(pDevice); + + return MA_SUCCESS; +} + + +static ma_result ma_context_uninit__opensl(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_opensl); + (void)pContext; + + /* Uninit global data. */ + ma_spinlock_lock(&g_maOpenSLSpinlock); + { + MA_ASSERT(g_maOpenSLInitCounter > 0); /* If you've triggered this, it means you have ma_context_init/uninit mismatch. Each successful call to ma_context_init() must be matched up with a call to ma_context_uninit(). */ + + g_maOpenSLInitCounter -= 1; + if (g_maOpenSLInitCounter == 0) { + (*g_maEngineObjectSL)->Destroy(g_maEngineObjectSL); + } + } + ma_spinlock_unlock(&g_maOpenSLSpinlock); + + return MA_SUCCESS; +} + +static ma_result ma_dlsym_SLInterfaceID__opensl(ma_context* pContext, const char* pName, ma_handle* pHandle) +{ + /* We need to return an error if the symbol cannot be found. This is important because there have been reports that some symbols do not exist. 
*/ + ma_handle* p = (ma_handle*)ma_dlsym(ma_context_get_log(pContext), pContext->opensl.libOpenSLES, pName); + if (p == NULL) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_INFO, "[OpenSL] Cannot find symbol %s", pName); + return MA_NO_BACKEND; + } + + *pHandle = *p; + return MA_SUCCESS; +} + +static ma_result ma_context_init_engine_nolock__opensl(ma_context* pContext) +{ + g_maOpenSLInitCounter += 1; + if (g_maOpenSLInitCounter == 1) { + SLresult resultSL; + + resultSL = ((ma_slCreateEngine_proc)pContext->opensl.slCreateEngine)(&g_maEngineObjectSL, 0, NULL, 0, NULL, NULL); + if (resultSL != SL_RESULT_SUCCESS) { + g_maOpenSLInitCounter -= 1; + return ma_result_from_OpenSL(resultSL); + } + + (*g_maEngineObjectSL)->Realize(g_maEngineObjectSL, SL_BOOLEAN_FALSE); + + resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, (SLInterfaceID)pContext->opensl.SL_IID_ENGINE, &g_maEngineSL); + if (resultSL != SL_RESULT_SUCCESS) { + (*g_maEngineObjectSL)->Destroy(g_maEngineObjectSL); + g_maOpenSLInitCounter -= 1; + return ma_result_from_OpenSL(resultSL); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_init__opensl(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + ma_result result; + +#if !defined(MA_NO_RUNTIME_LINKING) + size_t i; + const char* libOpenSLESNames[] = { + "libOpenSLES.so" + }; +#endif + + MA_ASSERT(pContext != NULL); + + (void)pConfig; + +#if !defined(MA_NO_RUNTIME_LINKING) + /* + Dynamically link against libOpenSLES.so. I have now had multiple reports that SL_IID_ANDROIDSIMPLEBUFFERQUEUE cannot be found. One + report was happening at compile time and another at runtime. To try working around this, I'm going to link to libOpenSLES at runtime + and extract the symbols rather than reference them directly. This should, hopefully, fix these issues as the compiler won't see any + references to the symbols and will hopefully skip the checks. 
+ */ + for (i = 0; i < ma_countof(libOpenSLESNames); i += 1) { + pContext->opensl.libOpenSLES = ma_dlopen(ma_context_get_log(pContext), libOpenSLESNames[i]); + if (pContext->opensl.libOpenSLES != NULL) { + break; + } + } + + if (pContext->opensl.libOpenSLES == NULL) { + ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_INFO, "[OpenSL] Could not find libOpenSLES.so"); + return MA_NO_BACKEND; + } + + result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_ENGINE", &pContext->opensl.SL_IID_ENGINE); + if (result != MA_SUCCESS) { + ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES); + return result; + } + + result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_AUDIOIODEVICECAPABILITIES", &pContext->opensl.SL_IID_AUDIOIODEVICECAPABILITIES); + if (result != MA_SUCCESS) { + ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES); + return result; + } + + result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_ANDROIDSIMPLEBUFFERQUEUE", &pContext->opensl.SL_IID_ANDROIDSIMPLEBUFFERQUEUE); + if (result != MA_SUCCESS) { + ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES); + return result; + } + + result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_RECORD", &pContext->opensl.SL_IID_RECORD); + if (result != MA_SUCCESS) { + ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES); + return result; + } + + result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_PLAY", &pContext->opensl.SL_IID_PLAY); + if (result != MA_SUCCESS) { + ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES); + return result; + } + + result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_OUTPUTMIX", &pContext->opensl.SL_IID_OUTPUTMIX); + if (result != MA_SUCCESS) { + ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES); + return result; + } + + result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_ANDROIDCONFIGURATION", &pContext->opensl.SL_IID_ANDROIDCONFIGURATION); + if (result != MA_SUCCESS) { + ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES); + return result; + } + + pContext->opensl.slCreateEngine = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->opensl.libOpenSLES, "slCreateEngine"); + if (pContext->opensl.slCreateEngine == NULL) { + ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES); + ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_INFO, "[OpenSL] Cannot find symbol slCreateEngine."); + return MA_NO_BACKEND; + } +#else + pContext->opensl.SL_IID_ENGINE = (ma_handle)SL_IID_ENGINE; + pContext->opensl.SL_IID_AUDIOIODEVICECAPABILITIES = (ma_handle)SL_IID_AUDIOIODEVICECAPABILITIES; + pContext->opensl.SL_IID_ANDROIDSIMPLEBUFFERQUEUE = (ma_handle)SL_IID_ANDROIDSIMPLEBUFFERQUEUE; + pContext->opensl.SL_IID_RECORD = (ma_handle)SL_IID_RECORD; + pContext->opensl.SL_IID_PLAY = (ma_handle)SL_IID_PLAY; + pContext->opensl.SL_IID_OUTPUTMIX = (ma_handle)SL_IID_OUTPUTMIX; + pContext->opensl.SL_IID_ANDROIDCONFIGURATION = (ma_handle)SL_IID_ANDROIDCONFIGURATION; + pContext->opensl.slCreateEngine = (ma_proc)slCreateEngine; +#endif + + + /* Initialize global data first if applicable. 
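+   The engine object is reference counted via g_maOpenSLInitCounter so that multiple contexts share a single engine; it is only created on the first call and destroyed on the last uninit.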
*/
+    ma_spinlock_lock(&g_maOpenSLSpinlock);
+    {
+        result = ma_context_init_engine_nolock__opensl(pContext);
+    }
+    ma_spinlock_unlock(&g_maOpenSLSpinlock);
+
+    if (result != MA_SUCCESS) {
+        ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES);
+        ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_INFO, "[OpenSL] Failed to initialize OpenSL engine.");
+        return result;
+    }
+
+    pCallbacks->onContextInit = ma_context_init__opensl;
+    pCallbacks->onContextUninit = ma_context_uninit__opensl;
+    pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__opensl;
+    pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__opensl;
+    pCallbacks->onDeviceInit = ma_device_init__opensl;
+    pCallbacks->onDeviceUninit = ma_device_uninit__opensl;
+    pCallbacks->onDeviceStart = ma_device_start__opensl;
+    pCallbacks->onDeviceStop = ma_device_stop__opensl;
+    pCallbacks->onDeviceRead = NULL; /* Not needed because OpenSL|ES is asynchronous. */
+    pCallbacks->onDeviceWrite = NULL; /* Not needed because OpenSL|ES is asynchronous. */
+    pCallbacks->onDeviceDataLoop = NULL; /* Not needed because OpenSL|ES is asynchronous. */
+
+    return MA_SUCCESS;
+}
+#endif /* OpenSL|ES */
+
+
+/******************************************************************************
+
+Web Audio Backend
+
+******************************************************************************/
+#ifdef MA_HAS_WEBAUDIO
+#include <emscripten/emscripten.h>
+
+#if (__EMSCRIPTEN_major__ > 3) || (__EMSCRIPTEN_major__ == 3 && (__EMSCRIPTEN_minor__ > 1 || (__EMSCRIPTEN_minor__ == 1 && __EMSCRIPTEN_tiny__ >= 32)))
+#include <emscripten/webaudio.h>
+#define MA_SUPPORT_AUDIO_WORKLETS
+#endif
+
+/*
+TODO: Version 0.12: Swap this logic around so that AudioWorklets are used by default. Add MA_NO_AUDIO_WORKLETS.
+*/
+#if defined(MA_ENABLE_AUDIO_WORKLETS) && defined(MA_SUPPORT_AUDIO_WORKLETS)
+#define MA_USE_AUDIO_WORKLETS
+#endif
+
+/* The thread stack size must be a multiple of 16. */
+#ifndef MA_AUDIO_WORKLETS_THREAD_STACK_SIZE
+#define MA_AUDIO_WORKLETS_THREAD_STACK_SIZE 16384
+#endif
+
+#if defined(MA_USE_AUDIO_WORKLETS)
+#define MA_WEBAUDIO_LATENCY_HINT_BALANCED "balanced"
+#define MA_WEBAUDIO_LATENCY_HINT_INTERACTIVE "interactive"
+#define MA_WEBAUDIO_LATENCY_HINT_PLAYBACK "playback"
+#endif
+
+static ma_bool32 ma_is_capture_supported__webaudio()
+{
+    return EM_ASM_INT({
+        return (navigator.mediaDevices !== undefined && navigator.mediaDevices.getUserMedia !== undefined);
+    }, 0) != 0; /* Must pass in a dummy argument for C99 compatibility.
*/ +} + +#ifdef __cplusplus +extern "C" { +#endif + void* EMSCRIPTEN_KEEPALIVE ma_malloc_emscripten(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) + { + return ma_malloc(sz, pAllocationCallbacks); + } + + void EMSCRIPTEN_KEEPALIVE ma_free_emscripten(void* p, const ma_allocation_callbacks* pAllocationCallbacks) + { + ma_free(p, pAllocationCallbacks); + } + + void EMSCRIPTEN_KEEPALIVE ma_device_process_pcm_frames_capture__webaudio(ma_device* pDevice, int frameCount, float* pFrames) + { + ma_device_handle_backend_data_callback(pDevice, NULL, pFrames, (ma_uint32)frameCount); + } + + void EMSCRIPTEN_KEEPALIVE ma_device_process_pcm_frames_playback__webaudio(ma_device* pDevice, int frameCount, float* pFrames) + { + ma_device_handle_backend_data_callback(pDevice, pFrames, NULL, (ma_uint32)frameCount); + } +#ifdef __cplusplus +} +#endif + +static ma_result ma_context_enumerate_devices__webaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 cbResult = MA_TRUE; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* Only supporting default devices for now. */ + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + deviceInfo.isDefault = MA_TRUE; /* Only supporting default devices. */ + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + /* Capture. */ + if (cbResult) { + if (ma_is_capture_supported__webaudio()) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + deviceInfo.isDefault = MA_TRUE; /* Only supporting default devices. */ + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__webaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + MA_ASSERT(pContext != NULL); + + if (deviceType == ma_device_type_capture && !ma_is_capture_supported__webaudio()) { + return MA_NO_DEVICE; + } + + MA_ZERO_MEMORY(pDeviceInfo->id.webaudio, sizeof(pDeviceInfo->id.webaudio)); + + /* Only supporting default devices for now. */ + (void)pDeviceID; + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + /* Only supporting default devices. */ + pDeviceInfo->isDefault = MA_TRUE; + + /* Web Audio can support any number of channels and sample rates. It only supports f32 formats, however. */ + pDeviceInfo->nativeDataFormats[0].flags = 0; + pDeviceInfo->nativeDataFormats[0].format = ma_format_unknown; + pDeviceInfo->nativeDataFormats[0].channels = 0; /* All channels are supported. */ + pDeviceInfo->nativeDataFormats[0].sampleRate = EM_ASM_INT({ + try { + var temp = new (window.AudioContext || window.webkitAudioContext)(); + var sampleRate = temp.sampleRate; + temp.close(); + return sampleRate; + } catch(e) { + return 0; + } + }, 0); /* Must pass in a dummy argument for C99 compatibility. 
*/ + + if (pDeviceInfo->nativeDataFormats[0].sampleRate == 0) { + return MA_NO_DEVICE; + } + + pDeviceInfo->nativeDataFormatCount = 1; + + return MA_SUCCESS; +} + +static ma_result ma_device_uninit__webaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + +#if defined(MA_USE_AUDIO_WORKLETS) + { + EM_ASM({ + var device = miniaudio.get_device_by_index($0); + + if (device.streamNode !== undefined) { + device.streamNode.disconnect(); + device.streamNode = undefined; + } + }, pDevice->webaudio.deviceIndex); + + emscripten_destroy_web_audio_node(pDevice->webaudio.audioWorklet); + emscripten_destroy_audio_context(pDevice->webaudio.audioContext); + ma_free(pDevice->webaudio.pStackBuffer, &pDevice->pContext->allocationCallbacks); + } +#else + { + EM_ASM({ + var device = miniaudio.get_device_by_index($0); + + /* Make sure all nodes are disconnected and marked for collection. */ + if (device.scriptNode !== undefined) { + device.scriptNode.onaudioprocess = function(e) {}; /* We want to reset the callback to ensure it doesn't get called after AudioContext.close() has returned. Shouldn't happen since we're disconnecting, but just to be safe... */ + device.scriptNode.disconnect(); + device.scriptNode = undefined; + } + + if (device.streamNode !== undefined) { + device.streamNode.disconnect(); + device.streamNode = undefined; + } + + /* + Stop the device. I think there is a chance the callback could get fired after calling this, hence why we want + to clear the callback before closing. + */ + device.webaudio.close(); + device.webaudio = undefined; + }, pDevice->webaudio.deviceIndex); + } +#endif + + /* Clean up the device on the JS side. */ + EM_ASM({ + miniaudio.untrack_device_by_index($0); + }, pDevice->webaudio.deviceIndex); + + ma_free(pDevice->webaudio.pIntermediaryBuffer, &pDevice->pContext->allocationCallbacks); + + return MA_SUCCESS; +} + +#if !defined(MA_USE_AUDIO_WORKLETS) +static ma_uint32 ma_calculate_period_size_in_frames_from_descriptor__webaudio(const ma_device_descriptor* pDescriptor, ma_uint32 nativeSampleRate, ma_performance_profile performanceProfile) +{ + /* + There have been reports of the default buffer size being too small on some browsers. If we're using + the default buffer size, we'll make sure the period size is bigger than our standard defaults. + */ + ma_uint32 periodSizeInFrames; + + if (pDescriptor->periodSizeInFrames == 0) { + if (pDescriptor->periodSizeInMilliseconds == 0) { + if (performanceProfile == ma_performance_profile_low_latency) { + periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(33, nativeSampleRate); /* 1 frame @ 30 FPS */ + } else { + periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(333, nativeSampleRate); + } + } else { + periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pDescriptor->periodSizeInMilliseconds, nativeSampleRate); + } + } else { + periodSizeInFrames = pDescriptor->periodSizeInFrames; + } + + /* The size of the buffer must be a power of 2 and between 256 and 16384. 
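+   This mirrors the createScriptProcessor() buffer size constraint; for example, a requested period of 1000 frames should come out as 1024 here.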
*/ + if (periodSizeInFrames < 256) { + periodSizeInFrames = 256; + } else if (periodSizeInFrames > 16384) { + periodSizeInFrames = 16384; + } else { + periodSizeInFrames = ma_next_power_of_2(periodSizeInFrames); + } + + return periodSizeInFrames; +} +#endif + + +#if defined(MA_USE_AUDIO_WORKLETS) +typedef struct +{ + ma_device* pDevice; + const ma_device_config* pConfig; + ma_device_descriptor* pDescriptorPlayback; + ma_device_descriptor* pDescriptorCapture; +} ma_audio_worklet_thread_initialized_data; + +static EM_BOOL ma_audio_worklet_process_callback__webaudio(int inputCount, const AudioSampleFrame* pInputs, int outputCount, AudioSampleFrame* pOutputs, int paramCount, const AudioParamFrame* pParams, void* pUserData) +{ + ma_device* pDevice = (ma_device*)pUserData; + ma_uint32 frameCount; + + (void)paramCount; + (void)pParams; + + if (ma_device_get_state(pDevice) != ma_device_state_started) { + return EM_TRUE; + } + + /* + The Emscripten documentation says that it'll always be 128 frames being passed in. Hard coding it like that feels + like a very bad idea to me. Even if it's hard coded in the backend, the API and documentation should always refer + to variables instead of a hard coded number. In any case, will follow along for the time being. + + Unfortunately the audio data is not interleaved so we'll need to convert it before we give the data to miniaudio + for further processing. + */ + frameCount = 128; + + if (inputCount > 0) { + /* Input data needs to be interleaved before we hand it to the client. */ + for (ma_uint32 iChannel = 0; iChannel < pDevice->capture.internalChannels; iChannel += 1) { + for (ma_uint32 iFrame = 0; iFrame < frameCount; iFrame += 1) { + pDevice->webaudio.pIntermediaryBuffer[iFrame*pDevice->capture.internalChannels + iChannel] = pInputs[0].data[frameCount*iChannel + iFrame]; + } + } + + ma_device_process_pcm_frames_capture__webaudio(pDevice, frameCount, pDevice->webaudio.pIntermediaryBuffer); + } + + if (outputCount > 0) { + /* If it's a capture-only device, we'll need to output silence. */ + if (pDevice->type == ma_device_type_capture) { + MA_ZERO_MEMORY(pOutputs[0].data, frameCount * pDevice->playback.internalChannels * sizeof(float)); + } else { + ma_device_process_pcm_frames_playback__webaudio(pDevice, frameCount, pDevice->webaudio.pIntermediaryBuffer); + + /* We've read the data from the client. Now we need to deinterleave the buffer and output to the output buffer. */ + for (ma_uint32 iChannel = 0; iChannel < pDevice->playback.internalChannels; iChannel += 1) { + for (ma_uint32 iFrame = 0; iFrame < frameCount; iFrame += 1) { + pOutputs[0].data[frameCount*iChannel + iFrame] = pDevice->webaudio.pIntermediaryBuffer[iFrame*pDevice->playback.internalChannels + iChannel]; + } + } + } + } + + return EM_TRUE; +} + + +static void ma_audio_worklet_processor_created__webaudio(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void* pUserData) +{ + ma_audio_worklet_thread_initialized_data* pParameters = (ma_audio_worklet_thread_initialized_data*)pUserData; + EmscriptenAudioWorkletNodeCreateOptions audioWorkletOptions; + int channels = 0; + size_t intermediaryBufferSizeInFrames; + int sampleRate; + + if (success == EM_FALSE) { + pParameters->pDevice->webaudio.initResult = MA_ERROR; + ma_free(pParameters, &pParameters->pDevice->pContext->allocationCallbacks); + return; + } + + /* The next step is to initialize the audio worklet node. */ + MA_ZERO_OBJECT(&audioWorkletOptions); + + /* + The way channel counts work with Web Audio is confusing. 
As far as I can tell, there's no way to know the channel + count from MediaStreamAudioSourceNode (what we use for capture)? The only way to have control is to configure an + output channel count on the capture side. This is slightly confusing for capture mode because intuitively you + wouldn't actually connect an output to an input-only node, but this is what we'll have to do in order to have + proper control over the channel count. In the capture case, we'll have to output silence to it's output node. + */ + if (pParameters->pConfig->deviceType == ma_device_type_capture) { + channels = (int)((pParameters->pDescriptorCapture->channels > 0) ? pParameters->pDescriptorCapture->channels : MA_DEFAULT_CHANNELS); + audioWorkletOptions.numberOfInputs = 1; + } else { + channels = (int)((pParameters->pDescriptorPlayback->channels > 0) ? pParameters->pDescriptorPlayback->channels : MA_DEFAULT_CHANNELS); + + if (pParameters->pConfig->deviceType == ma_device_type_duplex) { + audioWorkletOptions.numberOfInputs = 1; + } else { + audioWorkletOptions.numberOfInputs = 0; + } + } + + audioWorkletOptions.numberOfOutputs = 1; + audioWorkletOptions.outputChannelCounts = &channels; + + + /* + Now that we know the channel count to use we can allocate the intermediary buffer. The + intermediary buffer is used for interleaving and deinterleaving. + */ + intermediaryBufferSizeInFrames = 128; + + pParameters->pDevice->webaudio.pIntermediaryBuffer = (float*)ma_malloc(intermediaryBufferSizeInFrames * (ma_uint32)channels * sizeof(float), &pParameters->pDevice->pContext->allocationCallbacks); + if (pParameters->pDevice->webaudio.pIntermediaryBuffer == NULL) { + pParameters->pDevice->webaudio.initResult = MA_OUT_OF_MEMORY; + ma_free(pParameters, &pParameters->pDevice->pContext->allocationCallbacks); + return; + } + + + pParameters->pDevice->webaudio.audioWorklet = emscripten_create_wasm_audio_worklet_node(audioContext, "miniaudio", &audioWorkletOptions, &ma_audio_worklet_process_callback__webaudio, pParameters->pDevice); + + /* With the audio worklet initialized we can now attach it to the graph. */ + if (pParameters->pConfig->deviceType == ma_device_type_capture || pParameters->pConfig->deviceType == ma_device_type_duplex) { + ma_result attachmentResult = EM_ASM_INT({ + var getUserMediaResult = 0; + var audioWorklet = emscriptenGetAudioObject($0); + var audioContext = emscriptenGetAudioObject($1); + + navigator.mediaDevices.getUserMedia({audio:true, video:false}) + .then(function(stream) { + audioContext.streamNode = audioContext.createMediaStreamSource(stream); + audioContext.streamNode.connect(audioWorklet); + audioWorklet.connect(audioContext.destination); + getUserMediaResult = 0; /* 0 = MA_SUCCESS */ + }) + .catch(function(error) { + console.log("navigator.mediaDevices.getUserMedia Failed: " + error); + getUserMediaResult = -1; /* -1 = MA_ERROR */ + }); + + return getUserMediaResult; + }, pParameters->pDevice->webaudio.audioWorklet, audioContext); + + if (attachmentResult != MA_SUCCESS) { + ma_log_postf(ma_device_get_log(pParameters->pDevice), MA_LOG_LEVEL_ERROR, "Web Audio: Failed to connect capture node."); + emscripten_destroy_web_audio_node(pParameters->pDevice->webaudio.audioWorklet); + pParameters->pDevice->webaudio.initResult = attachmentResult; + ma_free(pParameters, &pParameters->pDevice->pContext->allocationCallbacks); + return; + } + } + + /* If it's playback only we can now attach the worklet node to the graph. This has already been done for the duplex case. 
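+
+    A side note on the capture attachment above: getUserMedia() resolves asynchronously, so the
+    .then/.catch handlers will not have run by the time the EM_ASM_INT block returns. That means
+    attachmentResult reflects the initial value of getUserMediaResult rather than the eventual
+    outcome of the permission prompt. The ScriptProcessorNode path further below has the same
+    property.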
*/ + if (pParameters->pConfig->deviceType == ma_device_type_playback) { + ma_result attachmentResult = EM_ASM_INT({ + var audioWorklet = emscriptenGetAudioObject($0); + var audioContext = emscriptenGetAudioObject($1); + audioWorklet.connect(audioContext.destination); + return 0; /* 0 = MA_SUCCESS */ + }, pParameters->pDevice->webaudio.audioWorklet, audioContext); + + if (attachmentResult != MA_SUCCESS) { + ma_log_postf(ma_device_get_log(pParameters->pDevice), MA_LOG_LEVEL_ERROR, "Web Audio: Failed to connect playback node."); + pParameters->pDevice->webaudio.initResult = attachmentResult; + ma_free(pParameters, &pParameters->pDevice->pContext->allocationCallbacks); + return; + } + } + + /* We need to update the descriptors so that they reflect the internal data format. Both capture and playback should be the same. */ + sampleRate = EM_ASM_INT({ return emscriptenGetAudioObject($0).sampleRate; }, audioContext); + + if (pParameters->pDescriptorCapture != NULL) { + pParameters->pDescriptorCapture->format = ma_format_f32; + pParameters->pDescriptorCapture->channels = (ma_uint32)channels; + pParameters->pDescriptorCapture->sampleRate = (ma_uint32)sampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_webaudio, pParameters->pDescriptorCapture->channelMap, ma_countof(pParameters->pDescriptorCapture->channelMap), pParameters->pDescriptorCapture->channels); + pParameters->pDescriptorCapture->periodSizeInFrames = intermediaryBufferSizeInFrames; + pParameters->pDescriptorCapture->periodCount = 1; + } + + if (pParameters->pDescriptorPlayback != NULL) { + pParameters->pDescriptorPlayback->format = ma_format_f32; + pParameters->pDescriptorPlayback->channels = (ma_uint32)channels; + pParameters->pDescriptorPlayback->sampleRate = (ma_uint32)sampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_webaudio, pParameters->pDescriptorPlayback->channelMap, ma_countof(pParameters->pDescriptorPlayback->channelMap), pParameters->pDescriptorPlayback->channels); + pParameters->pDescriptorPlayback->periodSizeInFrames = intermediaryBufferSizeInFrames; + pParameters->pDescriptorPlayback->periodCount = 1; + } + + /* At this point we're done and we can return. */ + ma_log_postf(ma_device_get_log(pParameters->pDevice), MA_LOG_LEVEL_DEBUG, "AudioWorklets: Created worklet node: %d\n", pParameters->pDevice->webaudio.audioWorklet); + pParameters->pDevice->webaudio.initResult = MA_SUCCESS; + ma_free(pParameters, &pParameters->pDevice->pContext->allocationCallbacks); +} + +static void ma_audio_worklet_thread_initialized__webaudio(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void* pUserData) +{ + ma_audio_worklet_thread_initialized_data* pParameters = (ma_audio_worklet_thread_initialized_data*)pUserData; + WebAudioWorkletProcessorCreateOptions workletProcessorOptions; + + MA_ASSERT(pParameters != NULL); + + if (success == EM_FALSE) { + pParameters->pDevice->webaudio.initResult = MA_ERROR; + return; + } + + MA_ZERO_OBJECT(&workletProcessorOptions); + workletProcessorOptions.name = "miniaudio"; /* I'm not entirely sure what to call this. Does this need to be globally unique, or does it need only be unique for a given AudioContext? 
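+       (From what I can tell from the Web Audio spec, processor names are scoped to the
+       AudioWorkletGlobalScope, i.e. per AudioContext, and registering a duplicate name in the
+       same scope throws, so per-context uniqueness should be sufficient.)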
*/ + + emscripten_create_wasm_audio_worklet_processor_async(audioContext, &workletProcessorOptions, ma_audio_worklet_processor_created__webaudio, pParameters); +} +#endif + +static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* No exclusive mode with Web Audio. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + /* + With AudioWorklets we'll have just a single AudioContext. I'm not sure why I'm not doing this for ScriptProcessorNode so + it might be worthwhile to look into that as well. + */ +#if defined(MA_USE_AUDIO_WORKLETS) + { + EmscriptenWebAudioCreateAttributes audioContextAttributes; + ma_audio_worklet_thread_initialized_data* pInitParameters; + void* pStackBuffer; + + if (pConfig->performanceProfile == ma_performance_profile_conservative) { + audioContextAttributes.latencyHint = MA_WEBAUDIO_LATENCY_HINT_PLAYBACK; + } else { + audioContextAttributes.latencyHint = MA_WEBAUDIO_LATENCY_HINT_INTERACTIVE; + } + + /* + In my testing, Firefox does not seem to capture audio data properly if the sample rate is set + to anything other than 48K. This does not seem to be the case for other browsers. For this reason, + if the device type is anything other than playback, we'll leave the sample rate as-is and let the + browser pick the appropriate rate for us. + */ + if (pConfig->deviceType == ma_device_type_playback) { + audioContextAttributes.sampleRate = pDescriptorPlayback->sampleRate; + } else { + audioContextAttributes.sampleRate = 0; + } + + /* It's not clear if this can return an error. None of the tests in the Emscripten repository check for this, so neither am I for now. */ + pDevice->webaudio.audioContext = emscripten_create_audio_context(&audioContextAttributes); + + + /* + With the context created we can now create the worklet. We can only have a single worklet per audio + context which means we'll need to craft this appropriately to handle duplex devices correctly. + */ + + /* + We now need to create a worker thread. This is a bit weird because we need to allocate our + own buffer for the thread's stack. The stack needs to be aligned to 16 bytes. I'm going to + allocate this on the heap to keep it simple. + */ + pStackBuffer = ma_aligned_malloc(MA_AUDIO_WORKLETS_THREAD_STACK_SIZE, 16, &pDevice->pContext->allocationCallbacks); + if (pStackBuffer == NULL) { + emscripten_destroy_audio_context(pDevice->webaudio.audioContext); + return MA_OUT_OF_MEMORY; + } + + /* Our thread initialization parameters need to be allocated on the heap so they don't go out of scope. 
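+       The worklet callbacks above run from the browser's event loop rather than from this call
+       stack, and ma_audio_worklet_processor_created__webaudio() frees these parameters once
+       initialization has finished, in both its success and failure paths.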
*/ + pInitParameters = (ma_audio_worklet_thread_initialized_data*)ma_malloc(sizeof(*pInitParameters), &pDevice->pContext->allocationCallbacks); + if (pInitParameters == NULL) { + ma_free(pStackBuffer, &pDevice->pContext->allocationCallbacks); + emscripten_destroy_audio_context(pDevice->webaudio.audioContext); + return MA_OUT_OF_MEMORY; + } + + pInitParameters->pDevice = pDevice; + pInitParameters->pConfig = pConfig; + pInitParameters->pDescriptorPlayback = pDescriptorPlayback; + pInitParameters->pDescriptorCapture = pDescriptorCapture; + + /* + We need to flag the device as not yet initialized so we can wait on it later. Unfortunately all of + the Emscripten WebAudio stuff is asynchronous. + */ + pDevice->webaudio.initResult = MA_BUSY; + { + emscripten_start_wasm_audio_worklet_thread_async(pDevice->webaudio.audioContext, pStackBuffer, MA_AUDIO_WORKLETS_THREAD_STACK_SIZE, ma_audio_worklet_thread_initialized__webaudio, pInitParameters); + } + while (pDevice->webaudio.initResult == MA_BUSY) { emscripten_sleep(1); } /* We must wait for initialization to complete. We're just spinning here. The emscripten_sleep() call is why we need to build with `-sASYNCIFY`. */ + + /* Initialization is now complete. Descriptors were updated when the worklet was initialized. */ + if (pDevice->webaudio.initResult != MA_SUCCESS) { + ma_free(pStackBuffer, &pDevice->pContext->allocationCallbacks); + emscripten_destroy_audio_context(pDevice->webaudio.audioContext); + return pDevice->webaudio.initResult; + } + + /* We need to add an entry to the miniaudio.devices list on the JS side so we can do some JS/C interop. */ + pDevice->webaudio.deviceIndex = EM_ASM_INT({ + return miniaudio.track_device({ + webaudio: emscriptenGetAudioObject($0), + state: 1 /* 1 = ma_device_state_stopped */ + }); + }, pDevice->webaudio.audioContext); + + return MA_SUCCESS; + } +#else + { + /* ScriptProcessorNode. This path requires us to do almost everything in JS, but we'll do as much as we can in C. */ + ma_uint32 deviceIndex; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_uint32 periodSizeInFrames; + + /* The channel count will depend on the device type. If it's a capture, use it's, otherwise use the playback side. */ + if (pConfig->deviceType == ma_device_type_capture) { + channels = (pDescriptorCapture->channels > 0) ? pDescriptorCapture->channels : MA_DEFAULT_CHANNELS; + } else { + channels = (pDescriptorPlayback->channels > 0) ? pDescriptorPlayback->channels : MA_DEFAULT_CHANNELS; + } + + /* + When testing in Firefox, I've seen it where capture mode fails if the sample rate is changed to anything other than it's + native rate. For this reason we're leaving the sample rate untouched for capture devices. + */ + if (pConfig->deviceType == ma_device_type_playback) { + sampleRate = pDescriptorPlayback->sampleRate; + } else { + sampleRate = 0; /* Let the browser decide when capturing. */ + } + + /* The period size needs to be a power of 2. */ + if (pConfig->deviceType == ma_device_type_capture) { + periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__webaudio(pDescriptorCapture, sampleRate, pConfig->performanceProfile); + } else { + periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__webaudio(pDescriptorPlayback, sampleRate, pConfig->performanceProfile); + } + + /* We need an intermediary buffer for doing interleaving and deinterleaving. 
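+
+       As a minimal sketch of the layout conversion involved (Web Audio hands over one planar buffer
+       per channel, whereas miniaudio works with interleaved frames), interleaving boils down to the
+       following, with "interleaved" and "planar" being purely illustrative names:
+
+           for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+               for (iChannel = 0; iChannel < channels; iChannel += 1) {
+                   interleaved[iFrame*channels + iChannel] = planar[iChannel][iFrame];
+               }
+           }
+
+       Deinterleaving is the mirror image (swap the two sides of the assignment).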
*/ + pDevice->webaudio.pIntermediaryBuffer = (float*)ma_malloc(periodSizeInFrames * channels * sizeof(float), &pDevice->pContext->allocationCallbacks); + if (pDevice->webaudio.pIntermediaryBuffer == NULL) { + return MA_OUT_OF_MEMORY; + } + + deviceIndex = EM_ASM_INT({ + var deviceType = $0; + var channels = $1; + var sampleRate = $2; + var bufferSize = $3; + var pIntermediaryBuffer = $4; + var pDevice = $5; + + if (typeof(window.miniaudio) === 'undefined') { + return -1; /* Context not initialized. */ + } + + var device = {}; + + /* First thing we need is an AudioContext. */ + var audioContextOptions = {}; + if (deviceType == window.miniaudio.device_type.playback) { + audioContextOptions.sampleRate = sampleRate; + } + + device.webaudio = new (window.AudioContext || window.webkitAudioContext)(audioContextOptions); + device.webaudio.suspend(); /* The AudioContext must be created in a suspended state. */ + device.state = window.miniaudio.device_state.stopped; + + /* + We need to create a ScriptProcessorNode. The channel situation is the same as the AudioWorklet path in that we + need to specify an output and configure the channel count there. + */ + var channelCountIn = 0; + var channelCountOut = channels; + if (deviceType != window.miniaudio.device_type.playback) { + channelCountIn = channels; + } + + device.scriptNode = device.webaudio.createScriptProcessor(bufferSize, channelCountIn, channelCountOut); + + /* The node processing callback. */ + device.scriptNode.onaudioprocess = function(e) { + if (device.intermediaryBufferView == null || device.intermediaryBufferView.length == 0) { + device.intermediaryBufferView = new Float32Array(Module.HEAPF32.buffer, pIntermediaryBuffer, bufferSize * channels); + } + + /* Do the capture side first. */ + if (deviceType == miniaudio.device_type.capture || deviceType == miniaudio.device_type.duplex) { + /* The data must be interleaved before being processed miniaudio. */ + for (var iChannel = 0; iChannel < channels; iChannel += 1) { + var inputBuffer = e.inputBuffer.getChannelData(iChannel); + var intermediaryBuffer = device.intermediaryBufferView; + + for (var iFrame = 0; iFrame < bufferSize; iFrame += 1) { + intermediaryBuffer[iFrame*channels + iChannel] = inputBuffer[iFrame]; + } + } + + _ma_device_process_pcm_frames_capture__webaudio(pDevice, bufferSize, pIntermediaryBuffer); + } + + if (deviceType == miniaudio.device_type.playback || deviceType == miniaudio.device_type.duplex) { + _ma_device_process_pcm_frames_playback__webaudio(pDevice, bufferSize, pIntermediaryBuffer); + + for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) { + var outputBuffer = e.outputBuffer.getChannelData(iChannel); + var intermediaryBuffer = device.intermediaryBufferView; + + for (var iFrame = 0; iFrame < bufferSize; iFrame += 1) { + outputBuffer[iFrame] = intermediaryBuffer[iFrame*channels + iChannel]; + } + } + } else { + /* It's a capture-only device. Make sure the output is silenced. */ + for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) { + e.outputBuffer.getChannelData(iChannel).fill(0.0); + } + } + }; + + /* Now we need to connect our node to the graph. 
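+
+              (A note on the intermediaryBufferView check above: if the WASM heap grows, the old
+              ArrayBuffer is detached and any views over it report a length of 0, which appears to be
+              what the length == 0 test is guarding against by recreating the view.)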
*/ + if (deviceType == miniaudio.device_type.capture || deviceType == miniaudio.device_type.duplex) { + navigator.mediaDevices.getUserMedia({audio:true, video:false}) + .then(function(stream) { + device.streamNode = device.webaudio.createMediaStreamSource(stream); + device.streamNode.connect(device.scriptNode); + device.scriptNode.connect(device.webaudio.destination); + }) + .catch(function(error) { + console.log("Failed to get user media: " + error); + }); + } + + if (deviceType == miniaudio.device_type.playback) { + device.scriptNode.connect(device.webaudio.destination); + } + + return miniaudio.track_device(device); + }, pConfig->deviceType, channels, sampleRate, periodSizeInFrames, pDevice->webaudio.pIntermediaryBuffer, pDevice); + + if (deviceIndex < 0) { + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + + pDevice->webaudio.deviceIndex = deviceIndex; + + /* Grab the sample rate from the audio context directly. */ + sampleRate = (ma_uint32)EM_ASM_INT({ return miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex); + + if (pDescriptorCapture != NULL) { + pDescriptorCapture->format = ma_format_f32; + pDescriptorCapture->channels = channels; + pDescriptorCapture->sampleRate = sampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_webaudio, pDescriptorCapture->channelMap, ma_countof(pDescriptorCapture->channelMap), pDescriptorCapture->channels); + pDescriptorCapture->periodSizeInFrames = periodSizeInFrames; + pDescriptorCapture->periodCount = 1; + } + + if (pDescriptorPlayback != NULL) { + pDescriptorPlayback->format = ma_format_f32; + pDescriptorPlayback->channels = channels; + pDescriptorPlayback->sampleRate = sampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_webaudio, pDescriptorPlayback->channelMap, ma_countof(pDescriptorPlayback->channelMap), pDescriptorPlayback->channels); + pDescriptorPlayback->periodSizeInFrames = periodSizeInFrames; + pDescriptorPlayback->periodCount = 1; + } + + return MA_SUCCESS; + } +#endif +} + +static ma_result ma_device_start__webaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + EM_ASM({ + var device = miniaudio.get_device_by_index($0); + device.webaudio.resume(); + device.state = miniaudio.device_state.started; + }, pDevice->webaudio.deviceIndex); + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__webaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + /* + From the WebAudio API documentation for AudioContext.suspend(): + + Suspends the progression of AudioContext's currentTime, allows any current context processing blocks that are already processed to be played to the + destination, and then allows the system to release its claim on audio hardware. + + I read this to mean that "any current context processing blocks" are processed by suspend() - i.e. They they are drained. We therefore shouldn't need to + do any kind of explicit draining. + */ + EM_ASM({ + var device = miniaudio.get_device_by_index($0); + device.webaudio.suspend(); + device.state = miniaudio.device_state.stopped; + }, pDevice->webaudio.deviceIndex); + + ma_device__on_notification_stopped(pDevice); + + return MA_SUCCESS; +} + +static ma_result ma_context_uninit__webaudio(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_webaudio); + + (void)pContext; /* Unused. */ + + /* Remove the global miniaudio object from window if there are no more references to it. 
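+
+       For illustration, a minimal (hypothetical) client-side lifetime that exercises this reference
+       counting could look like the following sketch, where "context" is just a local variable:
+
+           ma_context context;
+           if (ma_context_init(NULL, 0, NULL, &context) == MA_SUCCESS) {    <-- referenceCount becomes 1
+               ...devices are created and used here...
+               ma_context_uninit(&context);                                 <-- referenceCount drops back to 0 and window.miniaudio is deleted
+           }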
*/ + EM_ASM({ + if (typeof(window.miniaudio) !== 'undefined') { + window.miniaudio.referenceCount -= 1; + if (window.miniaudio.referenceCount === 0) { + delete window.miniaudio; + } + } + }); + + return MA_SUCCESS; +} + +static ma_result ma_context_init__webaudio(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + int resultFromJS; + + MA_ASSERT(pContext != NULL); + + (void)pConfig; /* Unused. */ + + /* Here is where our global JavaScript object is initialized. */ + resultFromJS = EM_ASM_INT({ + if (typeof window === 'undefined' || (window.AudioContext || window.webkitAudioContext) === undefined) { + return 0; /* Web Audio not supported. */ + } + + if (typeof(window.miniaudio) === 'undefined') { + window.miniaudio = { + referenceCount: 0 + }; + + /* Device types. */ + window.miniaudio.device_type = {}; + window.miniaudio.device_type.playback = $0; + window.miniaudio.device_type.capture = $1; + window.miniaudio.device_type.duplex = $2; + + /* Device states. */ + window.miniaudio.device_state = {}; + window.miniaudio.device_state.stopped = $3; + window.miniaudio.device_state.started = $4; + + /* Device cache for mapping devices to indexes for JavaScript/C interop. */ + miniaudio.devices = []; + + miniaudio.track_device = function(device) { + /* Try inserting into a free slot first. */ + for (var iDevice = 0; iDevice < miniaudio.devices.length; ++iDevice) { + if (miniaudio.devices[iDevice] == null) { + miniaudio.devices[iDevice] = device; + return iDevice; + } + } + + /* Getting here means there is no empty slots in the array so we just push to the end. */ + miniaudio.devices.push(device); + return miniaudio.devices.length - 1; + }; + + miniaudio.untrack_device_by_index = function(deviceIndex) { + /* We just set the device's slot to null. The slot will get reused in the next call to ma_track_device. */ + miniaudio.devices[deviceIndex] = null; + + /* Trim the array if possible. 
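+
+                   For example: with devices [A, B, C] tracked, untracking index 1 leaves [A, null, C]
+                   (no trimming, since the last slot is still in use); untracking index 2 afterwards
+                   pops both trailing nulls, leaving just [A].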
*/ + while (miniaudio.devices.length > 0) { + if (miniaudio.devices[miniaudio.devices.length-1] == null) { + miniaudio.devices.pop(); + } else { + break; + } + } + }; + + miniaudio.untrack_device = function(device) { + for (var iDevice = 0; iDevice < miniaudio.devices.length; ++iDevice) { + if (miniaudio.devices[iDevice] == device) { + return miniaudio.untrack_device_by_index(iDevice); + } + } + }; + + miniaudio.get_device_by_index = function(deviceIndex) { + return miniaudio.devices[deviceIndex]; + }; + + miniaudio.unlock_event_types = (function(){ + return ['touchstart', 'touchend', 'click']; + })(); + + miniaudio.unlock = function() { + for(var i = 0; i < miniaudio.devices.length; ++i) { + var device = miniaudio.devices[i]; + if (device != null && device.webaudio != null && device.state === 2 /* ma_device_state_started */) { + device.webaudio.resume(); + } + } + miniaudio.unlock_event_types.map(function(event_type) { + document.removeEventListener(event_type, miniaudio.unlock, true); + }); + }; + + miniaudio.unlock_event_types.map(function(event_type) { + document.addEventListener(event_type, miniaudio.unlock, true); + }); + } + + window.miniaudio.referenceCount += 1; + + return 1; + }, ma_device_type_playback, ma_device_type_capture, ma_device_type_duplex, ma_device_state_stopped, ma_device_state_started); + + if (resultFromJS != 1) { + return MA_FAILED_TO_INIT_BACKEND; + } + + pCallbacks->onContextInit = ma_context_init__webaudio; + pCallbacks->onContextUninit = ma_context_uninit__webaudio; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__webaudio; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__webaudio; + pCallbacks->onDeviceInit = ma_device_init__webaudio; + pCallbacks->onDeviceUninit = ma_device_uninit__webaudio; + pCallbacks->onDeviceStart = ma_device_start__webaudio; + pCallbacks->onDeviceStop = ma_device_stop__webaudio; + pCallbacks->onDeviceRead = NULL; /* Not needed because WebAudio is asynchronous. */ + pCallbacks->onDeviceWrite = NULL; /* Not needed because WebAudio is asynchronous. */ + pCallbacks->onDeviceDataLoop = NULL; /* Not needed because WebAudio is asynchronous. */ + + return MA_SUCCESS; +} +#endif /* Web Audio */ + + + +static ma_bool32 ma__is_channel_map_valid(const ma_channel* pChannelMap, ma_uint32 channels) +{ + /* A blank channel map should be allowed, in which case it should use an appropriate default which will depend on context. */ + if (pChannelMap != NULL && pChannelMap[0] != MA_CHANNEL_NONE) { + ma_uint32 iChannel; + + if (channels == 0 || channels > MA_MAX_CHANNELS) { + return MA_FALSE; /* Channel count out of range. */ + } + + /* A channel cannot be present in the channel map more than once. 
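+
+       For example, using miniaudio's standard channel positions (illustrative only):
+
+           ma_channel ok[2]  = { MA_CHANNEL_FRONT_LEFT, MA_CHANNEL_FRONT_RIGHT };    <-- accepted
+           ma_channel bad[2] = { MA_CHANNEL_FRONT_LEFT, MA_CHANNEL_FRONT_LEFT  };    <-- rejected (duplicate)
+
+       A map whose first entry is MA_CHANNEL_NONE is treated as blank and accepted as-is.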
*/ + for (iChannel = 0; iChannel < channels; ++iChannel) { + ma_uint32 jChannel; + for (jChannel = iChannel + 1; jChannel < channels; ++jChannel) { + if (pChannelMap[iChannel] == pChannelMap[jChannel]) { + return MA_FALSE; + } + } + } + } + + return MA_TRUE; +} + + +static ma_bool32 ma_context_is_backend_asynchronous(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + + if (pContext->callbacks.onDeviceRead == NULL && pContext->callbacks.onDeviceWrite == NULL) { + if (pContext->callbacks.onDeviceDataLoop == NULL) { + return MA_TRUE; + } else { + return MA_FALSE; + } + } else { + return MA_FALSE; + } +} + + +static ma_result ma_device__post_init_setup(ma_device* pDevice, ma_device_type deviceType) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) { + if (pDevice->capture.format == ma_format_unknown) { + pDevice->capture.format = pDevice->capture.internalFormat; + } + if (pDevice->capture.channels == 0) { + pDevice->capture.channels = pDevice->capture.internalChannels; + } + if (pDevice->capture.channelMap[0] == MA_CHANNEL_NONE) { + MA_ASSERT(pDevice->capture.channels <= MA_MAX_CHANNELS); + if (pDevice->capture.internalChannels == pDevice->capture.channels) { + ma_channel_map_copy(pDevice->capture.channelMap, pDevice->capture.internalChannelMap, pDevice->capture.channels); + } else { + if (pDevice->capture.channelMixMode == ma_channel_mix_mode_simple) { + ma_channel_map_init_blank(pDevice->capture.channelMap, pDevice->capture.channels); + } else { + ma_channel_map_init_standard(ma_standard_channel_map_default, pDevice->capture.channelMap, ma_countof(pDevice->capture.channelMap), pDevice->capture.channels); + } + } + } + } + + if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) { + if (pDevice->playback.format == ma_format_unknown) { + pDevice->playback.format = pDevice->playback.internalFormat; + } + if (pDevice->playback.channels == 0) { + pDevice->playback.channels = pDevice->playback.internalChannels; + } + if (pDevice->playback.channelMap[0] == MA_CHANNEL_NONE) { + MA_ASSERT(pDevice->playback.channels <= MA_MAX_CHANNELS); + if (pDevice->playback.internalChannels == pDevice->playback.channels) { + ma_channel_map_copy(pDevice->playback.channelMap, pDevice->playback.internalChannelMap, pDevice->playback.channels); + } else { + if (pDevice->playback.channelMixMode == ma_channel_mix_mode_simple) { + ma_channel_map_init_blank(pDevice->playback.channelMap, pDevice->playback.channels); + } else { + ma_channel_map_init_standard(ma_standard_channel_map_default, pDevice->playback.channelMap, ma_countof(pDevice->playback.channelMap), pDevice->playback.channels); + } + } + } + } + + if (pDevice->sampleRate == 0) { + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) { + pDevice->sampleRate = pDevice->capture.internalSampleRate; + } else { + pDevice->sampleRate = pDevice->playback.internalSampleRate; + } + } + + /* Data converters. */ + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) { + /* Converting from internal device format to client format. 
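+
+       For illustration, the same converter API can also be used standalone. A minimal (hypothetical)
+       sketch converting interleaved s16/44100 stereo to f32/48000 stereo, where pIn, pOut, inFrameCount
+       and outFrameCapacity are assumed to be provided by the caller, might look like:
+
+           ma_data_converter_config cfg = ma_data_converter_config_init_default();
+           cfg.formatIn  = ma_format_s16; cfg.channelsIn  = 2; cfg.sampleRateIn  = 44100;
+           cfg.formatOut = ma_format_f32; cfg.channelsOut = 2; cfg.sampleRateOut = 48000;
+
+           ma_data_converter converter;
+           if (ma_data_converter_init(&cfg, NULL, &converter) == MA_SUCCESS) {
+               ma_uint64 inCount  = inFrameCount;
+               ma_uint64 outCount = outFrameCapacity;
+               ma_data_converter_process_pcm_frames(&converter, pIn, &inCount, pOut, &outCount);
+               ma_data_converter_uninit(&converter, NULL);
+           }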
*/ + ma_data_converter_config converterConfig = ma_data_converter_config_init_default(); + converterConfig.formatIn = pDevice->capture.internalFormat; + converterConfig.channelsIn = pDevice->capture.internalChannels; + converterConfig.sampleRateIn = pDevice->capture.internalSampleRate; + converterConfig.pChannelMapIn = pDevice->capture.internalChannelMap; + converterConfig.formatOut = pDevice->capture.format; + converterConfig.channelsOut = pDevice->capture.channels; + converterConfig.sampleRateOut = pDevice->sampleRate; + converterConfig.pChannelMapOut = pDevice->capture.channelMap; + converterConfig.channelMixMode = pDevice->capture.channelMixMode; + converterConfig.calculateLFEFromSpatialChannels = pDevice->capture.calculateLFEFromSpatialChannels; + converterConfig.allowDynamicSampleRate = MA_FALSE; + converterConfig.resampling.algorithm = pDevice->resampling.algorithm; + converterConfig.resampling.linear.lpfOrder = pDevice->resampling.linear.lpfOrder; + converterConfig.resampling.pBackendVTable = pDevice->resampling.pBackendVTable; + converterConfig.resampling.pBackendUserData = pDevice->resampling.pBackendUserData; + + /* Make sure the old converter is uninitialized first. */ + if (ma_device_get_state(pDevice) != ma_device_state_uninitialized) { + ma_data_converter_uninit(&pDevice->capture.converter, &pDevice->pContext->allocationCallbacks); + } + + result = ma_data_converter_init(&converterConfig, &pDevice->pContext->allocationCallbacks, &pDevice->capture.converter); + if (result != MA_SUCCESS) { + return result; + } + } + + if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) { + /* Converting from client format to device format. */ + ma_data_converter_config converterConfig = ma_data_converter_config_init_default(); + converterConfig.formatIn = pDevice->playback.format; + converterConfig.channelsIn = pDevice->playback.channels; + converterConfig.sampleRateIn = pDevice->sampleRate; + converterConfig.pChannelMapIn = pDevice->playback.channelMap; + converterConfig.formatOut = pDevice->playback.internalFormat; + converterConfig.channelsOut = pDevice->playback.internalChannels; + converterConfig.sampleRateOut = pDevice->playback.internalSampleRate; + converterConfig.pChannelMapOut = pDevice->playback.internalChannelMap; + converterConfig.channelMixMode = pDevice->playback.channelMixMode; + converterConfig.calculateLFEFromSpatialChannels = pDevice->playback.calculateLFEFromSpatialChannels; + converterConfig.allowDynamicSampleRate = MA_FALSE; + converterConfig.resampling.algorithm = pDevice->resampling.algorithm; + converterConfig.resampling.linear.lpfOrder = pDevice->resampling.linear.lpfOrder; + converterConfig.resampling.pBackendVTable = pDevice->resampling.pBackendVTable; + converterConfig.resampling.pBackendUserData = pDevice->resampling.pBackendUserData; + + /* Make sure the old converter is uninitialized first. */ + if (ma_device_get_state(pDevice) != ma_device_state_uninitialized) { + ma_data_converter_uninit(&pDevice->playback.converter, &pDevice->pContext->allocationCallbacks); + } + + result = ma_data_converter_init(&converterConfig, &pDevice->pContext->allocationCallbacks, &pDevice->playback.converter); + if (result != MA_SUCCESS) { + return result; + } + } + + + /* + If the device is doing playback (ma_device_type_playback or ma_device_type_duplex), there's + a couple of situations where we'll need a heap allocated cache. + + The first is a duplex device for backends that use a callback for data delivery. 
The reason + this is needed is that the input stage needs to have a buffer to place the input data while it + waits for the playback stage, after which the miniaudio data callback will get fired. This is + not needed for backends that use a blocking API because miniaudio manages temporary buffers on + the stack to achieve this. + + The other situation is when the data converter does not have the ability to query the number + of input frames that are required in order to process a given number of output frames. When + performing data conversion, it's useful if miniaudio know exactly how many frames it needs + from the client in order to generate a given number of output frames. This way, only exactly + the number of frames are needed to be read from the client which means no cache is necessary. + On the other hand, if miniaudio doesn't know how many frames to read, it is forced to read + in fixed sized chunks and then cache any residual unused input frames, those of which will be + processed at a later stage. + */ + if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) { + ma_uint64 unused; + + pDevice->playback.inputCacheConsumed = 0; + pDevice->playback.inputCacheRemaining = 0; + + if (pDevice->type == ma_device_type_duplex || /* Duplex. backend may decide to use ma_device_handle_backend_data_callback() which will require this cache. */ + ma_data_converter_get_required_input_frame_count(&pDevice->playback.converter, 1, &unused) != MA_SUCCESS) /* Data conversion required input frame calculation not supported. */ + { + /* We need a heap allocated cache. We want to size this based on the period size. */ + void* pNewInputCache; + ma_uint64 newInputCacheCap; + ma_uint64 newInputCacheSizeInBytes; + + newInputCacheCap = ma_calculate_frame_count_after_resampling(pDevice->playback.internalSampleRate, pDevice->sampleRate, pDevice->playback.internalPeriodSizeInFrames); + + newInputCacheSizeInBytes = newInputCacheCap * ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + if (newInputCacheSizeInBytes > MA_SIZE_MAX) { + ma_free(pDevice->playback.pInputCache, &pDevice->pContext->allocationCallbacks); + pDevice->playback.pInputCache = NULL; + pDevice->playback.inputCacheCap = 0; + return MA_OUT_OF_MEMORY; /* Allocation too big. Should never hit this, but makes the cast below safer for 32-bit builds. */ + } + + pNewInputCache = ma_realloc(pDevice->playback.pInputCache, (size_t)newInputCacheSizeInBytes, &pDevice->pContext->allocationCallbacks); + if (pNewInputCache == NULL) { + ma_free(pDevice->playback.pInputCache, &pDevice->pContext->allocationCallbacks); + pDevice->playback.pInputCache = NULL; + pDevice->playback.inputCacheCap = 0; + return MA_OUT_OF_MEMORY; + } + + pDevice->playback.pInputCache = pNewInputCache; + pDevice->playback.inputCacheCap = newInputCacheCap; + } else { + /* Heap allocation not required. Make sure we clear out the old cache just in case this function was called in response to a route change. */ + ma_free(pDevice->playback.pInputCache, &pDevice->pContext->allocationCallbacks); + pDevice->playback.pInputCache = NULL; + pDevice->playback.inputCacheCap = 0; + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_device_post_init(ma_device* pDevice, ma_device_type deviceType, const ma_device_descriptor* pDescriptorPlayback, const ma_device_descriptor* pDescriptorCapture) +{ + ma_result result; + + if (pDevice == NULL) { + return MA_INVALID_ARGS; + } + + /* Capture. 
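+       If the backend reported a period size of 0 frames it is derived from the millisecond value
+       instead, e.g. 10ms at 48000Hz resolves to 48000 * 10 / 1000 = 480 frames.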
*/ + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) { + if (ma_device_descriptor_is_valid(pDescriptorCapture) == MA_FALSE) { + return MA_INVALID_ARGS; + } + + pDevice->capture.internalFormat = pDescriptorCapture->format; + pDevice->capture.internalChannels = pDescriptorCapture->channels; + pDevice->capture.internalSampleRate = pDescriptorCapture->sampleRate; + MA_COPY_MEMORY(pDevice->capture.internalChannelMap, pDescriptorCapture->channelMap, sizeof(pDescriptorCapture->channelMap)); + pDevice->capture.internalPeriodSizeInFrames = pDescriptorCapture->periodSizeInFrames; + pDevice->capture.internalPeriods = pDescriptorCapture->periodCount; + + if (pDevice->capture.internalPeriodSizeInFrames == 0) { + pDevice->capture.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pDescriptorCapture->periodSizeInMilliseconds, pDescriptorCapture->sampleRate); + } + } + + /* Playback. */ + if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) { + if (ma_device_descriptor_is_valid(pDescriptorPlayback) == MA_FALSE) { + return MA_INVALID_ARGS; + } + + pDevice->playback.internalFormat = pDescriptorPlayback->format; + pDevice->playback.internalChannels = pDescriptorPlayback->channels; + pDevice->playback.internalSampleRate = pDescriptorPlayback->sampleRate; + MA_COPY_MEMORY(pDevice->playback.internalChannelMap, pDescriptorPlayback->channelMap, sizeof(pDescriptorPlayback->channelMap)); + pDevice->playback.internalPeriodSizeInFrames = pDescriptorPlayback->periodSizeInFrames; + pDevice->playback.internalPeriods = pDescriptorPlayback->periodCount; + + if (pDevice->playback.internalPeriodSizeInFrames == 0) { + pDevice->playback.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pDescriptorPlayback->periodSizeInMilliseconds, pDescriptorPlayback->sampleRate); + } + } + + /* + The name of the device can be retrieved from device info. This may be temporary and replaced with a `ma_device_get_info(pDevice, deviceType)` instead. + For loopback devices, we need to retrieve the name of the playback device. + */ + { + ma_device_info deviceInfo; + + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) { + result = ma_device_get_info(pDevice, (deviceType == ma_device_type_loopback) ? ma_device_type_playback : ma_device_type_capture, &deviceInfo); + if (result == MA_SUCCESS) { + ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), deviceInfo.name, (size_t)-1); + } else { + /* We failed to retrieve the device info. Fall back to a default name. */ + if (pDescriptorCapture->pDeviceID == NULL) { + ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), "Capture Device", (size_t)-1); + } + } + } + + if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) { + result = ma_device_get_info(pDevice, ma_device_type_playback, &deviceInfo); + if (result == MA_SUCCESS) { + ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), deviceInfo.name, (size_t)-1); + } else { + /* We failed to retrieve the device info. Fall back to a default name. 
*/ + if (pDescriptorPlayback->pDeviceID == NULL) { + ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), "Playback Device", (size_t)-1); + } + } + } + } + + /* Update data conversion. */ + return ma_device__post_init_setup(pDevice, deviceType); /* TODO: Should probably rename ma_device__post_init_setup() to something better. */ +} + + +static ma_thread_result MA_THREADCALL ma_worker_thread(void* pData) +{ + ma_device* pDevice = (ma_device*)pData; +#ifdef MA_WIN32 + HRESULT CoInitializeResult; +#endif + + MA_ASSERT(pDevice != NULL); + +#ifdef MA_WIN32 + CoInitializeResult = ma_CoInitializeEx(pDevice->pContext, NULL, MA_COINIT_VALUE); +#endif + + /* + When the device is being initialized it's initial state is set to ma_device_state_uninitialized. Before returning from + ma_device_init(), the state needs to be set to something valid. In miniaudio the device's default state immediately + after initialization is stopped, so therefore we need to mark the device as such. miniaudio will wait on the worker + thread to signal an event to know when the worker thread is ready for action. + */ + ma_device__set_state(pDevice, ma_device_state_stopped); + ma_event_signal(&pDevice->stopEvent); + + for (;;) { /* <-- This loop just keeps the thread alive. The main audio loop is inside. */ + ma_result startResult; + ma_result stopResult; /* <-- This will store the result from onDeviceStop(). If it returns an error, we don't fire the stopped notification callback. */ + + /* We wait on an event to know when something has requested that the device be started and the main loop entered. */ + ma_event_wait(&pDevice->wakeupEvent); + + /* Default result code. */ + pDevice->workResult = MA_SUCCESS; + + /* If the reason for the wake up is that we are terminating, just break from the loop. */ + if (ma_device_get_state(pDevice) == ma_device_state_uninitialized) { + break; + } + + /* + Getting to this point means the device is wanting to get started. The function that has requested that the device + be started will be waiting on an event (pDevice->startEvent) which means we need to make sure we signal the event + in both the success and error case. It's important that the state of the device is set _before_ signaling the event. + */ + MA_ASSERT(ma_device_get_state(pDevice) == ma_device_state_starting); + + /* If the device has a start callback, start it now. */ + if (pDevice->pContext->callbacks.onDeviceStart != NULL) { + startResult = pDevice->pContext->callbacks.onDeviceStart(pDevice); + } else { + startResult = MA_SUCCESS; + } + + /* + If starting was not successful we'll need to loop back to the start and wait for something + to happen (pDevice->wakeupEvent). + */ + if (startResult != MA_SUCCESS) { + pDevice->workResult = startResult; + ma_event_signal(&pDevice->startEvent); /* <-- Always signal the start event so ma_device_start() can return as it'll be waiting on it. */ + continue; + } + + /* Make sure the state is set appropriately. */ + ma_device__set_state(pDevice, ma_device_state_started); /* <-- Set this before signaling the event so that the state is always guaranteed to be good after ma_device_start() has returned. 
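+       (The thread calling ma_device_start() is blocked on startEvent and picks up workResult once
+       woken, which is why both the failure path above and the success path here signal that event.)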
*/ + ma_event_signal(&pDevice->startEvent); + + ma_device__on_notification_started(pDevice); + + if (pDevice->pContext->callbacks.onDeviceDataLoop != NULL) { + pDevice->pContext->callbacks.onDeviceDataLoop(pDevice); + } else { + /* The backend is not using a custom main loop implementation, so now fall back to the blocking read-write implementation. */ + ma_device_audio_thread__default_read_write(pDevice); + } + + /* Getting here means we have broken from the main loop which happens the application has requested that device be stopped. */ + if (pDevice->pContext->callbacks.onDeviceStop != NULL) { + stopResult = pDevice->pContext->callbacks.onDeviceStop(pDevice); + } else { + stopResult = MA_SUCCESS; /* No stop callback with the backend. Just assume successful. */ + } + + /* + After the device has stopped, make sure an event is posted. Don't post a stopped event if + stopping failed. This can happen on some backends when the underlying stream has been + stopped due to the device being physically unplugged or disabled via an OS setting. + */ + if (stopResult == MA_SUCCESS) { + ma_device__on_notification_stopped(pDevice); + } + + /* A function somewhere is waiting for the device to have stopped for real so we need to signal an event to allow it to continue. */ + ma_device__set_state(pDevice, ma_device_state_stopped); + ma_event_signal(&pDevice->stopEvent); + } + +#ifdef MA_WIN32 + if (CoInitializeResult == S_OK) { + ma_CoUninitialize(pDevice->pContext); + } +#endif + + return (ma_thread_result)0; +} + + +/* Helper for determining whether or not the given device is initialized. */ +static ma_bool32 ma_device__is_initialized(ma_device* pDevice) +{ + if (pDevice == NULL) { + return MA_FALSE; + } + + return ma_device_get_state(pDevice) != ma_device_state_uninitialized; +} + + +#ifdef MA_WIN32 +static ma_result ma_context_uninit_backend_apis__win32(ma_context* pContext) +{ + /* For some reason UWP complains when CoUninitialize() is called. I'm just not going to call it on UWP. 
*/ +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + if (pContext->win32.CoInitializeResult == S_OK) { + ma_CoUninitialize(pContext); + } + +#if defined(MA_WIN32_DESKTOP) + ma_dlclose(ma_context_get_log(pContext), pContext->win32.hUser32DLL); + ma_dlclose(ma_context_get_log(pContext), pContext->win32.hAdvapi32DLL); +#endif + + ma_dlclose(ma_context_get_log(pContext), pContext->win32.hOle32DLL); +#else + (void)pContext; +#endif + + return MA_SUCCESS; +} + +static ma_result ma_context_init_backend_apis__win32(ma_context* pContext) +{ +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) +#if defined(MA_WIN32_DESKTOP) + /* User32.dll */ + pContext->win32.hUser32DLL = ma_dlopen(ma_context_get_log(pContext), "user32.dll"); + if (pContext->win32.hUser32DLL == NULL) { + return MA_FAILED_TO_INIT_BACKEND; + } + + pContext->win32.GetForegroundWindow = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hUser32DLL, "GetForegroundWindow"); + pContext->win32.GetDesktopWindow = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hUser32DLL, "GetDesktopWindow"); + + + /* Advapi32.dll */ + pContext->win32.hAdvapi32DLL = ma_dlopen(ma_context_get_log(pContext), "advapi32.dll"); + if (pContext->win32.hAdvapi32DLL == NULL) { + return MA_FAILED_TO_INIT_BACKEND; + } + + pContext->win32.RegOpenKeyExA = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hAdvapi32DLL, "RegOpenKeyExA"); + pContext->win32.RegCloseKey = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hAdvapi32DLL, "RegCloseKey"); + pContext->win32.RegQueryValueExA = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hAdvapi32DLL, "RegQueryValueExA"); +#endif + + /* Ole32.dll */ + pContext->win32.hOle32DLL = ma_dlopen(ma_context_get_log(pContext), "ole32.dll"); + if (pContext->win32.hOle32DLL == NULL) { + return MA_FAILED_TO_INIT_BACKEND; + } + + pContext->win32.CoInitialize = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hOle32DLL, "CoInitialize"); + pContext->win32.CoInitializeEx = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hOle32DLL, "CoInitializeEx"); + pContext->win32.CoUninitialize = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hOle32DLL, "CoUninitialize"); + pContext->win32.CoCreateInstance = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hOle32DLL, "CoCreateInstance"); + pContext->win32.CoTaskMemFree = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hOle32DLL, "CoTaskMemFree"); + pContext->win32.PropVariantClear = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hOle32DLL, "PropVariantClear"); + pContext->win32.StringFromGUID2 = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hOle32DLL, "StringFromGUID2"); +#else + (void)pContext; /* Unused. 
*/ +#endif + + pContext->win32.CoInitializeResult = ma_CoInitializeEx(pContext, NULL, MA_COINIT_VALUE); + return MA_SUCCESS; +} +#else +static ma_result ma_context_uninit_backend_apis__nix(ma_context* pContext) +{ + (void)pContext; + + return MA_SUCCESS; +} + +static ma_result ma_context_init_backend_apis__nix(ma_context* pContext) +{ + (void)pContext; + + return MA_SUCCESS; +} +#endif + +static ma_result ma_context_init_backend_apis(ma_context* pContext) +{ + ma_result result; +#ifdef MA_WIN32 + result = ma_context_init_backend_apis__win32(pContext); +#else + result = ma_context_init_backend_apis__nix(pContext); +#endif + + return result; +} + +static ma_result ma_context_uninit_backend_apis(ma_context* pContext) +{ + ma_result result; +#ifdef MA_WIN32 + result = ma_context_uninit_backend_apis__win32(pContext); +#else + result = ma_context_uninit_backend_apis__nix(pContext); +#endif + + return result; +} + + +/* The default capacity doesn't need to be too big. */ +#ifndef MA_DEFAULT_DEVICE_JOB_QUEUE_CAPACITY +#define MA_DEFAULT_DEVICE_JOB_QUEUE_CAPACITY 32 +#endif + +MA_API ma_device_job_thread_config ma_device_job_thread_config_init(void) +{ + ma_device_job_thread_config config; + + MA_ZERO_OBJECT(&config); + config.noThread = MA_FALSE; + config.jobQueueCapacity = MA_DEFAULT_DEVICE_JOB_QUEUE_CAPACITY; + config.jobQueueFlags = 0; + + return config; +} + + +static ma_thread_result MA_THREADCALL ma_device_job_thread_entry(void* pUserData) +{ + ma_device_job_thread* pJobThread = (ma_device_job_thread*)pUserData; + MA_ASSERT(pJobThread != NULL); + + for (;;) { + ma_result result; + ma_job job; + + result = ma_device_job_thread_next(pJobThread, &job); + if (result != MA_SUCCESS) { + break; + } + + if (job.toc.breakup.code == MA_JOB_TYPE_QUIT) { + break; + } + + ma_job_process(&job); + } + + return (ma_thread_result)0; +} + +MA_API ma_result ma_device_job_thread_init(const ma_device_job_thread_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_device_job_thread* pJobThread) +{ + ma_result result; + ma_job_queue_config jobQueueConfig; + + if (pJobThread == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pJobThread); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + + /* Initialize the job queue before the thread to ensure it's in a valid state. */ + jobQueueConfig = ma_job_queue_config_init(pConfig->jobQueueFlags, pConfig->jobQueueCapacity); + + result = ma_job_queue_init(&jobQueueConfig, pAllocationCallbacks, &pJobThread->jobQueue); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize job queue. */ + } + + + /* The thread needs to be initialized after the job queue to ensure the thread doesn't try to access it prematurely. */ + if (pConfig->noThread == MA_FALSE) { + result = ma_thread_create(&pJobThread->thread, ma_thread_priority_normal, 0, ma_device_job_thread_entry, pJobThread, pAllocationCallbacks); + if (result != MA_SUCCESS) { + ma_job_queue_uninit(&pJobThread->jobQueue, pAllocationCallbacks); + return result; /* Failed to create the job thread. */ + } + + pJobThread->_hasThread = MA_TRUE; + } else { + pJobThread->_hasThread = MA_FALSE; + } + + + return MA_SUCCESS; +} + +MA_API void ma_device_job_thread_uninit(ma_device_job_thread* pJobThread, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pJobThread == NULL) { + return; + } + + /* The first thing to do is post a quit message to the job queue. If we're using a thread we'll need to wait for it. 
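+
+       For illustration, a minimal (hypothetical) lifecycle of this helper looks something like the
+       following, with NULL used for default allocation callbacks:
+
+           ma_device_job_thread jobThread;
+           ma_device_job_thread_config config = ma_device_job_thread_config_init();
+
+           if (ma_device_job_thread_init(&config, NULL, &jobThread) == MA_SUCCESS) {
+               ...jobs are posted with ma_device_job_thread_post() as needed...
+               ma_device_job_thread_uninit(&jobThread, NULL);    <-- posts MA_JOB_TYPE_QUIT and waits for the thread
+           }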
*/ + { + ma_job job = ma_job_init(MA_JOB_TYPE_QUIT); + ma_device_job_thread_post(pJobThread, &job); + } + + /* Wait for the thread to terminate naturally. */ + if (pJobThread->_hasThread) { + ma_thread_wait(&pJobThread->thread); + } + + /* At this point the thread should be terminated so we can safely uninitialize the job queue. */ + ma_job_queue_uninit(&pJobThread->jobQueue, pAllocationCallbacks); +} + +MA_API ma_result ma_device_job_thread_post(ma_device_job_thread* pJobThread, const ma_job* pJob) +{ + if (pJobThread == NULL || pJob == NULL) { + return MA_INVALID_ARGS; + } + + return ma_job_queue_post(&pJobThread->jobQueue, pJob); +} + +MA_API ma_result ma_device_job_thread_next(ma_device_job_thread* pJobThread, ma_job* pJob) +{ + if (pJob == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pJob); + + if (pJobThread == NULL) { + return MA_INVALID_ARGS; + } + + return ma_job_queue_next(&pJobThread->jobQueue, pJob); +} + + + +MA_API ma_context_config ma_context_config_init(void) +{ + ma_context_config config; + MA_ZERO_OBJECT(&config); + + return config; +} + +MA_API ma_result ma_context_init(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pConfig, ma_context* pContext) +{ + ma_result result; + ma_context_config defaultConfig; + ma_backend defaultBackends[ma_backend_null+1]; + ma_uint32 iBackend; + ma_backend* pBackendsToIterate; + ma_uint32 backendsToIterateCount; + + if (pContext == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pContext); + + /* Always make sure the config is set first to ensure properties are available as soon as possible. */ + if (pConfig == NULL) { + defaultConfig = ma_context_config_init(); + pConfig = &defaultConfig; + } + + /* Allocation callbacks need to come first because they'll be passed around to other areas. */ + result = ma_allocation_callbacks_init_copy(&pContext->allocationCallbacks, &pConfig->allocationCallbacks); + if (result != MA_SUCCESS) { + return result; + } + + /* Get a lot set up first so we can start logging ASAP. */ + if (pConfig->pLog != NULL) { + pContext->pLog = pConfig->pLog; + } else { + result = ma_log_init(&pContext->allocationCallbacks, &pContext->log); + if (result == MA_SUCCESS) { + pContext->pLog = &pContext->log; + } else { + pContext->pLog = NULL; /* Logging is not available. */ + } + } + + pContext->threadPriority = pConfig->threadPriority; + pContext->threadStackSize = pConfig->threadStackSize; + pContext->pUserData = pConfig->pUserData; + + /* Backend APIs need to be initialized first. This is where external libraries will be loaded and linked. */ + result = ma_context_init_backend_apis(pContext); + if (result != MA_SUCCESS) { + return result; + } + + for (iBackend = 0; iBackend <= ma_backend_null; ++iBackend) { + defaultBackends[iBackend] = (ma_backend)iBackend; + } + + pBackendsToIterate = (ma_backend*)backends; + backendsToIterateCount = backendCount; + if (pBackendsToIterate == NULL) { + pBackendsToIterate = (ma_backend*)defaultBackends; + backendsToIterateCount = ma_countof(defaultBackends); + } + + MA_ASSERT(pBackendsToIterate != NULL); + + for (iBackend = 0; iBackend < backendsToIterateCount; iBackend += 1) { + ma_backend backend = pBackendsToIterate[iBackend]; + + /* Make sure all callbacks are reset so we don't accidentally drag in any from previously failed initialization attempts. */ + MA_ZERO_OBJECT(&pContext->callbacks); + + /* These backends are using the new callback system. 
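+
+       For illustration, a (hypothetical) caller that wants to restrict which backends are tried, and
+       in what order, passes an explicit list into ma_context_init(); the backends named here are just
+       an example:
+
+           ma_backend backends[] = { ma_backend_wasapi, ma_backend_pulseaudio, ma_backend_null };
+           ma_context context;
+
+           if (ma_context_init(backends, ma_countof(backends), NULL, &context) != MA_SUCCESS) {
+               ...none of the requested backends could be initialized...
+           }
+
+       Passing NULL and 0 instead iterates every backend in the default order, as implemented above.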
*/ + switch (backend) { +#ifdef MA_HAS_WASAPI + case ma_backend_wasapi: + { + pContext->callbacks.onContextInit = ma_context_init__wasapi; + } break; +#endif +#ifdef MA_HAS_DSOUND + case ma_backend_dsound: + { + pContext->callbacks.onContextInit = ma_context_init__dsound; + } break; +#endif +#ifdef MA_HAS_WINMM + case ma_backend_winmm: + { + pContext->callbacks.onContextInit = ma_context_init__winmm; + } break; +#endif +#ifdef MA_HAS_COREAUDIO + case ma_backend_coreaudio: + { + pContext->callbacks.onContextInit = ma_context_init__coreaudio; + } break; +#endif +#ifdef MA_HAS_SNDIO + case ma_backend_sndio: + { + pContext->callbacks.onContextInit = ma_context_init__sndio; + } break; +#endif +#ifdef MA_HAS_AUDIO4 + case ma_backend_audio4: + { + pContext->callbacks.onContextInit = ma_context_init__audio4; + } break; +#endif +#ifdef MA_HAS_OSS + case ma_backend_oss: + { + pContext->callbacks.onContextInit = ma_context_init__oss; + } break; +#endif +#ifdef MA_HAS_PULSEAUDIO + case ma_backend_pulseaudio: + { + pContext->callbacks.onContextInit = ma_context_init__pulse; + } break; +#endif +#ifdef MA_HAS_ALSA + case ma_backend_alsa: + { + pContext->callbacks.onContextInit = ma_context_init__alsa; + } break; +#endif +#ifdef MA_HAS_JACK + case ma_backend_jack: + { + pContext->callbacks.onContextInit = ma_context_init__jack; + } break; +#endif +#ifdef MA_HAS_AAUDIO + case ma_backend_aaudio: + { + if (ma_is_backend_enabled(backend)) { + pContext->callbacks.onContextInit = ma_context_init__aaudio; + } + } break; +#endif +#ifdef MA_HAS_OPENSL + case ma_backend_opensl: + { + if (ma_is_backend_enabled(backend)) { + pContext->callbacks.onContextInit = ma_context_init__opensl; + } + } break; +#endif +#ifdef MA_HAS_WEBAUDIO + case ma_backend_webaudio: + { + pContext->callbacks.onContextInit = ma_context_init__webaudio; + } break; +#endif +#ifdef MA_HAS_CUSTOM + case ma_backend_custom: + { + /* Slightly different logic for custom backends. Custom backends can optionally set all of their callbacks in the config. */ + pContext->callbacks = pConfig->custom; + } break; +#endif +#ifdef MA_HAS_NULL + case ma_backend_null: + { + pContext->callbacks.onContextInit = ma_context_init__null; + } break; +#endif + + default: break; + } + + if (pContext->callbacks.onContextInit != NULL) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "Attempting to initialize %s backend...\n", ma_get_backend_name(backend)); + result = pContext->callbacks.onContextInit(pContext, pConfig, &pContext->callbacks); + } else { + /* Getting here means the onContextInit callback is not set which means the backend is not enabled. Special case for the custom backend. */ + if (backend != ma_backend_custom) { + result = MA_BACKEND_NOT_ENABLED; + } else { +#if !defined(MA_HAS_CUSTOM) + result = MA_BACKEND_NOT_ENABLED; +#else + result = MA_NO_BACKEND; +#endif + } + } + + /* If this iteration was successful, return. */ + if (result == MA_SUCCESS) { + result = ma_mutex_init(&pContext->deviceEnumLock); + if (result != MA_SUCCESS) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "Failed to initialize mutex for device enumeration. ma_context_get_devices() is not thread safe.\n"); + } + + result = ma_mutex_init(&pContext->deviceInfoLock); + if (result != MA_SUCCESS) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "Failed to initialize mutex for device info retrieval. 
ma_context_get_device_info() is not thread safe.\n"); + } + + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "System Architecture:\n"); + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, " Endian: %s\n", ma_is_little_endian() ? "LE" : "BE"); + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, " SSE2: %s\n", ma_has_sse2() ? "YES" : "NO"); + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, " AVX2: %s\n", ma_has_avx2() ? "YES" : "NO"); + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, " NEON: %s\n", ma_has_neon() ? "YES" : "NO"); + + pContext->backend = backend; + return result; + } else { + if (result == MA_BACKEND_NOT_ENABLED) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "%s backend is disabled.\n", ma_get_backend_name(backend)); + } else { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "Failed to initialize %s backend.\n", ma_get_backend_name(backend)); + } + } + } + + /* If we get here it means an error occurred. */ + MA_ZERO_OBJECT(pContext); /* Safety. */ + return MA_NO_BACKEND; +} + +MA_API ma_result ma_context_uninit(ma_context* pContext) +{ + if (pContext == NULL) { + return MA_INVALID_ARGS; + } + + if (pContext->callbacks.onContextUninit != NULL) { + pContext->callbacks.onContextUninit(pContext); + } + + ma_mutex_uninit(&pContext->deviceEnumLock); + ma_mutex_uninit(&pContext->deviceInfoLock); + ma_free(pContext->pDeviceInfos, &pContext->allocationCallbacks); + ma_context_uninit_backend_apis(pContext); + + if (pContext->pLog == &pContext->log) { + ma_log_uninit(&pContext->log); + } + + return MA_SUCCESS; +} + +MA_API size_t ma_context_sizeof(void) +{ + return sizeof(ma_context); +} + + +MA_API ma_log* ma_context_get_log(ma_context* pContext) +{ + if (pContext == NULL) { + return NULL; + } + + return pContext->pLog; +} + + +MA_API ma_result ma_context_enumerate_devices(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_result result; + + if (pContext == NULL || callback == NULL) { + return MA_INVALID_ARGS; + } + + if (pContext->callbacks.onContextEnumerateDevices == NULL) { + return MA_INVALID_OPERATION; + } + + ma_mutex_lock(&pContext->deviceEnumLock); + { + result = pContext->callbacks.onContextEnumerateDevices(pContext, callback, pUserData); + } + ma_mutex_unlock(&pContext->deviceEnumLock); + + return result; +} + + +static ma_bool32 ma_context_get_devices__enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData) +{ + /* + We need to insert the device info into our main internal buffer. Where it goes depends on the device type. If it's a capture device + it's just appended to the end. If it's a playback device it's inserted just before the first capture device. + */ + + /* + First make sure we have room. Since the number of devices we add to the list is usually relatively small I've decided to use a + simple fixed size increment for buffer expansion. + */ + const ma_uint32 bufferExpansionCount = 2; + const ma_uint32 totalDeviceInfoCount = pContext->playbackDeviceInfoCount + pContext->captureDeviceInfoCount; + + if (totalDeviceInfoCount >= pContext->deviceInfoCapacity) { + ma_uint32 newCapacity = pContext->deviceInfoCapacity + bufferExpansionCount; + ma_device_info* pNewInfos = (ma_device_info*)ma_realloc(pContext->pDeviceInfos, sizeof(*pContext->pDeviceInfos)*newCapacity, &pContext->allocationCallbacks); + if (pNewInfos == NULL) { + return MA_FALSE; /* Out of memory. 
*/ + } + + pContext->pDeviceInfos = pNewInfos; + pContext->deviceInfoCapacity = newCapacity; + } + + if (deviceType == ma_device_type_playback) { + /* Playback. Insert just before the first capture device. */ + + /* The first thing to do is move all of the capture devices down a slot. */ + ma_uint32 iFirstCaptureDevice = pContext->playbackDeviceInfoCount; + size_t iCaptureDevice; + for (iCaptureDevice = totalDeviceInfoCount; iCaptureDevice > iFirstCaptureDevice; --iCaptureDevice) { + pContext->pDeviceInfos[iCaptureDevice] = pContext->pDeviceInfos[iCaptureDevice-1]; + } + + /* Now just insert where the first capture device was before moving it down a slot. */ + pContext->pDeviceInfos[iFirstCaptureDevice] = *pInfo; + pContext->playbackDeviceInfoCount += 1; + } else { + /* Capture. Insert at the end. */ + pContext->pDeviceInfos[totalDeviceInfoCount] = *pInfo; + pContext->captureDeviceInfoCount += 1; + } + + (void)pUserData; + return MA_TRUE; +} + +MA_API ma_result ma_context_get_devices(ma_context* pContext, ma_device_info** ppPlaybackDeviceInfos, ma_uint32* pPlaybackDeviceCount, ma_device_info** ppCaptureDeviceInfos, ma_uint32* pCaptureDeviceCount) +{ + ma_result result; + + /* Safety. */ + if (ppPlaybackDeviceInfos != NULL) *ppPlaybackDeviceInfos = NULL; + if (pPlaybackDeviceCount != NULL) *pPlaybackDeviceCount = 0; + if (ppCaptureDeviceInfos != NULL) *ppCaptureDeviceInfos = NULL; + if (pCaptureDeviceCount != NULL) *pCaptureDeviceCount = 0; + + if (pContext == NULL) { + return MA_INVALID_ARGS; + } + + if (pContext->callbacks.onContextEnumerateDevices == NULL) { + return MA_INVALID_OPERATION; + } + + /* Note that we don't use ma_context_enumerate_devices() here because we want to do locking at a higher level. */ + ma_mutex_lock(&pContext->deviceEnumLock); + { + /* Reset everything first. */ + pContext->playbackDeviceInfoCount = 0; + pContext->captureDeviceInfoCount = 0; + + /* Now enumerate over available devices. */ + result = pContext->callbacks.onContextEnumerateDevices(pContext, ma_context_get_devices__enum_callback, NULL); + if (result == MA_SUCCESS) { + /* Playback devices. */ + if (ppPlaybackDeviceInfos != NULL) { + *ppPlaybackDeviceInfos = pContext->pDeviceInfos; + } + if (pPlaybackDeviceCount != NULL) { + *pPlaybackDeviceCount = pContext->playbackDeviceInfoCount; + } + + /* Capture devices. */ + if (ppCaptureDeviceInfos != NULL) { + *ppCaptureDeviceInfos = pContext->pDeviceInfos; + /* Capture devices come after playback devices. */ + if (pContext->playbackDeviceInfoCount > 0) { + /* Conditional, because NULL+0 is undefined behavior. */ + *ppCaptureDeviceInfos += pContext->playbackDeviceInfoCount; + } + } + if (pCaptureDeviceCount != NULL) { + *pCaptureDeviceCount = pContext->captureDeviceInfoCount; + } + } + } + ma_mutex_unlock(&pContext->deviceEnumLock); + + return result; +} + +MA_API ma_result ma_context_get_device_info(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + ma_result result; + ma_device_info deviceInfo; + + /* NOTE: Do not clear pDeviceInfo on entry. The reason is the pDeviceID may actually point to pDeviceInfo->id which will break things. */ + if (pContext == NULL || pDeviceInfo == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(&deviceInfo); + + /* Help the backend out by copying over the device ID if we have one. 
*/ + if (pDeviceID != NULL) { + MA_COPY_MEMORY(&deviceInfo.id, pDeviceID, sizeof(*pDeviceID)); + } + + if (pContext->callbacks.onContextGetDeviceInfo == NULL) { + return MA_INVALID_OPERATION; + } + + ma_mutex_lock(&pContext->deviceInfoLock); + { + result = pContext->callbacks.onContextGetDeviceInfo(pContext, deviceType, pDeviceID, &deviceInfo); + } + ma_mutex_unlock(&pContext->deviceInfoLock); + + *pDeviceInfo = deviceInfo; + return result; +} + +MA_API ma_bool32 ma_context_is_loopback_supported(ma_context* pContext) +{ + if (pContext == NULL) { + return MA_FALSE; + } + + return ma_is_loopback_supported(pContext->backend); +} + + +MA_API ma_device_config ma_device_config_init(ma_device_type deviceType) +{ + ma_device_config config; + MA_ZERO_OBJECT(&config); + config.deviceType = deviceType; + config.resampling = ma_resampler_config_init(ma_format_unknown, 0, 0, 0, ma_resample_algorithm_linear); /* Format/channels/rate don't matter here. */ + + return config; +} + +MA_API ma_result ma_device_init(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +{ + ma_result result; + ma_device_descriptor descriptorPlayback; + ma_device_descriptor descriptorCapture; + + /* The context can be null, in which case we self-manage it. */ + if (pContext == NULL) { + return ma_device_init_ex(NULL, 0, NULL, pConfig, pDevice); + } + + if (pDevice == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pDevice); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Check that we have our callbacks defined. */ + if (pContext->callbacks.onDeviceInit == NULL) { + return MA_INVALID_OPERATION; + } + + /* Basic config validation. */ + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + if (pConfig->capture.channels > MA_MAX_CHANNELS) { + return MA_INVALID_ARGS; + } + + if (!ma__is_channel_map_valid(pConfig->capture.pChannelMap, pConfig->capture.channels)) { + return MA_INVALID_ARGS; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex || pConfig->deviceType == ma_device_type_loopback) { + if (pConfig->playback.channels > MA_MAX_CHANNELS) { + return MA_INVALID_ARGS; + } + + if (!ma__is_channel_map_valid(pConfig->playback.pChannelMap, pConfig->playback.channels)) { + return MA_INVALID_ARGS; + } + } + + pDevice->pContext = pContext; + + /* Set the user data and log callback ASAP to ensure it is available for the entire initialization process. 
*/ + pDevice->pUserData = pConfig->pUserData; + pDevice->onData = pConfig->dataCallback; + pDevice->onNotification = pConfig->notificationCallback; + pDevice->onStop = pConfig->stopCallback; + + if (pConfig->playback.pDeviceID != NULL) { + MA_COPY_MEMORY(&pDevice->playback.id, pConfig->playback.pDeviceID, sizeof(pDevice->playback.id)); + pDevice->playback.pID = &pDevice->playback.id; + } else { + pDevice->playback.pID = NULL; + } + + if (pConfig->capture.pDeviceID != NULL) { + MA_COPY_MEMORY(&pDevice->capture.id, pConfig->capture.pDeviceID, sizeof(pDevice->capture.id)); + pDevice->capture.pID = &pDevice->capture.id; + } else { + pDevice->capture.pID = NULL; + } + + pDevice->noPreSilencedOutputBuffer = pConfig->noPreSilencedOutputBuffer; + pDevice->noClip = pConfig->noClip; + pDevice->noDisableDenormals = pConfig->noDisableDenormals; + pDevice->noFixedSizedCallback = pConfig->noFixedSizedCallback; + ma_atomic_float_set(&pDevice->masterVolumeFactor, 1); + + pDevice->type = pConfig->deviceType; + pDevice->sampleRate = pConfig->sampleRate; + pDevice->resampling.algorithm = pConfig->resampling.algorithm; + pDevice->resampling.linear.lpfOrder = pConfig->resampling.linear.lpfOrder; + pDevice->resampling.pBackendVTable = pConfig->resampling.pBackendVTable; + pDevice->resampling.pBackendUserData = pConfig->resampling.pBackendUserData; + + pDevice->capture.shareMode = pConfig->capture.shareMode; + pDevice->capture.format = pConfig->capture.format; + pDevice->capture.channels = pConfig->capture.channels; + ma_channel_map_copy_or_default(pDevice->capture.channelMap, ma_countof(pDevice->capture.channelMap), pConfig->capture.pChannelMap, pConfig->capture.channels); + pDevice->capture.channelMixMode = pConfig->capture.channelMixMode; + pDevice->capture.calculateLFEFromSpatialChannels = pConfig->capture.calculateLFEFromSpatialChannels; + + pDevice->playback.shareMode = pConfig->playback.shareMode; + pDevice->playback.format = pConfig->playback.format; + pDevice->playback.channels = pConfig->playback.channels; + ma_channel_map_copy_or_default(pDevice->playback.channelMap, ma_countof(pDevice->playback.channelMap), pConfig->playback.pChannelMap, pConfig->playback.channels); + pDevice->playback.channelMixMode = pConfig->playback.channelMixMode; + pDevice->playback.calculateLFEFromSpatialChannels = pConfig->playback.calculateLFEFromSpatialChannels; + + result = ma_mutex_init(&pDevice->startStopLock); + if (result != MA_SUCCESS) { + return result; + } + + /* + When the device is started, the worker thread is the one that does the actual startup of the backend device. We + use a semaphore to wait for the background thread to finish the work. The same applies for stopping the device. + + Each of these semaphores is released internally by the worker thread when the work is completed. The start + semaphore is also used to wake up the worker thread. 
+ */ + result = ma_event_init(&pDevice->wakeupEvent); + if (result != MA_SUCCESS) { + ma_mutex_uninit(&pDevice->startStopLock); + return result; + } + + result = ma_event_init(&pDevice->startEvent); + if (result != MA_SUCCESS) { + ma_event_uninit(&pDevice->wakeupEvent); + ma_mutex_uninit(&pDevice->startStopLock); + return result; + } + + result = ma_event_init(&pDevice->stopEvent); + if (result != MA_SUCCESS) { + ma_event_uninit(&pDevice->startEvent); + ma_event_uninit(&pDevice->wakeupEvent); + ma_mutex_uninit(&pDevice->startStopLock); + return result; + } + + + MA_ZERO_OBJECT(&descriptorPlayback); + descriptorPlayback.pDeviceID = pConfig->playback.pDeviceID; + descriptorPlayback.shareMode = pConfig->playback.shareMode; + descriptorPlayback.format = pConfig->playback.format; + descriptorPlayback.channels = pConfig->playback.channels; + descriptorPlayback.sampleRate = pConfig->sampleRate; + ma_channel_map_copy_or_default(descriptorPlayback.channelMap, ma_countof(descriptorPlayback.channelMap), pConfig->playback.pChannelMap, pConfig->playback.channels); + descriptorPlayback.periodSizeInFrames = pConfig->periodSizeInFrames; + descriptorPlayback.periodSizeInMilliseconds = pConfig->periodSizeInMilliseconds; + descriptorPlayback.periodCount = pConfig->periods; + + if (descriptorPlayback.periodCount == 0) { + descriptorPlayback.periodCount = MA_DEFAULT_PERIODS; + } + + + MA_ZERO_OBJECT(&descriptorCapture); + descriptorCapture.pDeviceID = pConfig->capture.pDeviceID; + descriptorCapture.shareMode = pConfig->capture.shareMode; + descriptorCapture.format = pConfig->capture.format; + descriptorCapture.channels = pConfig->capture.channels; + descriptorCapture.sampleRate = pConfig->sampleRate; + ma_channel_map_copy_or_default(descriptorCapture.channelMap, ma_countof(descriptorCapture.channelMap), pConfig->capture.pChannelMap, pConfig->capture.channels); + descriptorCapture.periodSizeInFrames = pConfig->periodSizeInFrames; + descriptorCapture.periodSizeInMilliseconds = pConfig->periodSizeInMilliseconds; + descriptorCapture.periodCount = pConfig->periods; + + if (descriptorCapture.periodCount == 0) { + descriptorCapture.periodCount = MA_DEFAULT_PERIODS; + } + + + result = pContext->callbacks.onDeviceInit(pDevice, pConfig, &descriptorPlayback, &descriptorCapture); + if (result != MA_SUCCESS) { + ma_event_uninit(&pDevice->startEvent); + ma_event_uninit(&pDevice->wakeupEvent); + ma_mutex_uninit(&pDevice->startStopLock); + return result; + } + +#if 0 + /* + On output the descriptors will contain the *actual* data format of the device. We need this to know how to convert the data between + the requested format and the internal format. 
+ */ + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex || pConfig->deviceType == ma_device_type_loopback) { + if (!ma_device_descriptor_is_valid(&descriptorCapture)) { + ma_device_uninit(pDevice); + return MA_INVALID_ARGS; + } + + pDevice->capture.internalFormat = descriptorCapture.format; + pDevice->capture.internalChannels = descriptorCapture.channels; + pDevice->capture.internalSampleRate = descriptorCapture.sampleRate; + ma_channel_map_copy(pDevice->capture.internalChannelMap, descriptorCapture.channelMap, descriptorCapture.channels); + pDevice->capture.internalPeriodSizeInFrames = descriptorCapture.periodSizeInFrames; + pDevice->capture.internalPeriods = descriptorCapture.periodCount; + + if (pDevice->capture.internalPeriodSizeInFrames == 0) { + pDevice->capture.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(descriptorCapture.periodSizeInMilliseconds, descriptorCapture.sampleRate); + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + if (!ma_device_descriptor_is_valid(&descriptorPlayback)) { + ma_device_uninit(pDevice); + return MA_INVALID_ARGS; + } + + pDevice->playback.internalFormat = descriptorPlayback.format; + pDevice->playback.internalChannels = descriptorPlayback.channels; + pDevice->playback.internalSampleRate = descriptorPlayback.sampleRate; + ma_channel_map_copy(pDevice->playback.internalChannelMap, descriptorPlayback.channelMap, descriptorPlayback.channels); + pDevice->playback.internalPeriodSizeInFrames = descriptorPlayback.periodSizeInFrames; + pDevice->playback.internalPeriods = descriptorPlayback.periodCount; + + if (pDevice->playback.internalPeriodSizeInFrames == 0) { + pDevice->playback.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(descriptorPlayback.periodSizeInMilliseconds, descriptorPlayback.sampleRate); + } + } + + + /* + The name of the device can be retrieved from device info. This may be temporary and replaced with a `ma_device_get_info(pDevice, deviceType)` instead. + For loopback devices, we need to retrieve the name of the playback device. + */ + { + ma_device_info deviceInfo; + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex || pConfig->deviceType == ma_device_type_loopback) { + result = ma_device_get_info(pDevice, (pConfig->deviceType == ma_device_type_loopback) ? ma_device_type_playback : ma_device_type_capture, &deviceInfo); + if (result == MA_SUCCESS) { + ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), deviceInfo.name, (size_t)-1); + } else { + /* We failed to retrieve the device info. Fall back to a default name. */ + if (descriptorCapture.pDeviceID == NULL) { + ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), "Capture Device", (size_t)-1); + } + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + result = ma_device_get_info(pDevice, ma_device_type_playback, &deviceInfo); + if (result == MA_SUCCESS) { + ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), deviceInfo.name, (size_t)-1); + } else { + /* We failed to retrieve the device info. Fall back to a default name. 
*/ + if (descriptorPlayback.pDeviceID == NULL) { + ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), "Playback Device", (size_t)-1); + } + } + } + } + + + ma_device__post_init_setup(pDevice, pConfig->deviceType); +#endif + + result = ma_device_post_init(pDevice, pConfig->deviceType, &descriptorPlayback, &descriptorCapture); + if (result != MA_SUCCESS) { + ma_device_uninit(pDevice); + return result; + } + + + /* + If we're using fixed sized callbacks we'll need to make use of an intermediary buffer. Needs to + be done after post_init_setup() because we'll need access to the sample rate. + */ + if (pConfig->noFixedSizedCallback == MA_FALSE) { + /* We're using a fixed sized data callback so we'll need an intermediary buffer. */ + ma_uint32 intermediaryBufferCap = pConfig->periodSizeInFrames; + if (intermediaryBufferCap == 0) { + intermediaryBufferCap = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, pDevice->sampleRate); + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex || pConfig->deviceType == ma_device_type_loopback) { + ma_uint32 intermediaryBufferSizeInBytes; + + pDevice->capture.intermediaryBufferLen = 0; + pDevice->capture.intermediaryBufferCap = intermediaryBufferCap; + if (pDevice->capture.intermediaryBufferCap == 0) { + pDevice->capture.intermediaryBufferCap = pDevice->capture.internalPeriodSizeInFrames; + } + + intermediaryBufferSizeInBytes = pDevice->capture.intermediaryBufferCap * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + + pDevice->capture.pIntermediaryBuffer = ma_malloc((size_t)intermediaryBufferSizeInBytes, &pContext->allocationCallbacks); + if (pDevice->capture.pIntermediaryBuffer == NULL) { + ma_device_uninit(pDevice); + return MA_OUT_OF_MEMORY; + } + + /* Silence the buffer for safety. */ + ma_silence_pcm_frames(pDevice->capture.pIntermediaryBuffer, pDevice->capture.intermediaryBufferCap, pDevice->capture.format, pDevice->capture.channels); + pDevice->capture.intermediaryBufferLen = pDevice->capture.intermediaryBufferCap; + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_uint64 intermediaryBufferSizeInBytes; + + pDevice->playback.intermediaryBufferLen = 0; + if (pConfig->deviceType == ma_device_type_duplex) { + pDevice->playback.intermediaryBufferCap = pDevice->capture.intermediaryBufferCap; /* In duplex mode, make sure the intermediary buffer is always the same size as the capture side. */ + } else { + pDevice->playback.intermediaryBufferCap = intermediaryBufferCap; + if (pDevice->playback.intermediaryBufferCap == 0) { + pDevice->playback.intermediaryBufferCap = pDevice->playback.internalPeriodSizeInFrames; + } + } + + intermediaryBufferSizeInBytes = pDevice->playback.intermediaryBufferCap * ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + + pDevice->playback.pIntermediaryBuffer = ma_malloc((size_t)intermediaryBufferSizeInBytes, &pContext->allocationCallbacks); + if (pDevice->playback.pIntermediaryBuffer == NULL) { + ma_device_uninit(pDevice); + return MA_OUT_OF_MEMORY; + } + + /* Silence the buffer for safety. 
*/ + ma_silence_pcm_frames(pDevice->playback.pIntermediaryBuffer, pDevice->playback.intermediaryBufferCap, pDevice->playback.format, pDevice->playback.channels); + pDevice->playback.intermediaryBufferLen = 0; + } + } else { + /* Not using a fixed sized data callback so no need for an intermediary buffer. */ + } + + + /* Some backends don't require the worker thread. */ + if (!ma_context_is_backend_asynchronous(pContext)) { + /* The worker thread. */ + result = ma_thread_create(&pDevice->thread, pContext->threadPriority, pContext->threadStackSize, ma_worker_thread, pDevice, &pContext->allocationCallbacks); + if (result != MA_SUCCESS) { + ma_device_uninit(pDevice); + return result; + } + + /* Wait for the worker thread to put the device into it's stopped state for real. */ + ma_event_wait(&pDevice->stopEvent); + MA_ASSERT(ma_device_get_state(pDevice) == ma_device_state_stopped); + } else { + /* + If the backend is asynchronous and the device is duplex, we'll need an intermediary ring buffer. Note that this needs to be done + after ma_device__post_init_setup(). + */ + if (ma_context_is_backend_asynchronous(pContext)) { + if (pConfig->deviceType == ma_device_type_duplex) { + result = ma_duplex_rb_init(pDevice->capture.format, pDevice->capture.channels, pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalPeriodSizeInFrames, &pDevice->pContext->allocationCallbacks, &pDevice->duplexRB); + if (result != MA_SUCCESS) { + ma_device_uninit(pDevice); + return result; + } + } + } + + ma_device__set_state(pDevice, ma_device_state_stopped); + } + + /* Log device information. */ + { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[%s]\n", ma_get_backend_name(pDevice->pContext->backend)); + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) { + char name[MA_MAX_DEVICE_NAME_LENGTH + 1]; + ma_device_get_name(pDevice, (pDevice->type == ma_device_type_loopback) ? ma_device_type_playback : ma_device_type_capture, name, sizeof(name), NULL); + + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " %s (%s)\n", name, "Capture"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Format: %s -> %s\n", ma_get_format_name(pDevice->capture.internalFormat), ma_get_format_name(pDevice->capture.format)); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Channels: %d -> %d\n", pDevice->capture.internalChannels, pDevice->capture.channels); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Sample Rate: %d -> %d\n", pDevice->capture.internalSampleRate, pDevice->sampleRate); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Buffer Size: %d*%d (%d)\n", pDevice->capture.internalPeriodSizeInFrames, pDevice->capture.internalPeriods, (pDevice->capture.internalPeriodSizeInFrames * pDevice->capture.internalPeriods)); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Conversion:\n"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Pre Format Conversion: %s\n", pDevice->capture.converter.hasPreFormatConversion ? "YES" : "NO"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Post Format Conversion: %s\n", pDevice->capture.converter.hasPostFormatConversion ? "YES" : "NO"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Channel Routing: %s\n", pDevice->capture.converter.hasChannelConverter ? 
"YES" : "NO"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Resampling: %s\n", pDevice->capture.converter.hasResampler ? "YES" : "NO"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Passthrough: %s\n", pDevice->capture.converter.isPassthrough ? "YES" : "NO"); + { + char channelMapStr[1024]; + ma_channel_map_to_string(pDevice->capture.internalChannelMap, pDevice->capture.internalChannels, channelMapStr, sizeof(channelMapStr)); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Channel Map In: {%s}\n", channelMapStr); + + ma_channel_map_to_string(pDevice->capture.channelMap, pDevice->capture.channels, channelMapStr, sizeof(channelMapStr)); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Channel Map Out: {%s}\n", channelMapStr); + } + } + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + char name[MA_MAX_DEVICE_NAME_LENGTH + 1]; + ma_device_get_name(pDevice, ma_device_type_playback, name, sizeof(name), NULL); + + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " %s (%s)\n", name, "Playback"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Format: %s -> %s\n", ma_get_format_name(pDevice->playback.format), ma_get_format_name(pDevice->playback.internalFormat)); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Channels: %d -> %d\n", pDevice->playback.channels, pDevice->playback.internalChannels); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Sample Rate: %d -> %d\n", pDevice->sampleRate, pDevice->playback.internalSampleRate); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Buffer Size: %d*%d (%d)\n", pDevice->playback.internalPeriodSizeInFrames, pDevice->playback.internalPeriods, (pDevice->playback.internalPeriodSizeInFrames * pDevice->playback.internalPeriods)); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Conversion:\n"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Pre Format Conversion: %s\n", pDevice->playback.converter.hasPreFormatConversion ? "YES" : "NO"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Post Format Conversion: %s\n", pDevice->playback.converter.hasPostFormatConversion ? "YES" : "NO"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Channel Routing: %s\n", pDevice->playback.converter.hasChannelConverter ? "YES" : "NO"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Resampling: %s\n", pDevice->playback.converter.hasResampler ? "YES" : "NO"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Passthrough: %s\n", pDevice->playback.converter.isPassthrough ? 
"YES" : "NO"); + { + char channelMapStr[1024]; + ma_channel_map_to_string(pDevice->playback.channelMap, pDevice->playback.channels, channelMapStr, sizeof(channelMapStr)); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Channel Map In: {%s}\n", channelMapStr); + + ma_channel_map_to_string(pDevice->playback.internalChannelMap, pDevice->playback.internalChannels, channelMapStr, sizeof(channelMapStr)); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Channel Map Out: {%s}\n", channelMapStr); + } + } + } + + MA_ASSERT(ma_device_get_state(pDevice) == ma_device_state_stopped); + return MA_SUCCESS; +} + +MA_API ma_result ma_device_init_ex(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pContextConfig, const ma_device_config* pConfig, ma_device* pDevice) +{ + ma_result result; + ma_context* pContext; + ma_backend defaultBackends[ma_backend_null+1]; + ma_uint32 iBackend; + ma_backend* pBackendsToIterate; + ma_uint32 backendsToIterateCount; + ma_allocation_callbacks allocationCallbacks; + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pContextConfig != NULL) { + result = ma_allocation_callbacks_init_copy(&allocationCallbacks, &pContextConfig->allocationCallbacks); + if (result != MA_SUCCESS) { + return result; + } + } else { + allocationCallbacks = ma_allocation_callbacks_init_default(); + } + + pContext = (ma_context*)ma_malloc(sizeof(*pContext), &allocationCallbacks); + if (pContext == NULL) { + return MA_OUT_OF_MEMORY; + } + + for (iBackend = 0; iBackend <= ma_backend_null; ++iBackend) { + defaultBackends[iBackend] = (ma_backend)iBackend; + } + + pBackendsToIterate = (ma_backend*)backends; + backendsToIterateCount = backendCount; + if (pBackendsToIterate == NULL) { + pBackendsToIterate = (ma_backend*)defaultBackends; + backendsToIterateCount = ma_countof(defaultBackends); + } + + result = MA_NO_BACKEND; + + for (iBackend = 0; iBackend < backendsToIterateCount; ++iBackend) { + /* + This is a hack for iOS. If the context config is null, there's a good chance the + `ma_device_init(NULL, &deviceConfig, pDevice);` pattern is being used. In this + case, set the session category based on the device type. + */ +#if defined(MA_APPLE_MOBILE) + ma_context_config contextConfig; + + if (pContextConfig == NULL) { + contextConfig = ma_context_config_init(); + switch (pConfig->deviceType) { + case ma_device_type_duplex: { + contextConfig.coreaudio.sessionCategory = ma_ios_session_category_play_and_record; + } break; + case ma_device_type_capture: { + contextConfig.coreaudio.sessionCategory = ma_ios_session_category_record; + } break; + case ma_device_type_playback: + default: { + contextConfig.coreaudio.sessionCategory = ma_ios_session_category_playback; + } break; + } + + pContextConfig = &contextConfig; + } +#endif + + result = ma_context_init(&pBackendsToIterate[iBackend], 1, pContextConfig, pContext); + if (result == MA_SUCCESS) { + result = ma_device_init(pContext, pConfig, pDevice); + if (result == MA_SUCCESS) { + break; /* Success. */ + } else { + ma_context_uninit(pContext); /* Failure. */ + } + } + } + + if (result != MA_SUCCESS) { + ma_free(pContext, &allocationCallbacks); + return result; + } + + pDevice->isOwnerOfContext = MA_TRUE; + return result; +} + +MA_API void ma_device_uninit(ma_device* pDevice) +{ + if (!ma_device__is_initialized(pDevice)) { + return; + } + + /* Make sure the device is stopped first. The backends will probably handle this naturally, but I like to do it explicitly for my own sanity. 
*/ + if (ma_device_is_started(pDevice)) { + ma_device_stop(pDevice); + } + + /* Putting the device into an uninitialized state will make the worker thread return. */ + ma_device__set_state(pDevice, ma_device_state_uninitialized); + + /* Wake up the worker thread and wait for it to properly terminate. */ + if (!ma_context_is_backend_asynchronous(pDevice->pContext)) { + ma_event_signal(&pDevice->wakeupEvent); + ma_thread_wait(&pDevice->thread); + } + + if (pDevice->pContext->callbacks.onDeviceUninit != NULL) { + pDevice->pContext->callbacks.onDeviceUninit(pDevice); + } + + + ma_event_uninit(&pDevice->stopEvent); + ma_event_uninit(&pDevice->startEvent); + ma_event_uninit(&pDevice->wakeupEvent); + ma_mutex_uninit(&pDevice->startStopLock); + + if (ma_context_is_backend_asynchronous(pDevice->pContext)) { + if (pDevice->type == ma_device_type_duplex) { + ma_duplex_rb_uninit(&pDevice->duplexRB); + } + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) { + ma_data_converter_uninit(&pDevice->capture.converter, &pDevice->pContext->allocationCallbacks); + } + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_data_converter_uninit(&pDevice->playback.converter, &pDevice->pContext->allocationCallbacks); + } + + if (pDevice->playback.pInputCache != NULL) { + ma_free(pDevice->playback.pInputCache, &pDevice->pContext->allocationCallbacks); + } + + if (pDevice->capture.pIntermediaryBuffer != NULL) { + ma_free(pDevice->capture.pIntermediaryBuffer, &pDevice->pContext->allocationCallbacks); + } + if (pDevice->playback.pIntermediaryBuffer != NULL) { + ma_free(pDevice->playback.pIntermediaryBuffer, &pDevice->pContext->allocationCallbacks); + } + + if (pDevice->isOwnerOfContext) { + ma_allocation_callbacks allocationCallbacks = pDevice->pContext->allocationCallbacks; + + ma_context_uninit(pDevice->pContext); + ma_free(pDevice->pContext, &allocationCallbacks); + } + + MA_ZERO_OBJECT(pDevice); +} + +MA_API ma_context* ma_device_get_context(ma_device* pDevice) +{ + if (pDevice == NULL) { + return NULL; + } + + return pDevice->pContext; +} + +MA_API ma_log* ma_device_get_log(ma_device* pDevice) +{ + return ma_context_get_log(ma_device_get_context(pDevice)); +} + +MA_API ma_result ma_device_get_info(ma_device* pDevice, ma_device_type type, ma_device_info* pDeviceInfo) +{ + if (pDeviceInfo == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pDeviceInfo); + + if (pDevice == NULL) { + return MA_INVALID_ARGS; + } + + /* If the onDeviceGetInfo() callback is set, use that. Otherwise we'll fall back to ma_context_get_device_info(). */ + if (pDevice->pContext->callbacks.onDeviceGetInfo != NULL) { + return pDevice->pContext->callbacks.onDeviceGetInfo(pDevice, type, pDeviceInfo); + } + + /* Getting here means onDeviceGetInfo is not implemented so we need to fall back to an alternative. 
*/ + if (type == ma_device_type_playback) { + return ma_context_get_device_info(pDevice->pContext, type, pDevice->playback.pID, pDeviceInfo); + } else { + return ma_context_get_device_info(pDevice->pContext, type, pDevice->capture.pID, pDeviceInfo); + } +} + +MA_API ma_result ma_device_get_name(ma_device* pDevice, ma_device_type type, char* pName, size_t nameCap, size_t* pLengthNotIncludingNullTerminator) +{ + ma_result result; + ma_device_info deviceInfo; + + if (pLengthNotIncludingNullTerminator != NULL) { + *pLengthNotIncludingNullTerminator = 0; + } + + if (pName != NULL && nameCap > 0) { + pName[0] = '\0'; + } + + result = ma_device_get_info(pDevice, type, &deviceInfo); + if (result != MA_SUCCESS) { + return result; + } + + if (pName != NULL) { + ma_strncpy_s(pName, nameCap, deviceInfo.name, (size_t)-1); + + /* + For safety, make sure the length is based on the truncated output string rather than the + source. Otherwise the caller might assume the output buffer contains more content than it + actually does. + */ + if (pLengthNotIncludingNullTerminator != NULL) { + *pLengthNotIncludingNullTerminator = strlen(pName); + } + } else { + /* Name not specified. Just report the length of the source string. */ + if (pLengthNotIncludingNullTerminator != NULL) { + *pLengthNotIncludingNullTerminator = strlen(deviceInfo.name); + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_device_start(ma_device* pDevice) +{ + ma_result result; + + if (pDevice == NULL) { + return MA_INVALID_ARGS; + } + + if (ma_device_get_state(pDevice) == ma_device_state_uninitialized) { + return MA_INVALID_OPERATION; /* Not initialized. */ + } + + if (ma_device_get_state(pDevice) == ma_device_state_started) { + return MA_SUCCESS; /* Already started. */ + } + + ma_mutex_lock(&pDevice->startStopLock); + { + /* Starting and stopping are wrapped in a mutex which means we can assert that the device is in a stopped or paused state. */ + MA_ASSERT(ma_device_get_state(pDevice) == ma_device_state_stopped); + + ma_device__set_state(pDevice, ma_device_state_starting); + + /* Asynchronous backends need to be handled differently. */ + if (ma_context_is_backend_asynchronous(pDevice->pContext)) { + if (pDevice->pContext->callbacks.onDeviceStart != NULL) { + result = pDevice->pContext->callbacks.onDeviceStart(pDevice); + } else { + result = MA_INVALID_OPERATION; + } + + if (result == MA_SUCCESS) { + ma_device__set_state(pDevice, ma_device_state_started); + ma_device__on_notification_started(pDevice); + } + } else { + /* + Synchronous backends are started by signaling an event that's being waited on in the worker thread. We first wake up the + thread and then wait for the start event. + */ + ma_event_signal(&pDevice->wakeupEvent); + + /* + Wait for the worker thread to finish starting the device. Note that the worker thread will be the one who puts the device + into the started state. Don't call ma_device__set_state() here. + */ + ma_event_wait(&pDevice->startEvent); + result = pDevice->workResult; + } + + /* We changed the state from stopped to started, so if we failed, make sure we put the state back to stopped. */ + if (result != MA_SUCCESS) { + ma_device__set_state(pDevice, ma_device_state_stopped); + } + } + ma_mutex_unlock(&pDevice->startStopLock); + + return result; +} + +MA_API ma_result ma_device_stop(ma_device* pDevice) +{ + ma_result result; + + if (pDevice == NULL) { + return MA_INVALID_ARGS; + } + + if (ma_device_get_state(pDevice) == ma_device_state_uninitialized) { + return MA_INVALID_OPERATION; /* Not initialized. 
*/ + } + + if (ma_device_get_state(pDevice) == ma_device_state_stopped) { + return MA_SUCCESS; /* Already stopped. */ + } + + ma_mutex_lock(&pDevice->startStopLock); + { + /* Starting and stopping are wrapped in a mutex which means we can assert that the device is in a started or paused state. */ + MA_ASSERT(ma_device_get_state(pDevice) == ma_device_state_started); + + ma_device__set_state(pDevice, ma_device_state_stopping); + + /* Asynchronous backends need to be handled differently. */ + if (ma_context_is_backend_asynchronous(pDevice->pContext)) { + /* Asynchronous backends must have a stop operation. */ + if (pDevice->pContext->callbacks.onDeviceStop != NULL) { + result = pDevice->pContext->callbacks.onDeviceStop(pDevice); + } else { + result = MA_INVALID_OPERATION; + } + + ma_device__set_state(pDevice, ma_device_state_stopped); + } else { + /* + Synchronous backends. The stop callback is always called from the worker thread. Do not call the stop callback here. If + the backend is implementing it's own audio thread loop we'll need to wake it up if required. Note that we need to make + sure the state of the device is *not* playing right now, which it shouldn't be since we set it above. This is super + important though, so I'm asserting it here as well for extra safety in case we accidentally change something later. + */ + MA_ASSERT(ma_device_get_state(pDevice) != ma_device_state_started); + + if (pDevice->pContext->callbacks.onDeviceDataLoopWakeup != NULL) { + pDevice->pContext->callbacks.onDeviceDataLoopWakeup(pDevice); + } + + /* + We need to wait for the worker thread to become available for work before returning. Note that the worker thread will be + the one who puts the device into the stopped state. Don't call ma_device__set_state() here. + */ + ma_event_wait(&pDevice->stopEvent); + result = MA_SUCCESS; + } + + /* + This is a safety measure to ensure the internal buffer has been cleared so any leftover + does not get played the next time the device starts. Ideally this should be drained by + the backend first. + */ + pDevice->playback.intermediaryBufferLen = 0; + pDevice->playback.inputCacheConsumed = 0; + pDevice->playback.inputCacheRemaining = 0; + } + ma_mutex_unlock(&pDevice->startStopLock); + + return result; +} + +MA_API ma_bool32 ma_device_is_started(const ma_device* pDevice) +{ + return ma_device_get_state(pDevice) == ma_device_state_started; +} + +MA_API ma_device_state ma_device_get_state(const ma_device* pDevice) +{ + if (pDevice == NULL) { + return ma_device_state_uninitialized; + } + + return ma_atomic_device_state_get((ma_atomic_device_state*)&pDevice->state); /* Naughty cast to get rid of a const warning. 
*/ +} + +MA_API ma_result ma_device_set_master_volume(ma_device* pDevice, float volume) +{ + if (pDevice == NULL) { + return MA_INVALID_ARGS; + } + + if (volume < 0.0f) { + return MA_INVALID_ARGS; + } + + ma_atomic_float_set(&pDevice->masterVolumeFactor, volume); + + return MA_SUCCESS; +} + +MA_API ma_result ma_device_get_master_volume(ma_device* pDevice, float* pVolume) +{ + if (pVolume == NULL) { + return MA_INVALID_ARGS; + } + + if (pDevice == NULL) { + *pVolume = 0; + return MA_INVALID_ARGS; + } + + *pVolume = ma_atomic_float_get(&pDevice->masterVolumeFactor); + + return MA_SUCCESS; +} + +MA_API ma_result ma_device_set_master_volume_db(ma_device* pDevice, float gainDB) +{ + if (gainDB > 0) { + return MA_INVALID_ARGS; + } + + return ma_device_set_master_volume(pDevice, ma_volume_db_to_linear(gainDB)); +} + +MA_API ma_result ma_device_get_master_volume_db(ma_device* pDevice, float* pGainDB) +{ + float factor; + ma_result result; + + if (pGainDB == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_device_get_master_volume(pDevice, &factor); + if (result != MA_SUCCESS) { + *pGainDB = 0; + return result; + } + + *pGainDB = ma_volume_linear_to_db(factor); + + return MA_SUCCESS; +} + + +MA_API ma_result ma_device_handle_backend_data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) +{ + if (pDevice == NULL) { + return MA_INVALID_ARGS; + } + + if (pOutput == NULL && pInput == NULL) { + return MA_INVALID_ARGS; + } + + if (pDevice->type == ma_device_type_duplex) { + if (pInput != NULL) { + ma_device__handle_duplex_callback_capture(pDevice, frameCount, pInput, &pDevice->duplexRB.rb); + } + + if (pOutput != NULL) { + ma_device__handle_duplex_callback_playback(pDevice, frameCount, pOutput, &pDevice->duplexRB.rb); + } + } else { + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_loopback) { + if (pInput == NULL) { + return MA_INVALID_ARGS; + } + + ma_device__send_frames_to_client(pDevice, frameCount, pInput); + } + + if (pDevice->type == ma_device_type_playback) { + if (pOutput == NULL) { + return MA_INVALID_ARGS; + } + + ma_device__read_frames_from_client(pDevice, frameCount, pOutput); + } + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_calculate_buffer_size_in_frames_from_descriptor(const ma_device_descriptor* pDescriptor, ma_uint32 nativeSampleRate, ma_performance_profile performanceProfile) +{ + if (pDescriptor == NULL) { + return 0; + } + + /* + We must have a non-0 native sample rate, but some backends don't allow retrieval of this at the + time when the size of the buffer needs to be determined. In this case we need to just take a best + guess and move on. We'll try using the sample rate in pDescriptor first. If that's not set we'll + just fall back to MA_DEFAULT_SAMPLE_RATE. 
+ */ + if (nativeSampleRate == 0) { + nativeSampleRate = pDescriptor->sampleRate; + } + if (nativeSampleRate == 0) { + nativeSampleRate = MA_DEFAULT_SAMPLE_RATE; + } + + MA_ASSERT(nativeSampleRate != 0); + + if (pDescriptor->periodSizeInFrames == 0) { + if (pDescriptor->periodSizeInMilliseconds == 0) { + if (performanceProfile == ma_performance_profile_low_latency) { + return ma_calculate_buffer_size_in_frames_from_milliseconds(MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY, nativeSampleRate); + } else { + return ma_calculate_buffer_size_in_frames_from_milliseconds(MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE, nativeSampleRate); + } + } else { + return ma_calculate_buffer_size_in_frames_from_milliseconds(pDescriptor->periodSizeInMilliseconds, nativeSampleRate); + } + } else { + return pDescriptor->periodSizeInFrames; + } +} +#endif /* MA_NO_DEVICE_IO */ + + +MA_API ma_uint32 ma_calculate_buffer_size_in_milliseconds_from_frames(ma_uint32 bufferSizeInFrames, ma_uint32 sampleRate) +{ + /* Prevent a division by zero. */ + if (sampleRate == 0) { + return 0; + } + + return bufferSizeInFrames*1000 / sampleRate; +} + +MA_API ma_uint32 ma_calculate_buffer_size_in_frames_from_milliseconds(ma_uint32 bufferSizeInMilliseconds, ma_uint32 sampleRate) +{ + /* Prevent a division by zero. */ + if (sampleRate == 0) { + return 0; + } + + return bufferSizeInMilliseconds*sampleRate / 1000; +} + +MA_API void ma_copy_pcm_frames(void* dst, const void* src, ma_uint64 frameCount, ma_format format, ma_uint32 channels) +{ + if (dst == src) { + return; /* No-op. */ + } + + ma_copy_memory_64(dst, src, frameCount * ma_get_bytes_per_frame(format, channels)); +} + +MA_API void ma_silence_pcm_frames(void* p, ma_uint64 frameCount, ma_format format, ma_uint32 channels) +{ + if (format == ma_format_u8) { + ma_uint64 sampleCount = frameCount * channels; + ma_uint64 iSample; + for (iSample = 0; iSample < sampleCount; iSample += 1) { + ((ma_uint8*)p)[iSample] = 128; + } + } else { + ma_zero_memory_64(p, frameCount * ma_get_bytes_per_frame(format, channels)); + } +} + +MA_API void* ma_offset_pcm_frames_ptr(void* p, ma_uint64 offsetInFrames, ma_format format, ma_uint32 channels) +{ + return ma_offset_ptr(p, offsetInFrames * ma_get_bytes_per_frame(format, channels)); +} + +MA_API const void* ma_offset_pcm_frames_const_ptr(const void* p, ma_uint64 offsetInFrames, ma_format format, ma_uint32 channels) +{ + return ma_offset_ptr(p, offsetInFrames * ma_get_bytes_per_frame(format, channels)); +} + + +MA_API void ma_clip_samples_u8(ma_uint8* pDst, const ma_int16* pSrc, ma_uint64 count) +{ + ma_uint64 iSample; + + MA_ASSERT(pDst != NULL); + MA_ASSERT(pSrc != NULL); + + for (iSample = 0; iSample < count; iSample += 1) { + pDst[iSample] = ma_clip_u8(pSrc[iSample]); + } +} + +MA_API void ma_clip_samples_s16(ma_int16* pDst, const ma_int32* pSrc, ma_uint64 count) +{ + ma_uint64 iSample; + + MA_ASSERT(pDst != NULL); + MA_ASSERT(pSrc != NULL); + + for (iSample = 0; iSample < count; iSample += 1) { + pDst[iSample] = ma_clip_s16(pSrc[iSample]); + } +} + +MA_API void ma_clip_samples_s24(ma_uint8* pDst, const ma_int64* pSrc, ma_uint64 count) +{ + ma_uint64 iSample; + + MA_ASSERT(pDst != NULL); + MA_ASSERT(pSrc != NULL); + + for (iSample = 0; iSample < count; iSample += 1) { + ma_int64 s = ma_clip_s24(pSrc[iSample]); + pDst[iSample*3 + 0] = (ma_uint8)((s & 0x000000FF) >> 0); + pDst[iSample*3 + 1] = (ma_uint8)((s & 0x0000FF00) >> 8); + pDst[iSample*3 + 2] = (ma_uint8)((s & 0x00FF0000) >> 16); + } +} + +MA_API void 
ma_clip_samples_s32(ma_int32* pDst, const ma_int64* pSrc, ma_uint64 count) +{ + ma_uint64 iSample; + + MA_ASSERT(pDst != NULL); + MA_ASSERT(pSrc != NULL); + + for (iSample = 0; iSample < count; iSample += 1) { + pDst[iSample] = ma_clip_s32(pSrc[iSample]); + } +} + +MA_API void ma_clip_samples_f32(float* pDst, const float* pSrc, ma_uint64 count) +{ + ma_uint64 iSample; + + MA_ASSERT(pDst != NULL); + MA_ASSERT(pSrc != NULL); + + for (iSample = 0; iSample < count; iSample += 1) { + pDst[iSample] = ma_clip_f32(pSrc[iSample]); + } +} + +MA_API void ma_clip_pcm_frames(void* pDst, const void* pSrc, ma_uint64 frameCount, ma_format format, ma_uint32 channels) +{ + ma_uint64 sampleCount; + + MA_ASSERT(pDst != NULL); + MA_ASSERT(pSrc != NULL); + + sampleCount = frameCount * channels; + + switch (format) { + case ma_format_u8: ma_clip_samples_u8( (ma_uint8*)pDst, (const ma_int16*)pSrc, sampleCount); break; + case ma_format_s16: ma_clip_samples_s16((ma_int16*)pDst, (const ma_int32*)pSrc, sampleCount); break; + case ma_format_s24: ma_clip_samples_s24((ma_uint8*)pDst, (const ma_int64*)pSrc, sampleCount); break; + case ma_format_s32: ma_clip_samples_s32((ma_int32*)pDst, (const ma_int64*)pSrc, sampleCount); break; + case ma_format_f32: ma_clip_samples_f32(( float*)pDst, (const float*)pSrc, sampleCount); break; + + /* Do nothing if we don't know the format. We're including these here to silence a compiler warning about enums not being handled by the switch. */ + case ma_format_unknown: + case ma_format_count: + break; + } +} + + +MA_API void ma_copy_and_apply_volume_factor_u8(ma_uint8* pSamplesOut, const ma_uint8* pSamplesIn, ma_uint64 sampleCount, float factor) +{ + ma_uint64 iSample; + + if (pSamplesOut == NULL || pSamplesIn == NULL) { + return; + } + + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamplesOut[iSample] = (ma_uint8)(pSamplesIn[iSample] * factor); + } +} + +MA_API void ma_copy_and_apply_volume_factor_s16(ma_int16* pSamplesOut, const ma_int16* pSamplesIn, ma_uint64 sampleCount, float factor) +{ + ma_uint64 iSample; + + if (pSamplesOut == NULL || pSamplesIn == NULL) { + return; + } + + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamplesOut[iSample] = (ma_int16)(pSamplesIn[iSample] * factor); + } +} + +MA_API void ma_copy_and_apply_volume_factor_s24(void* pSamplesOut, const void* pSamplesIn, ma_uint64 sampleCount, float factor) +{ + ma_uint64 iSample; + ma_uint8* pSamplesOut8; + ma_uint8* pSamplesIn8; + + if (pSamplesOut == NULL || pSamplesIn == NULL) { + return; + } + + pSamplesOut8 = (ma_uint8*)pSamplesOut; + pSamplesIn8 = (ma_uint8*)pSamplesIn; + + for (iSample = 0; iSample < sampleCount; iSample += 1) { + ma_int32 sampleS32; + + sampleS32 = (ma_int32)(((ma_uint32)(pSamplesIn8[iSample*3+0]) << 8) | ((ma_uint32)(pSamplesIn8[iSample*3+1]) << 16) | ((ma_uint32)(pSamplesIn8[iSample*3+2])) << 24); + sampleS32 = (ma_int32)(sampleS32 * factor); + + pSamplesOut8[iSample*3+0] = (ma_uint8)(((ma_uint32)sampleS32 & 0x0000FF00) >> 8); + pSamplesOut8[iSample*3+1] = (ma_uint8)(((ma_uint32)sampleS32 & 0x00FF0000) >> 16); + pSamplesOut8[iSample*3+2] = (ma_uint8)(((ma_uint32)sampleS32 & 0xFF000000) >> 24); + } +} + +MA_API void ma_copy_and_apply_volume_factor_s32(ma_int32* pSamplesOut, const ma_int32* pSamplesIn, ma_uint64 sampleCount, float factor) +{ + ma_uint64 iSample; + + if (pSamplesOut == NULL || pSamplesIn == NULL) { + return; + } + + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamplesOut[iSample] = (ma_int32)(pSamplesIn[iSample] * factor); + } +} + 
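+/*
+Editorial note, not part of upstream miniaudio: the ma_copy_and_apply_volume_factor_* family above applies a
+linear gain sample-by-sample, and the *_pcm_frames_* and ma_apply_volume_factor_* wrappers further down simply
+forward to these with sampleCount = frameCount * channels (or with dst == src for in-place use). A minimal usage
+sketch, assuming an interleaved f32 stereo buffer owned by the caller (buffer name and sizes are illustrative only):
+
+    float frames[512 * 2];                                                         // 512 frames, 2 channels, already filled by the caller
+    ma_apply_volume_factor_f32(frames, 512 * 2, 0.5f);                             // halve the volume in place (sample-count API)
+    ma_copy_and_apply_volume_factor_pcm_frames_f32(frames, frames, 512, 2, 0.5f);  // same operation via the frame-based API
+*/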
+MA_API void ma_copy_and_apply_volume_factor_f32(float* pSamplesOut, const float* pSamplesIn, ma_uint64 sampleCount, float factor) +{ + ma_uint64 iSample; + + if (pSamplesOut == NULL || pSamplesIn == NULL) { + return; + } + + if (factor == 1) { + if (pSamplesOut == pSamplesIn) { + /* In place. No-op. */ + } else { + /* Just a copy. */ + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamplesOut[iSample] = pSamplesIn[iSample]; + } + } + } else { + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamplesOut[iSample] = pSamplesIn[iSample] * factor; + } + } +} + +MA_API void ma_apply_volume_factor_u8(ma_uint8* pSamples, ma_uint64 sampleCount, float factor) +{ + ma_copy_and_apply_volume_factor_u8(pSamples, pSamples, sampleCount, factor); +} + +MA_API void ma_apply_volume_factor_s16(ma_int16* pSamples, ma_uint64 sampleCount, float factor) +{ + ma_copy_and_apply_volume_factor_s16(pSamples, pSamples, sampleCount, factor); +} + +MA_API void ma_apply_volume_factor_s24(void* pSamples, ma_uint64 sampleCount, float factor) +{ + ma_copy_and_apply_volume_factor_s24(pSamples, pSamples, sampleCount, factor); +} + +MA_API void ma_apply_volume_factor_s32(ma_int32* pSamples, ma_uint64 sampleCount, float factor) +{ + ma_copy_and_apply_volume_factor_s32(pSamples, pSamples, sampleCount, factor); +} + +MA_API void ma_apply_volume_factor_f32(float* pSamples, ma_uint64 sampleCount, float factor) +{ + ma_copy_and_apply_volume_factor_f32(pSamples, pSamples, sampleCount, factor); +} + +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_u8(ma_uint8* pFramesOut, const ma_uint8* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_u8(pFramesOut, pFramesIn, frameCount*channels, factor); +} + +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s16(ma_int16* pFramesOut, const ma_int16* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_s16(pFramesOut, pFramesIn, frameCount*channels, factor); +} + +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s24(void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_s24(pFramesOut, pFramesIn, frameCount*channels, factor); +} + +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s32(ma_int32* pFramesOut, const ma_int32* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_s32(pFramesOut, pFramesIn, frameCount*channels, factor); +} + +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_f32(float* pFramesOut, const float* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_f32(pFramesOut, pFramesIn, frameCount*channels, factor); +} + +MA_API void ma_copy_and_apply_volume_factor_pcm_frames(void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount, ma_format format, ma_uint32 channels, float factor) +{ + switch (format) + { + case ma_format_u8: ma_copy_and_apply_volume_factor_pcm_frames_u8 ((ma_uint8*)pFramesOut, (const ma_uint8*)pFramesIn, frameCount, channels, factor); return; + case ma_format_s16: ma_copy_and_apply_volume_factor_pcm_frames_s16((ma_int16*)pFramesOut, (const ma_int16*)pFramesIn, frameCount, channels, factor); return; + case ma_format_s24: ma_copy_and_apply_volume_factor_pcm_frames_s24( pFramesOut, pFramesIn, frameCount, channels, factor); return; + case ma_format_s32: ma_copy_and_apply_volume_factor_pcm_frames_s32((ma_int32*)pFramesOut, (const 
ma_int32*)pFramesIn, frameCount, channels, factor); return; + case ma_format_f32: ma_copy_and_apply_volume_factor_pcm_frames_f32( (float*)pFramesOut, (const float*)pFramesIn, frameCount, channels, factor); return; + default: return; /* Do nothing. */ + } +} + +MA_API void ma_apply_volume_factor_pcm_frames_u8(ma_uint8* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_pcm_frames_u8(pFrames, pFrames, frameCount, channels, factor); +} + +MA_API void ma_apply_volume_factor_pcm_frames_s16(ma_int16* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_pcm_frames_s16(pFrames, pFrames, frameCount, channels, factor); +} + +MA_API void ma_apply_volume_factor_pcm_frames_s24(void* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_pcm_frames_s24(pFrames, pFrames, frameCount, channels, factor); +} + +MA_API void ma_apply_volume_factor_pcm_frames_s32(ma_int32* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_pcm_frames_s32(pFrames, pFrames, frameCount, channels, factor); +} + +MA_API void ma_apply_volume_factor_pcm_frames_f32(float* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_pcm_frames_f32(pFrames, pFrames, frameCount, channels, factor); +} + +MA_API void ma_apply_volume_factor_pcm_frames(void* pFramesOut, ma_uint64 frameCount, ma_format format, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_pcm_frames(pFramesOut, pFramesOut, frameCount, format, channels, factor); +} + + +MA_API void ma_copy_and_apply_volume_factor_per_channel_f32(float* pFramesOut, const float* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float* pChannelGains) +{ + ma_uint64 iFrame; + + if (channels == 2) { + /* TODO: Do an optimized implementation for stereo and mono. Can do a SIMD optimized implementation as well. 
*/ + } + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOut[iFrame * channels + iChannel] = pFramesIn[iFrame * channels + iChannel] * pChannelGains[iChannel]; + } + } +} + + + +static MA_INLINE ma_int16 ma_apply_volume_unclipped_u8(ma_int16 x, ma_int16 volume) +{ + return (ma_int16)(((ma_int32)x * (ma_int32)volume) >> 8); +} + +static MA_INLINE ma_int32 ma_apply_volume_unclipped_s16(ma_int32 x, ma_int16 volume) +{ + return (ma_int32)((x * volume) >> 8); +} + +static MA_INLINE ma_int64 ma_apply_volume_unclipped_s24(ma_int64 x, ma_int16 volume) +{ + return (ma_int64)((x * volume) >> 8); +} + +static MA_INLINE ma_int64 ma_apply_volume_unclipped_s32(ma_int64 x, ma_int16 volume) +{ + return (ma_int64)((x * volume) >> 8); +} + +static MA_INLINE float ma_apply_volume_unclipped_f32(float x, float volume) +{ + return x * volume; +} + + +MA_API void ma_copy_and_apply_volume_and_clip_samples_u8(ma_uint8* pDst, const ma_int16* pSrc, ma_uint64 count, float volume) +{ + ma_uint64 iSample; + ma_int16 volumeFixed; + + MA_ASSERT(pDst != NULL); + MA_ASSERT(pSrc != NULL); + + volumeFixed = ma_float_to_fixed_16(volume); + + for (iSample = 0; iSample < count; iSample += 1) { + pDst[iSample] = ma_clip_u8(ma_apply_volume_unclipped_u8(pSrc[iSample], volumeFixed)); + } +} + +MA_API void ma_copy_and_apply_volume_and_clip_samples_s16(ma_int16* pDst, const ma_int32* pSrc, ma_uint64 count, float volume) +{ + ma_uint64 iSample; + ma_int16 volumeFixed; + + MA_ASSERT(pDst != NULL); + MA_ASSERT(pSrc != NULL); + + volumeFixed = ma_float_to_fixed_16(volume); + + for (iSample = 0; iSample < count; iSample += 1) { + pDst[iSample] = ma_clip_s16(ma_apply_volume_unclipped_s16(pSrc[iSample], volumeFixed)); + } +} + +MA_API void ma_copy_and_apply_volume_and_clip_samples_s24(ma_uint8* pDst, const ma_int64* pSrc, ma_uint64 count, float volume) +{ + ma_uint64 iSample; + ma_int16 volumeFixed; + + MA_ASSERT(pDst != NULL); + MA_ASSERT(pSrc != NULL); + + volumeFixed = ma_float_to_fixed_16(volume); + + for (iSample = 0; iSample < count; iSample += 1) { + ma_int64 s = ma_clip_s24(ma_apply_volume_unclipped_s24(pSrc[iSample], volumeFixed)); + pDst[iSample*3 + 0] = (ma_uint8)((s & 0x000000FF) >> 0); + pDst[iSample*3 + 1] = (ma_uint8)((s & 0x0000FF00) >> 8); + pDst[iSample*3 + 2] = (ma_uint8)((s & 0x00FF0000) >> 16); + } +} + +MA_API void ma_copy_and_apply_volume_and_clip_samples_s32(ma_int32* pDst, const ma_int64* pSrc, ma_uint64 count, float volume) +{ + ma_uint64 iSample; + ma_int16 volumeFixed; + + MA_ASSERT(pDst != NULL); + MA_ASSERT(pSrc != NULL); + + volumeFixed = ma_float_to_fixed_16(volume); + + for (iSample = 0; iSample < count; iSample += 1) { + pDst[iSample] = ma_clip_s32(ma_apply_volume_unclipped_s32(pSrc[iSample], volumeFixed)); + } +} + +MA_API void ma_copy_and_apply_volume_and_clip_samples_f32(float* pDst, const float* pSrc, ma_uint64 count, float volume) +{ + ma_uint64 iSample; + + MA_ASSERT(pDst != NULL); + MA_ASSERT(pSrc != NULL); + + /* For the f32 case we need to make sure this supports in-place processing where the input and output buffers are the same. 
*/ + + for (iSample = 0; iSample < count; iSample += 1) { + pDst[iSample] = ma_clip_f32(ma_apply_volume_unclipped_f32(pSrc[iSample], volume)); + } +} + +MA_API void ma_copy_and_apply_volume_and_clip_pcm_frames(void* pDst, const void* pSrc, ma_uint64 frameCount, ma_format format, ma_uint32 channels, float volume) +{ + MA_ASSERT(pDst != NULL); + MA_ASSERT(pSrc != NULL); + + if (volume == 1) { + ma_clip_pcm_frames(pDst, pSrc, frameCount, format, channels); /* Optimized case for volume = 1. */ + } else if (volume == 0) { + ma_silence_pcm_frames(pDst, frameCount, format, channels); /* Optimized case for volume = 0. */ + } else { + ma_uint64 sampleCount = frameCount * channels; + + switch (format) { + case ma_format_u8: ma_copy_and_apply_volume_and_clip_samples_u8( (ma_uint8*)pDst, (const ma_int16*)pSrc, sampleCount, volume); break; + case ma_format_s16: ma_copy_and_apply_volume_and_clip_samples_s16((ma_int16*)pDst, (const ma_int32*)pSrc, sampleCount, volume); break; + case ma_format_s24: ma_copy_and_apply_volume_and_clip_samples_s24((ma_uint8*)pDst, (const ma_int64*)pSrc, sampleCount, volume); break; + case ma_format_s32: ma_copy_and_apply_volume_and_clip_samples_s32((ma_int32*)pDst, (const ma_int64*)pSrc, sampleCount, volume); break; + case ma_format_f32: ma_copy_and_apply_volume_and_clip_samples_f32(( float*)pDst, (const float*)pSrc, sampleCount, volume); break; + + /* Do nothing if we don't know the format. We're including these here to silence a compiler warning about enums not being handled by the switch. */ + case ma_format_unknown: + case ma_format_count: + break; + } + } +} + + + +MA_API float ma_volume_linear_to_db(float factor) +{ + return 20*ma_log10f(factor); +} + +MA_API float ma_volume_db_to_linear(float gain) +{ + return ma_powf(10, gain/20.0f); +} + + +MA_API ma_result ma_mix_pcm_frames_f32(float* pDst, const float* pSrc, ma_uint64 frameCount, ma_uint32 channels, float volume) +{ + ma_uint64 iSample; + ma_uint64 sampleCount; + + if (pDst == NULL || pSrc == NULL || channels == 0) { + return MA_INVALID_ARGS; + } + + if (volume == 0) { + return MA_SUCCESS; /* No changes if the volume is 0. */ + } + + sampleCount = frameCount * channels; + + if (volume == 1) { + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pDst[iSample] += pSrc[iSample]; + } + } else { + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pDst[iSample] += ma_apply_volume_unclipped_f32(pSrc[iSample], volume); + } + } + + return MA_SUCCESS; +} + + + +/************************************************************************************************************************************************************** + +Format Conversion + +**************************************************************************************************************************************************************/ + +static MA_INLINE ma_int16 ma_pcm_sample_f32_to_s16(float x) +{ + return (ma_int16)(x * 32767.0f); +} + +static MA_INLINE ma_int16 ma_pcm_sample_u8_to_s16_no_scale(ma_uint8 x) +{ + return (ma_int16)((ma_int16)x - 128); +} + +static MA_INLINE ma_int64 ma_pcm_sample_s24_to_s32_no_scale(const ma_uint8* x) +{ + return (ma_int64)(((ma_uint64)x[0] << 40) | ((ma_uint64)x[1] << 48) | ((ma_uint64)x[2] << 56)) >> 40; /* Make sure the sign bits are maintained. 
*/ +} + +static MA_INLINE void ma_pcm_sample_s32_to_s24_no_scale(ma_int64 x, ma_uint8* s24) +{ + s24[0] = (ma_uint8)((x & 0x000000FF) >> 0); + s24[1] = (ma_uint8)((x & 0x0000FF00) >> 8); + s24[2] = (ma_uint8)((x & 0x00FF0000) >> 16); +} + + +/* u8 */ +MA_API void ma_pcm_u8_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; + ma_copy_memory_64(dst, src, count * sizeof(ma_uint8)); +} + + +static MA_INLINE void ma_pcm_u8_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int16* dst_s16 = (ma_int16*)dst; + const ma_uint8* src_u8 = (const ma_uint8*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int16 x = src_u8[i]; + x = (ma_int16)(x - 128); + x = (ma_int16)(x << 8); + dst_s16[i] = x; + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_u8_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s16__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_u8_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_u8_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_u8_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_u8_to_s16__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_u8_to_s16__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_u8_to_s16__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_u8_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_s24 = (ma_uint8*)dst; + const ma_uint8* src_u8 = (const ma_uint8*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int16 x = src_u8[i]; + x = (ma_int16)(x - 128); + + dst_s24[i*3+0] = 0; + dst_s24[i*3+1] = 0; + dst_s24[i*3+2] = (ma_uint8)((ma_int8)x); + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_u8_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s24__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_u8_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_u8_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_u8_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_u8_to_s24__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_u8_to_s24__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_u8_to_s24__neon(dst, src, count, ditherMode); + } else +#endif + { + 
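/* Reached when no SIMD variant was compiled in or the runtime CPU check failed; use the portable conversion. */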
ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_u8_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const ma_uint8* src_u8 = (const ma_uint8*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_u8[i]; + x = x - 128; + x = x << 24; + dst_s32[i] = x; + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_u8_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_u8_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_u8_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_u8_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_u8_to_s32__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_u8_to_s32__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_u8_to_s32__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_u8_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + float* dst_f32 = (float*)dst; + const ma_uint8* src_u8 = (const ma_uint8*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + float x = (float)src_u8[i]; + x = x * 0.00784313725490196078f; /* 0..255 to 0..2 */ + x = x - 1; /* 0..2 to -1..1 */ + + dst_f32[i] = x; + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_u8_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_f32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_u8_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_u8_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_u8_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_u8_to_f32__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_u8_to_f32__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_u8_to_f32__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +#ifdef MA_USE_REFERENCE_CONVERSION_APIS +static MA_INLINE void ma_pcm_interleave_u8__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_uint8* dst_u8 = (ma_uint8*)dst; + const ma_uint8** src_u8 = (const ma_uint8**)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; 
iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_u8[iFrame*channels + iChannel] = src_u8[iChannel][iFrame]; + } + } +} +#else +static MA_INLINE void ma_pcm_interleave_u8__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_uint8* dst_u8 = (ma_uint8*)dst; + const ma_uint8** src_u8 = (const ma_uint8**)src; + + if (channels == 1) { + ma_copy_memory_64(dst, src[0], frameCount * sizeof(ma_uint8)); + } else if (channels == 2) { + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + dst_u8[iFrame*2 + 0] = src_u8[0][iFrame]; + dst_u8[iFrame*2 + 1] = src_u8[1][iFrame]; + } + } else { + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_u8[iFrame*channels + iChannel] = src_u8[iChannel][iFrame]; + } + } + } +} +#endif + +MA_API void ma_pcm_interleave_u8(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_u8__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_u8__optimized(dst, src, frameCount, channels); +#endif +} + + +static MA_INLINE void ma_pcm_deinterleave_u8__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_uint8** dst_u8 = (ma_uint8**)dst; + const ma_uint8* src_u8 = (const ma_uint8*)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_u8[iChannel][iFrame] = src_u8[iFrame*channels + iChannel]; + } + } +} + +static MA_INLINE void ma_pcm_deinterleave_u8__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_deinterleave_u8__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_deinterleave_u8(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_u8__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_u8__optimized(dst, src, frameCount, channels); +#endif +} + + +/* s16 */ +static MA_INLINE void ma_pcm_s16_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_u8 = (ma_uint8*)dst; + const ma_int16* src_s16 = (const ma_int16*)src; + + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int16 x = src_s16[i]; + x = (ma_int16)(x >> 8); + x = (ma_int16)(x + 128); + dst_u8[i] = (ma_uint8)x; + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int16 x = src_s16[i]; + + /* Dither. Don't overflow. 
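The dither word spans one 8-bit step (-0x80..0x7F), and the sum is clamped to 0x7FFF so the shift below never sees a value above the s16 maximum.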
*/ + ma_int32 dither = ma_dither_s32(ditherMode, -0x80, 0x7F); + if ((x + dither) <= 0x7FFF) { + x = (ma_int16)(x + dither); + } else { + x = 0x7FFF; + } + + x = (ma_int16)(x >> 8); + x = (ma_int16)(x + 128); + dst_u8[i] = (ma_uint8)x; + } + } +} + +static MA_INLINE void ma_pcm_s16_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_u8__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s16_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s16_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s16_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s16_to_u8__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s16_to_u8__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s16_to_u8__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode); + } +#endif +} + + +MA_API void ma_pcm_s16_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; + ma_copy_memory_64(dst, src, count * sizeof(ma_int16)); +} + + +static MA_INLINE void ma_pcm_s16_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_s24 = (ma_uint8*)dst; + const ma_int16* src_s16 = (const ma_int16*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + dst_s24[i*3+0] = 0; + dst_s24[i*3+1] = (ma_uint8)(src_s16[i] & 0xFF); + dst_s24[i*3+2] = (ma_uint8)(src_s16[i] >> 8); + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_s16_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s24__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s16_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s16_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s16_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s16_to_s24__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s16_to_s24__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s16_to_s24__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s16_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const ma_int16* src_s16 = (const ma_int16*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + dst_s32[i] = src_s16[i] << 16; + } + + (void)ditherMode; +} + 
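+/*
+Illustrative sketch (not part of miniaudio itself, wrapped in #if 0 so it does not affect the build):
+how a caller might widen an interleaved s16 buffer to s32 with the public ma_pcm_s16_to_s32() API.
+The buffer contents and sizes are assumptions made purely for the example; dithering is never
+required when widening, so ma_dither_mode_none is passed.
+*/
+#if 0
+static void example_widen_s16_to_s32(void)
+{
+    ma_int16 src[4] = { -32768, -1, 1, 32767 };  /* 2 frames, 2 channels, interleaved. */
+    ma_int32 dst[4];
+
+    /* The count is in samples (frames * channels), matching the loops above. */
+    ma_pcm_s16_to_s32(dst, src, 4, ma_dither_mode_none);
+
+    /* Each output is the input placed in the top 16 bits, e.g. 1 maps to 65536 and -1 to -65536. */
+}
+#endif
+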
+static MA_INLINE void ma_pcm_s16_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s16_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s16_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s16_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s16_to_s32__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s16_to_s32__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s16_to_s32__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s16_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + float* dst_f32 = (float*)dst; + const ma_int16* src_s16 = (const ma_int16*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + float x = (float)src_s16[i]; + +#if 0 + /* The accurate way. */ + x = x + 32768.0f; /* -32768..32767 to 0..65535 */ + x = x * 0.00003051804379339284f; /* 0..65535 to 0..2 */ + x = x - 1; /* 0..2 to -1..1 */ +#else + /* The fast way. */ + x = x * 0.000030517578125f; /* -32768..32767 to -1..0.999969482421875 */ +#endif + + dst_f32[i] = x; + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_s16_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_f32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s16_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s16_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s16_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s16_to_f32__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s16_to_f32__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s16_to_f32__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_interleave_s16__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_int16* dst_s16 = (ma_int16*)dst; + const ma_int16** src_s16 = (const ma_int16**)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_s16[iFrame*channels + iChannel] = src_s16[iChannel][iFrame]; + } + } +} + +static MA_INLINE void ma_pcm_interleave_s16__optimized(void* dst, const void** src, 
ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_interleave_s16__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_interleave_s16(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_s16__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_s16__optimized(dst, src, frameCount, channels); +#endif +} + + +static MA_INLINE void ma_pcm_deinterleave_s16__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_int16** dst_s16 = (ma_int16**)dst; + const ma_int16* src_s16 = (const ma_int16*)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_s16[iChannel][iFrame] = src_s16[iFrame*channels + iChannel]; + } + } +} + +static MA_INLINE void ma_pcm_deinterleave_s16__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_deinterleave_s16__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_deinterleave_s16(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_s16__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_s16__optimized(dst, src, frameCount, channels); +#endif +} + + +/* s24 */ +static MA_INLINE void ma_pcm_s24_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_u8 = (ma_uint8*)dst; + const ma_uint8* src_s24 = (const ma_uint8*)src; + + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + dst_u8[i] = (ma_uint8)((ma_int8)src_s24[i*3 + 2] + 128); + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24); + + /* Dither. Don't overflow. 
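x was expanded to 32-bit scale above, so the clamp against 0x7FFFFFFF keeps the dithered value inside the signed 32-bit range before it is shifted down to 8 bits.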
*/ + ma_int32 dither = ma_dither_s32(ditherMode, -0x800000, 0x7FFFFF); + if ((ma_int64)x + dither <= 0x7FFFFFFF) { + x = x + dither; + } else { + x = 0x7FFFFFFF; + } + + x = x >> 24; + x = x + 128; + dst_u8[i] = (ma_uint8)x; + } + } +} + +static MA_INLINE void ma_pcm_s24_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_u8__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s24_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s24_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s24_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s24_to_u8__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s24_to_u8__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s24_to_u8__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s24_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int16* dst_s16 = (ma_int16*)dst; + const ma_uint8* src_s24 = (const ma_uint8*)src; + + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_uint16 dst_lo = ((ma_uint16)src_s24[i*3 + 1]); + ma_uint16 dst_hi = (ma_uint16)((ma_uint16)src_s24[i*3 + 2] << 8); + dst_s16[i] = (ma_int16)(dst_lo | dst_hi); + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24); + + /* Dither. Don't overflow. 
*/ + ma_int32 dither = ma_dither_s32(ditherMode, -0x8000, 0x7FFF); + if ((ma_int64)x + dither <= 0x7FFFFFFF) { + x = x + dither; + } else { + x = 0x7FFFFFFF; + } + + x = x >> 16; + dst_s16[i] = (ma_int16)x; + } + } +} + +static MA_INLINE void ma_pcm_s24_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s16__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s24_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s24_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s24_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s24_to_s16__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s24_to_s16__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s24_to_s16__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); + } +#endif +} + + +MA_API void ma_pcm_s24_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; + + ma_copy_memory_64(dst, src, count * 3); +} + + +static MA_INLINE void ma_pcm_s24_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const ma_uint8* src_s24 = (const ma_uint8*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + dst_s32[i] = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24); + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_s24_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s24_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s24_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s24_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s24_to_s32__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s24_to_s32__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s24_to_s32__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s24_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + float* dst_f32 = (float*)dst; + const ma_uint8* src_s24 = (const ma_uint8*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + float x = (float)(((ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | 
((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24)) >> 8); + +#if 0 + /* The accurate way. */ + x = x + 8388608.0f; /* -8388608..8388607 to 0..16777215 */ + x = x * 0.00000011920929665621f; /* 0..16777215 to 0..2 */ + x = x - 1; /* 0..2 to -1..1 */ +#else + /* The fast way. */ + x = x * 0.00000011920928955078125f; /* -8388608..8388607 to -1..0.999969482421875 */ +#endif + + dst_f32[i] = x; + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_s24_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_f32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s24_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s24_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s24_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s24_to_f32__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s24_to_f32__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s24_to_f32__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_interleave_s24__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_uint8* dst8 = (ma_uint8*)dst; + const ma_uint8** src8 = (const ma_uint8**)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst8[iFrame*3*channels + iChannel*3 + 0] = src8[iChannel][iFrame*3 + 0]; + dst8[iFrame*3*channels + iChannel*3 + 1] = src8[iChannel][iFrame*3 + 1]; + dst8[iFrame*3*channels + iChannel*3 + 2] = src8[iChannel][iFrame*3 + 2]; + } + } +} + +static MA_INLINE void ma_pcm_interleave_s24__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_interleave_s24__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_interleave_s24(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_s24__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_s24__optimized(dst, src, frameCount, channels); +#endif +} + + +static MA_INLINE void ma_pcm_deinterleave_s24__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_uint8** dst8 = (ma_uint8**)dst; + const ma_uint8* src8 = (const ma_uint8*)src; + + ma_uint32 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst8[iChannel][iFrame*3 + 0] = src8[iFrame*3*channels + iChannel*3 + 0]; + dst8[iChannel][iFrame*3 + 1] = src8[iFrame*3*channels + iChannel*3 + 1]; + dst8[iChannel][iFrame*3 + 2] = src8[iFrame*3*channels + iChannel*3 + 2]; + } + } +} + +static MA_INLINE void ma_pcm_deinterleave_s24__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_deinterleave_s24__reference(dst, src, 
frameCount, channels); +} + +MA_API void ma_pcm_deinterleave_s24(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_s24__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_s24__optimized(dst, src, frameCount, channels); +#endif +} + + + +/* s32 */ +static MA_INLINE void ma_pcm_s32_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_u8 = (ma_uint8*)dst; + const ma_int32* src_s32 = (const ma_int32*)src; + + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_s32[i]; + x = x >> 24; + x = x + 128; + dst_u8[i] = (ma_uint8)x; + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_s32[i]; + + /* Dither. Don't overflow. */ + ma_int32 dither = ma_dither_s32(ditherMode, -0x800000, 0x7FFFFF); + if ((ma_int64)x + dither <= 0x7FFFFFFF) { + x = x + dither; + } else { + x = 0x7FFFFFFF; + } + + x = x >> 24; + x = x + 128; + dst_u8[i] = (ma_uint8)x; + } + } +} + +static MA_INLINE void ma_pcm_s32_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_u8__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s32_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s32_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s32_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s32_to_u8__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s32_to_u8__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s32_to_u8__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s32_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int16* dst_s16 = (ma_int16*)dst; + const ma_int32* src_s32 = (const ma_int32*)src; + + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_s32[i]; + x = x >> 16; + dst_s16[i] = (ma_int16)x; + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_s32[i]; + + /* Dither. Don't overflow. 
*/ + ma_int32 dither = ma_dither_s32(ditherMode, -0x8000, 0x7FFF); + if ((ma_int64)x + dither <= 0x7FFFFFFF) { + x = x + dither; + } else { + x = 0x7FFFFFFF; + } + + x = x >> 16; + dst_s16[i] = (ma_int16)x; + } + } +} + +static MA_INLINE void ma_pcm_s32_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s16__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s32_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s32_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s32_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s32_to_s16__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s32_to_s16__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s32_to_s16__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s32_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_s24 = (ma_uint8*)dst; + const ma_int32* src_s32 = (const ma_int32*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_uint32 x = (ma_uint32)src_s32[i]; + dst_s24[i*3+0] = (ma_uint8)((x & 0x0000FF00) >> 8); + dst_s24[i*3+1] = (ma_uint8)((x & 0x00FF0000) >> 16); + dst_s24[i*3+2] = (ma_uint8)((x & 0xFF000000) >> 24); + } + + (void)ditherMode; /* No dithering for s32 -> s24. 
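The low 8 bits are simply truncated.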
*/ +} + +static MA_INLINE void ma_pcm_s32_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s24__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s32_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s32_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s32_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s32_to_s24__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s32_to_s24__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s32_to_s24__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); + } +#endif +} + + +MA_API void ma_pcm_s32_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; + + ma_copy_memory_64(dst, src, count * sizeof(ma_int32)); +} + + +static MA_INLINE void ma_pcm_s32_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + float* dst_f32 = (float*)dst; + const ma_int32* src_s32 = (const ma_int32*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + double x = src_s32[i]; + +#if 0 + x = x + 2147483648.0; + x = x * 0.0000000004656612873077392578125; + x = x - 1; +#else + x = x / 2147483648.0; +#endif + + dst_f32[i] = (float)x; + } + + (void)ditherMode; /* No dithering for s32 -> f32. 
*/ +} + +static MA_INLINE void ma_pcm_s32_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_f32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s32_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s32_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s32_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s32_to_f32__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s32_to_f32__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s32_to_f32__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_interleave_s32__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const ma_int32** src_s32 = (const ma_int32**)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_s32[iFrame*channels + iChannel] = src_s32[iChannel][iFrame]; + } + } +} + +static MA_INLINE void ma_pcm_interleave_s32__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_interleave_s32__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_interleave_s32(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_s32__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_s32__optimized(dst, src, frameCount, channels); +#endif +} + + +static MA_INLINE void ma_pcm_deinterleave_s32__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_int32** dst_s32 = (ma_int32**)dst; + const ma_int32* src_s32 = (const ma_int32*)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_s32[iChannel][iFrame] = src_s32[iFrame*channels + iChannel]; + } + } +} + +static MA_INLINE void ma_pcm_deinterleave_s32__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_deinterleave_s32__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_deinterleave_s32(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_s32__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_s32__optimized(dst, src, frameCount, channels); +#endif +} + + +/* f32 */ +static MA_INLINE void ma_pcm_f32_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint64 i; + + ma_uint8* dst_u8 = (ma_uint8*)dst; + const float* src_f32 = (const float*)src; + + float ditherMin = 0; + float ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -128; + ditherMax = 1.0f / 127; + } + + for (i = 0; i < 
count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + x = x + 1; /* -1..1 to 0..2 */ + x = x * 127.5f; /* 0..2 to 0..255 */ + + dst_u8[i] = (ma_uint8)x; + } +} + +static MA_INLINE void ma_pcm_f32_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_u8__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_f32_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_f32_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_f32_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_f32_to_u8__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_f32_to_u8__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_f32_to_u8__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); + } +#endif +} + +#ifdef MA_USE_REFERENCE_CONVERSION_APIS +static MA_INLINE void ma_pcm_f32_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint64 i; + + ma_int16* dst_s16 = (ma_int16*)dst; + const float* src_f32 = (const float*)src; + + float ditherMin = 0; + float ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; + } + + for (i = 0; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + +#if 0 + /* The accurate way. */ + x = x + 1; /* -1..1 to 0..2 */ + x = x * 32767.5f; /* 0..2 to 0..65535 */ + x = x - 32768.0f; /* 0...65535 to -32768..32767 */ +#else + /* The fast way. */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ +#endif + + dst_s16[i] = (ma_int16)x; + } +} +#else +static MA_INLINE void ma_pcm_f32_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint64 i; + ma_uint64 i4; + ma_uint64 count4; + + ma_int16* dst_s16 = (ma_int16*)dst; + const float* src_f32 = (const float*)src; + + float ditherMin = 0; + float ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; + } + + /* Unrolled. */ + i = 0; + count4 = count >> 2; + for (i4 = 0; i4 < count4; i4 += 1) { + float d0 = ma_dither_f32(ditherMode, ditherMin, ditherMax); + float d1 = ma_dither_f32(ditherMode, ditherMin, ditherMax); + float d2 = ma_dither_f32(ditherMode, ditherMin, ditherMax); + float d3 = ma_dither_f32(ditherMode, ditherMin, ditherMax); + + float x0 = src_f32[i+0]; + float x1 = src_f32[i+1]; + float x2 = src_f32[i+2]; + float x3 = src_f32[i+3]; + + x0 = x0 + d0; + x1 = x1 + d1; + x2 = x2 + d2; + x3 = x3 + d3; + + x0 = ((x0 < -1) ? -1 : ((x0 > 1) ? 1 : x0)); + x1 = ((x1 < -1) ? -1 : ((x1 > 1) ? 1 : x1)); + x2 = ((x2 < -1) ? -1 : ((x2 > 1) ? 1 : x2)); + x3 = ((x3 < -1) ? -1 : ((x3 > 1) ? 
1 : x3)); + + x0 = x0 * 32767.0f; + x1 = x1 * 32767.0f; + x2 = x2 * 32767.0f; + x3 = x3 * 32767.0f; + + dst_s16[i+0] = (ma_int16)x0; + dst_s16[i+1] = (ma_int16)x1; + dst_s16[i+2] = (ma_int16)x2; + dst_s16[i+3] = (ma_int16)x3; + + i += 4; + } + + /* Leftover. */ + for (; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ + + dst_s16[i] = (ma_int16)x; + } +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_f32_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint64 i; + ma_uint64 i8; + ma_uint64 count8; + ma_int16* dst_s16; + const float* src_f32; + float ditherMin; + float ditherMax; + + /* Both the input and output buffers need to be aligned to 16 bytes. */ + if ((((ma_uintptr)dst & 15) != 0) || (((ma_uintptr)src & 15) != 0)) { + ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + return; + } + + dst_s16 = (ma_int16*)dst; + src_f32 = (const float*)src; + + ditherMin = 0; + ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; + } + + i = 0; + + /* SSE2. SSE allows us to output 8 s16's at a time which means our loop is unrolled 8 times. */ + count8 = count >> 3; + for (i8 = 0; i8 < count8; i8 += 1) { + __m128 d0; + __m128 d1; + __m128 x0; + __m128 x1; + + if (ditherMode == ma_dither_mode_none) { + d0 = _mm_set1_ps(0); + d1 = _mm_set1_ps(0); + } else if (ditherMode == ma_dither_mode_rectangle) { + d0 = _mm_set_ps( + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax) + ); + d1 = _mm_set_ps( + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax) + ); + } else { + d0 = _mm_set_ps( + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax) + ); + d1 = _mm_set_ps( + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax) + ); + } + + x0 = *((__m128*)(src_f32 + i) + 0); + x1 = *((__m128*)(src_f32 + i) + 1); + + x0 = _mm_add_ps(x0, d0); + x1 = _mm_add_ps(x1, d1); + + x0 = _mm_mul_ps(x0, _mm_set1_ps(32767.0f)); + x1 = _mm_mul_ps(x1, _mm_set1_ps(32767.0f)); + + _mm_stream_si128(((__m128i*)(dst_s16 + i)), _mm_packs_epi32(_mm_cvttps_epi32(x0), _mm_cvttps_epi32(x1))); + + i += 8; + } + + + /* Leftover. */ + for (; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 
1 : x)); /* clip */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ + + dst_s16[i] = (ma_int16)x; + } +} +#endif /* SSE2 */ + +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_f32_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint64 i; + ma_uint64 i8; + ma_uint64 count8; + ma_int16* dst_s16; + const float* src_f32; + float ditherMin; + float ditherMax; + + if (!ma_has_neon()) { + ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + return; + } + + /* Both the input and output buffers need to be aligned to 16 bytes. */ + if ((((ma_uintptr)dst & 15) != 0) || (((ma_uintptr)src & 15) != 0)) { + ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + return; + } + + dst_s16 = (ma_int16*)dst; + src_f32 = (const float*)src; + + ditherMin = 0; + ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; + } + + i = 0; + + /* NEON. NEON allows us to output 8 s16's at a time which means our loop is unrolled 8 times. */ + count8 = count >> 3; + for (i8 = 0; i8 < count8; i8 += 1) { + float32x4_t d0; + float32x4_t d1; + float32x4_t x0; + float32x4_t x1; + int32x4_t i0; + int32x4_t i1; + + if (ditherMode == ma_dither_mode_none) { + d0 = vmovq_n_f32(0); + d1 = vmovq_n_f32(0); + } else if (ditherMode == ma_dither_mode_rectangle) { + float d0v[4]; + float d1v[4]; + + d0v[0] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d0v[1] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d0v[2] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d0v[3] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d0 = vld1q_f32(d0v); + + d1v[0] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d1v[1] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d1v[2] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d1v[3] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d1 = vld1q_f32(d1v); + } else { + float d0v[4]; + float d1v[4]; + + d0v[0] = ma_dither_f32_triangle(ditherMin, ditherMax); + d0v[1] = ma_dither_f32_triangle(ditherMin, ditherMax); + d0v[2] = ma_dither_f32_triangle(ditherMin, ditherMax); + d0v[3] = ma_dither_f32_triangle(ditherMin, ditherMax); + d0 = vld1q_f32(d0v); + + d1v[0] = ma_dither_f32_triangle(ditherMin, ditherMax); + d1v[1] = ma_dither_f32_triangle(ditherMin, ditherMax); + d1v[2] = ma_dither_f32_triangle(ditherMin, ditherMax); + d1v[3] = ma_dither_f32_triangle(ditherMin, ditherMax); + d1 = vld1q_f32(d1v); + } + + x0 = *((float32x4_t*)(src_f32 + i) + 0); + x1 = *((float32x4_t*)(src_f32 + i) + 1); + + x0 = vaddq_f32(x0, d0); + x1 = vaddq_f32(x1, d1); + + x0 = vmulq_n_f32(x0, 32767.0f); + x1 = vmulq_n_f32(x1, 32767.0f); + + i0 = vcvtq_s32_f32(x0); + i1 = vcvtq_s32_f32(x1); + *((int16x8_t*)(dst_s16 + i)) = vcombine_s16(vqmovn_s32(i0), vqmovn_s32(i1)); + + i += 8; + } + + + /* Leftover. */ + for (; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 
1 : x)); /* clip */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ + + dst_s16[i] = (ma_int16)x; + } +} +#endif /* Neon */ +#endif /* MA_USE_REFERENCE_CONVERSION_APIS */ + +MA_API void ma_pcm_f32_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_f32_to_s16__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_f32_to_s16__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_f32_to_s16__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_f32_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_s24 = (ma_uint8*)dst; + const float* src_f32 = (const float*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 r; + float x = src_f32[i]; + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + +#if 0 + /* The accurate way. */ + x = x + 1; /* -1..1 to 0..2 */ + x = x * 8388607.5f; /* 0..2 to 0..16777215 */ + x = x - 8388608.0f; /* 0..16777215 to -8388608..8388607 */ +#else + /* The fast way. */ + x = x * 8388607.0f; /* -1..1 to -8388607..8388607 */ +#endif + + r = (ma_int32)x; + dst_s24[(i*3)+0] = (ma_uint8)((r & 0x0000FF) >> 0); + dst_s24[(i*3)+1] = (ma_uint8)((r & 0x00FF00) >> 8); + dst_s24[(i*3)+2] = (ma_uint8)((r & 0xFF0000) >> 16); + } + + (void)ditherMode; /* No dithering for f32 -> s24. */ +} + +static MA_INLINE void ma_pcm_f32_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s24__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_f32_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_f32_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_f32_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_f32_to_s24__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_f32_to_s24__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_f32_to_s24__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_f32_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const float* src_f32 = (const float*)src; + + ma_uint32 i; + for (i = 0; i < count; i += 1) { + double x = src_f32[i]; + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + +#if 0 + /* The accurate way. */ + x = x + 1; /* -1..1 to 0..2 */ + x = x * 2147483647.5; /* 0..2 to 0..4294967295 */ + x = x - 2147483648.0; /* 0...4294967295 to -2147483648..2147483647 */ +#else + /* The fast way. */ + x = x * 2147483647.0; /* -1..1 to -2147483647..2147483647 */ +#endif + + dst_s32[i] = (ma_int32)x; + } + + (void)ditherMode; /* No dithering for f32 -> s32. 
*/ +} + +static MA_INLINE void ma_pcm_f32_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_f32_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_f32_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_f32_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_f32_to_s32__reference(dst, src, count, ditherMode); +#else +# if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_f32_to_s32__sse2(dst, src, count, ditherMode); + } else +#elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_f32_to_s32__neon(dst, src, count, ditherMode); + } else +#endif + { + ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +MA_API void ma_pcm_f32_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; + + ma_copy_memory_64(dst, src, count * sizeof(float)); +} + + +static void ma_pcm_interleave_f32__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + float* dst_f32 = (float*)dst; + const float** src_f32 = (const float**)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_f32[iFrame*channels + iChannel] = src_f32[iChannel][iFrame]; + } + } +} + +static void ma_pcm_interleave_f32__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_interleave_f32__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_interleave_f32(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_f32__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_f32__optimized(dst, src, frameCount, channels); +#endif +} + + +static void ma_pcm_deinterleave_f32__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + float** dst_f32 = (float**)dst; + const float* src_f32 = (const float*)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_f32[iChannel][iFrame] = src_f32[iFrame*channels + iChannel]; + } + } +} + +static void ma_pcm_deinterleave_f32__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_deinterleave_f32__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_deinterleave_f32(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_f32__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_f32__optimized(dst, src, frameCount, channels); +#endif +} + + +MA_API void ma_pcm_convert(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 sampleCount, ma_dither_mode ditherMode) +{ + if (formatOut == formatIn) { + ma_copy_memory_64(pOut, pIn, sampleCount * ma_get_bytes_per_sample(formatOut)); + return; + } + + 
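/* Dispatch on the input format first, then on the output format; any combination that hits a default case leaves pOut untouched. */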
switch (formatIn) + { + case ma_format_u8: + { + switch (formatOut) + { + case ma_format_s16: ma_pcm_u8_to_s16(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s24: ma_pcm_u8_to_s24(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s32: ma_pcm_u8_to_s32(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_f32: ma_pcm_u8_to_f32(pOut, pIn, sampleCount, ditherMode); return; + default: break; + } + } break; + + case ma_format_s16: + { + switch (formatOut) + { + case ma_format_u8: ma_pcm_s16_to_u8( pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s24: ma_pcm_s16_to_s24(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s32: ma_pcm_s16_to_s32(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_f32: ma_pcm_s16_to_f32(pOut, pIn, sampleCount, ditherMode); return; + default: break; + } + } break; + + case ma_format_s24: + { + switch (formatOut) + { + case ma_format_u8: ma_pcm_s24_to_u8( pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s16: ma_pcm_s24_to_s16(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s32: ma_pcm_s24_to_s32(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_f32: ma_pcm_s24_to_f32(pOut, pIn, sampleCount, ditherMode); return; + default: break; + } + } break; + + case ma_format_s32: + { + switch (formatOut) + { + case ma_format_u8: ma_pcm_s32_to_u8( pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s16: ma_pcm_s32_to_s16(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s24: ma_pcm_s32_to_s24(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_f32: ma_pcm_s32_to_f32(pOut, pIn, sampleCount, ditherMode); return; + default: break; + } + } break; + + case ma_format_f32: + { + switch (formatOut) + { + case ma_format_u8: ma_pcm_f32_to_u8( pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s16: ma_pcm_f32_to_s16(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s24: ma_pcm_f32_to_s24(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s32: ma_pcm_f32_to_s32(pOut, pIn, sampleCount, ditherMode); return; + default: break; + } + } break; + + default: break; + } +} + +MA_API void ma_convert_pcm_frames_format(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 frameCount, ma_uint32 channels, ma_dither_mode ditherMode) +{ + ma_pcm_convert(pOut, formatOut, pIn, formatIn, frameCount * channels, ditherMode); +} + +MA_API void ma_deinterleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void* pInterleavedPCMFrames, void** ppDeinterleavedPCMFrames) +{ + if (pInterleavedPCMFrames == NULL || ppDeinterleavedPCMFrames == NULL) { + return; /* Invalid args. */ + } + + /* For efficiency we do this per format. 
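s16 and f32 get dedicated typed loops; every other format falls back to a generic per-sample memcpy of ma_get_bytes_per_sample(format) bytes.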
*/ + switch (format) { + case ma_format_s16: + { + const ma_int16* pSrcS16 = (const ma_int16*)pInterleavedPCMFrames; + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + ma_int16* pDstS16 = (ma_int16*)ppDeinterleavedPCMFrames[iChannel]; + pDstS16[iPCMFrame] = pSrcS16[iPCMFrame*channels+iChannel]; + } + } + } break; + + case ma_format_f32: + { + const float* pSrcF32 = (const float*)pInterleavedPCMFrames; + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + float* pDstF32 = (float*)ppDeinterleavedPCMFrames[iChannel]; + pDstF32[iPCMFrame] = pSrcF32[iPCMFrame*channels+iChannel]; + } + } + } break; + + default: + { + ma_uint32 sampleSizeInBytes = ma_get_bytes_per_sample(format); + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + void* pDst = ma_offset_ptr(ppDeinterleavedPCMFrames[iChannel], iPCMFrame*sampleSizeInBytes); + const void* pSrc = ma_offset_ptr(pInterleavedPCMFrames, (iPCMFrame*channels+iChannel)*sampleSizeInBytes); + memcpy(pDst, pSrc, sampleSizeInBytes); + } + } + } break; + } +} + +MA_API void ma_interleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void** ppDeinterleavedPCMFrames, void* pInterleavedPCMFrames) +{ + switch (format) + { + case ma_format_s16: + { + ma_int16* pDstS16 = (ma_int16*)pInterleavedPCMFrames; + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + const ma_int16* pSrcS16 = (const ma_int16*)ppDeinterleavedPCMFrames[iChannel]; + pDstS16[iPCMFrame*channels+iChannel] = pSrcS16[iPCMFrame]; + } + } + } break; + + case ma_format_f32: + { + float* pDstF32 = (float*)pInterleavedPCMFrames; + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + const float* pSrcF32 = (const float*)ppDeinterleavedPCMFrames[iChannel]; + pDstF32[iPCMFrame*channels+iChannel] = pSrcF32[iPCMFrame]; + } + } + } break; + + default: + { + ma_uint32 sampleSizeInBytes = ma_get_bytes_per_sample(format); + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + void* pDst = ma_offset_ptr(pInterleavedPCMFrames, (iPCMFrame*channels+iChannel)*sampleSizeInBytes); + const void* pSrc = ma_offset_ptr(ppDeinterleavedPCMFrames[iChannel], iPCMFrame*sampleSizeInBytes); + memcpy(pDst, pSrc, sampleSizeInBytes); + } + } + } break; + } +} + + +/************************************************************************************************************************************************************** + +Biquad Filter + +**************************************************************************************************************************************************************/ +#ifndef MA_BIQUAD_FIXED_POINT_SHIFT +#define MA_BIQUAD_FIXED_POINT_SHIFT 14 +#endif + +static ma_int32 ma_biquad_float_to_fp(double x) +{ + return (ma_int32)(x * (1 << MA_BIQUAD_FIXED_POINT_SHIFT)); +} + +MA_API ma_biquad_config ma_biquad_config_init(ma_format format, ma_uint32 channels, double b0, double b1, double b2, double a0, double a1, double a2) +{ + 
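+    /*
+    The coefficients follow the usual biquad transfer function convention:
+
+        H(z) = (b0 + b1*z^-1 + b2*z^-2) / (a0 + a1*z^-1 + a2*z^-2)
+
+    a0 is stored un-normalized here; ma_biquad_reinit() divides the other coefficients through by
+    a0, which is also why a0 == 0 is rejected there. For the s16 path the normalized coefficients
+    are converted to Q14 fixed point with ma_biquad_float_to_fp(), so 0.5 becomes 0.5 * (1 << 14) = 8192.
+    */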
ma_biquad_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.b0 = b0; + config.b1 = b1; + config.b2 = b2; + config.a0 = a0; + config.a1 = a1; + config.a2 = a2; + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t r1Offset; + size_t r2Offset; +} ma_biquad_heap_layout; + +static ma_result ma_biquad_get_heap_layout(const ma_biquad_config* pConfig, ma_biquad_heap_layout* pHeapLayout) +{ + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channels == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* R0 */ + pHeapLayout->r1Offset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(ma_biquad_coefficient) * pConfig->channels; + + /* R1 */ + pHeapLayout->r2Offset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(ma_biquad_coefficient) * pConfig->channels; + + /* Make sure allocation size is aligned. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +MA_API ma_result ma_biquad_get_heap_size(const ma_biquad_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_biquad_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_biquad_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_biquad_init_preallocated(const ma_biquad_config* pConfig, void* pHeap, ma_biquad* pBQ) +{ + ma_result result; + ma_biquad_heap_layout heapLayout; + + if (pBQ == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pBQ); + + result = ma_biquad_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pBQ->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pBQ->pR1 = (ma_biquad_coefficient*)ma_offset_ptr(pHeap, heapLayout.r1Offset); + pBQ->pR2 = (ma_biquad_coefficient*)ma_offset_ptr(pHeap, heapLayout.r2Offset); + + return ma_biquad_reinit(pConfig, pBQ); +} + +MA_API ma_result ma_biquad_init(const ma_biquad_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_biquad* pBQ) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_biquad_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_biquad_init_preallocated(pConfig, pHeap, pBQ); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pBQ->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_biquad_uninit(ma_biquad* pBQ, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pBQ == NULL) { + return; + } + + if (pBQ->_ownsHeap) { + ma_free(pBQ->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_biquad_reinit(const ma_biquad_config* pConfig, ma_biquad* pBQ) +{ + if (pBQ == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->a0 == 0) { + return MA_INVALID_ARGS; /* Division by zero. */ + } + + /* Only supporting f32 and s16. 
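+       f32 runs through the floating point coefficients and s16 through the Q14 fixed point ones
+       set up below; no other sample format has a kernel in ma_biquad_process_pcm_frames().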
*/ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. */ + if (pBQ->format != ma_format_unknown && pBQ->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. */ + if (pBQ->channels != 0 && pBQ->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + + pBQ->format = pConfig->format; + pBQ->channels = pConfig->channels; + + /* Normalize. */ + if (pConfig->format == ma_format_f32) { + pBQ->b0.f32 = (float)(pConfig->b0 / pConfig->a0); + pBQ->b1.f32 = (float)(pConfig->b1 / pConfig->a0); + pBQ->b2.f32 = (float)(pConfig->b2 / pConfig->a0); + pBQ->a1.f32 = (float)(pConfig->a1 / pConfig->a0); + pBQ->a2.f32 = (float)(pConfig->a2 / pConfig->a0); + } else { + pBQ->b0.s32 = ma_biquad_float_to_fp(pConfig->b0 / pConfig->a0); + pBQ->b1.s32 = ma_biquad_float_to_fp(pConfig->b1 / pConfig->a0); + pBQ->b2.s32 = ma_biquad_float_to_fp(pConfig->b2 / pConfig->a0); + pBQ->a1.s32 = ma_biquad_float_to_fp(pConfig->a1 / pConfig->a0); + pBQ->a2.s32 = ma_biquad_float_to_fp(pConfig->a2 / pConfig->a0); + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_biquad_clear_cache(ma_biquad* pBQ) +{ + if (pBQ == NULL) { + return MA_INVALID_ARGS; + } + + if (pBQ->format == ma_format_f32) { + pBQ->pR1->f32 = 0; + pBQ->pR2->f32 = 0; + } else { + pBQ->pR1->s32 = 0; + pBQ->pR2->s32 = 0; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_biquad_process_pcm_frame_f32__direct_form_2_transposed(ma_biquad* pBQ, float* pY, const float* pX) +{ + ma_uint32 c; + const ma_uint32 channels = pBQ->channels; + const float b0 = pBQ->b0.f32; + const float b1 = pBQ->b1.f32; + const float b2 = pBQ->b2.f32; + const float a1 = pBQ->a1.f32; + const float a2 = pBQ->a2.f32; + + MA_ASSUME(channels > 0); + for (c = 0; c < channels; c += 1) { + float r1 = pBQ->pR1[c].f32; + float r2 = pBQ->pR2[c].f32; + float x = pX[c]; + float y; + + y = b0*x + r1; + r1 = b1*x - a1*y + r2; + r2 = b2*x - a2*y; + + pY[c] = y; + pBQ->pR1[c].f32 = r1; + pBQ->pR2[c].f32 = r2; + } +} + +static MA_INLINE void ma_biquad_process_pcm_frame_f32(ma_biquad* pBQ, float* pY, const float* pX) +{ + ma_biquad_process_pcm_frame_f32__direct_form_2_transposed(pBQ, pY, pX); +} + +static MA_INLINE void ma_biquad_process_pcm_frame_s16__direct_form_2_transposed(ma_biquad* pBQ, ma_int16* pY, const ma_int16* pX) +{ + ma_uint32 c; + const ma_uint32 channels = pBQ->channels; + const ma_int32 b0 = pBQ->b0.s32; + const ma_int32 b1 = pBQ->b1.s32; + const ma_int32 b2 = pBQ->b2.s32; + const ma_int32 a1 = pBQ->a1.s32; + const ma_int32 a2 = pBQ->a2.s32; + + MA_ASSUME(channels > 0); + for (c = 0; c < channels; c += 1) { + ma_int32 r1 = pBQ->pR1[c].s32; + ma_int32 r2 = pBQ->pR2[c].s32; + ma_int32 x = pX[c]; + ma_int32 y; + + y = (b0*x + r1) >> MA_BIQUAD_FIXED_POINT_SHIFT; + r1 = (b1*x - a1*y + r2); + r2 = (b2*x - a2*y); + + pY[c] = (ma_int16)ma_clamp(y, -32768, 32767); + pBQ->pR1[c].s32 = r1; + pBQ->pR2[c].s32 = r2; + } +} + +static MA_INLINE void ma_biquad_process_pcm_frame_s16(ma_biquad* pBQ, ma_int16* pY, const ma_int16* pX) +{ + ma_biquad_process_pcm_frame_s16__direct_form_2_transposed(pBQ, pY, pX); +} + +MA_API ma_result ma_biquad_process_pcm_frames(ma_biquad* pBQ, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_uint32 n; + + if (pBQ == NULL || pFramesOut == NULL || pFramesIn == NULL) { + return MA_INVALID_ARGS; + } + + /* Note that the logic below needs to support in-place 
filtering. That is, it must support the case where pFramesOut and pFramesIn are the same. */ + + if (pBQ->format == ma_format_f32) { + /* */ float* pY = ( float*)pFramesOut; + const float* pX = (const float*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_biquad_process_pcm_frame_f32__direct_form_2_transposed(pBQ, pY, pX); + pY += pBQ->channels; + pX += pBQ->channels; + } + } else if (pBQ->format == ma_format_s16) { + /* */ ma_int16* pY = ( ma_int16*)pFramesOut; + const ma_int16* pX = (const ma_int16*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_biquad_process_pcm_frame_s16__direct_form_2_transposed(pBQ, pY, pX); + pY += pBQ->channels; + pX += pBQ->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; /* Format not supported. Should never hit this because it's checked in ma_biquad_init() and ma_biquad_reinit(). */ + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_biquad_get_latency(const ma_biquad* pBQ) +{ + if (pBQ == NULL) { + return 0; + } + + return 2; +} + + +/************************************************************************************************************************************************************** + +Low-Pass Filter + +**************************************************************************************************************************************************************/ +MA_API ma_lpf1_config ma_lpf1_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency) +{ + ma_lpf1_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.q = 0.5; + + return config; +} + +MA_API ma_lpf2_config ma_lpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q) +{ + ma_lpf2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.q = q; + + /* Q cannot be 0 or else it'll result in a division by 0. In this case just default to 0.707107. */ + if (config.q == 0) { + config.q = 0.707107; + } + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t r1Offset; +} ma_lpf1_heap_layout; + +static ma_result ma_lpf1_get_heap_layout(const ma_lpf1_config* pConfig, ma_lpf1_heap_layout* pHeapLayout) +{ + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channels == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* R1 */ + pHeapLayout->r1Offset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(ma_biquad_coefficient) * pConfig->channels; + + /* Make sure allocation size is aligned. 
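+       Every *_get_heap_layout() routine in this file finishes with the same ma_align_64() round-up
+       before reporting the required size.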
*/ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +MA_API ma_result ma_lpf1_get_heap_size(const ma_lpf1_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_lpf1_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_lpf1_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_lpf1_init_preallocated(const ma_lpf1_config* pConfig, void* pHeap, ma_lpf1* pLPF) +{ + ma_result result; + ma_lpf1_heap_layout heapLayout; + + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pLPF); + + result = ma_lpf1_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pLPF->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pLPF->pR1 = (ma_biquad_coefficient*)ma_offset_ptr(pHeap, heapLayout.r1Offset); + + return ma_lpf1_reinit(pConfig, pLPF); +} + +MA_API ma_result ma_lpf1_init(const ma_lpf1_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_lpf1* pLPF) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_lpf1_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_lpf1_init_preallocated(pConfig, pHeap, pLPF); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pLPF->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_lpf1_uninit(ma_lpf1* pLPF, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pLPF == NULL) { + return; + } + + if (pLPF->_ownsHeap) { + ma_free(pLPF->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_lpf1_reinit(const ma_lpf1_config* pConfig, ma_lpf1* pLPF) +{ + double a; + + if (pLPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Only supporting f32 and s16. */ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. */ + if (pLPF->format != ma_format_unknown && pLPF->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. 
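+       The per-channel register cache (pR1) was sized for the original channel count when the heap
+       was laid out.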
*/ + if (pLPF->channels != 0 && pLPF->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + pLPF->format = pConfig->format; + pLPF->channels = pConfig->channels; + + a = ma_expd(-2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate); + if (pConfig->format == ma_format_f32) { + pLPF->a.f32 = (float)a; + } else { + pLPF->a.s32 = ma_biquad_float_to_fp(a); + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_lpf1_clear_cache(ma_lpf1* pLPF) +{ + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + if (pLPF->format == ma_format_f32) { + pLPF->a.f32 = 0; + } else { + pLPF->a.s32 = 0; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_lpf1_process_pcm_frame_f32(ma_lpf1* pLPF, float* pY, const float* pX) +{ + ma_uint32 c; + const ma_uint32 channels = pLPF->channels; + const float a = pLPF->a.f32; + const float b = 1 - a; + + MA_ASSUME(channels > 0); + for (c = 0; c < channels; c += 1) { + float r1 = pLPF->pR1[c].f32; + float x = pX[c]; + float y; + + y = b*x + a*r1; + + pY[c] = y; + pLPF->pR1[c].f32 = y; + } +} + +static MA_INLINE void ma_lpf1_process_pcm_frame_s16(ma_lpf1* pLPF, ma_int16* pY, const ma_int16* pX) +{ + ma_uint32 c; + const ma_uint32 channels = pLPF->channels; + const ma_int32 a = pLPF->a.s32; + const ma_int32 b = ((1 << MA_BIQUAD_FIXED_POINT_SHIFT) - a); + + MA_ASSUME(channels > 0); + for (c = 0; c < channels; c += 1) { + ma_int32 r1 = pLPF->pR1[c].s32; + ma_int32 x = pX[c]; + ma_int32 y; + + y = (b*x + a*r1) >> MA_BIQUAD_FIXED_POINT_SHIFT; + + pY[c] = (ma_int16)y; + pLPF->pR1[c].s32 = (ma_int32)y; + } +} + +MA_API ma_result ma_lpf1_process_pcm_frames(ma_lpf1* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_uint32 n; + + if (pLPF == NULL || pFramesOut == NULL || pFramesIn == NULL) { + return MA_INVALID_ARGS; + } + + /* Note that the logic below needs to support in-place filtering. That is, it must support the case where pFramesOut and pFramesIn are the same. */ + + if (pLPF->format == ma_format_f32) { + /* */ float* pY = ( float*)pFramesOut; + const float* pX = (const float*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_lpf1_process_pcm_frame_f32(pLPF, pY, pX); + pY += pLPF->channels; + pX += pLPF->channels; + } + } else if (pLPF->format == ma_format_s16) { + /* */ ma_int16* pY = ( ma_int16*)pFramesOut; + const ma_int16* pX = (const ma_int16*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_lpf1_process_pcm_frame_s16(pLPF, pY, pX); + pY += pLPF->channels; + pX += pLPF->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; /* Format not supported. Should never hit this because it's checked in ma_biquad_init() and ma_biquad_reinit(). 
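+       (For this first order filter the check actually lives in ma_lpf1_reinit(), which only
+       accepts f32 and s16.)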
*/ + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_lpf1_get_latency(const ma_lpf1* pLPF) +{ + if (pLPF == NULL) { + return 0; + } + + return 1; +} + + +static MA_INLINE ma_biquad_config ma_lpf2__get_biquad_config(const ma_lpf2_config* pConfig) +{ + ma_biquad_config bqConfig; + double q; + double w; + double s; + double c; + double a; + + MA_ASSERT(pConfig != NULL); + + q = pConfig->q; + w = 2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate; + s = ma_sind(w); + c = ma_cosd(w); + a = s / (2*q); + + bqConfig.b0 = (1 - c) / 2; + bqConfig.b1 = 1 - c; + bqConfig.b2 = (1 - c) / 2; + bqConfig.a0 = 1 + a; + bqConfig.a1 = -2 * c; + bqConfig.a2 = 1 - a; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_lpf2_get_heap_size(const ma_lpf2_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_biquad_config bqConfig; + bqConfig = ma_lpf2__get_biquad_config(pConfig); + + return ma_biquad_get_heap_size(&bqConfig, pHeapSizeInBytes); +} + +MA_API ma_result ma_lpf2_init_preallocated(const ma_lpf2_config* pConfig, void* pHeap, ma_lpf2* pLPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pLPF); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_lpf2__get_biquad_config(pConfig); + result = ma_biquad_init_preallocated(&bqConfig, pHeap, &pLPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_lpf2_init(const ma_lpf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_lpf2* pLPF) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_lpf2_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_lpf2_init_preallocated(pConfig, pHeap, pLPF); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pLPF->bq._ownsHeap = MA_TRUE; /* <-- This will cause the biquad to take ownership of the heap and free it when it's uninitialized. */ + return MA_SUCCESS; +} + +MA_API void ma_lpf2_uninit(ma_lpf2* pLPF, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pLPF == NULL) { + return; + } + + ma_biquad_uninit(&pLPF->bq, pAllocationCallbacks); /* <-- This will free the heap allocation. 
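+       (only when the biquad owns it, i.e. the filter was created with ma_lpf2_init() rather than
+       ma_lpf2_init_preallocated())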
*/ +} + +MA_API ma_result ma_lpf2_reinit(const ma_lpf2_config* pConfig, ma_lpf2* pLPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pLPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_lpf2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pLPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_lpf2_clear_cache(ma_lpf2* pLPF) +{ + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + ma_biquad_clear_cache(&pLPF->bq); + + return MA_SUCCESS; +} + +static MA_INLINE void ma_lpf2_process_pcm_frame_s16(ma_lpf2* pLPF, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pLPF->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_lpf2_process_pcm_frame_f32(ma_lpf2* pLPF, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pLPF->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_lpf2_process_pcm_frames(ma_lpf2* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pLPF->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_lpf2_get_latency(const ma_lpf2* pLPF) +{ + if (pLPF == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pLPF->bq); +} + + +MA_API ma_lpf_config ma_lpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order) +{ + ma_lpf_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.order = ma_min(order, MA_MAX_FILTER_ORDER); + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t lpf1Offset; + size_t lpf2Offset; /* Offset of the first second order filter. Subsequent filters will come straight after, and will each have the same heap size. 
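+                       Within each group the ma_lpf1/ma_lpf2 structs are packed as an array at the
+                       group's offset, with each sub-filter's own heap block following the array;
+                       ma_lpf_reinit__internal() computes the per-filter offsets on that basis.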
*/ +} ma_lpf_heap_layout; + +static void ma_lpf_calculate_sub_lpf_counts(ma_uint32 order, ma_uint32* pLPF1Count, ma_uint32* pLPF2Count) +{ + MA_ASSERT(pLPF1Count != NULL); + MA_ASSERT(pLPF2Count != NULL); + + *pLPF1Count = order % 2; + *pLPF2Count = order / 2; +} + +static ma_result ma_lpf_get_heap_layout(const ma_lpf_config* pConfig, ma_lpf_heap_layout* pHeapLayout) +{ + ma_result result; + ma_uint32 lpf1Count; + ma_uint32 lpf2Count; + ma_uint32 ilpf1; + ma_uint32 ilpf2; + + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channels == 0) { + return MA_INVALID_ARGS; + } + + if (pConfig->order > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + ma_lpf_calculate_sub_lpf_counts(pConfig->order, &lpf1Count, &lpf2Count); + + pHeapLayout->sizeInBytes = 0; + + /* LPF 1 */ + pHeapLayout->lpf1Offset = pHeapLayout->sizeInBytes; + for (ilpf1 = 0; ilpf1 < lpf1Count; ilpf1 += 1) { + size_t lpf1HeapSizeInBytes; + ma_lpf1_config lpf1Config = ma_lpf1_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency); + + result = ma_lpf1_get_heap_size(&lpf1Config, &lpf1HeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes += sizeof(ma_lpf1) + lpf1HeapSizeInBytes; + } + + /* LPF 2*/ + pHeapLayout->lpf2Offset = pHeapLayout->sizeInBytes; + for (ilpf2 = 0; ilpf2 < lpf2Count; ilpf2 += 1) { + size_t lpf2HeapSizeInBytes; + ma_lpf2_config lpf2Config = ma_lpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, 0.707107); /* <-- The "q" parameter does not matter for the purpose of calculating the heap size. */ + + result = ma_lpf2_get_heap_size(&lpf2Config, &lpf2HeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes += sizeof(ma_lpf2) + lpf2HeapSizeInBytes; + } + + /* Make sure allocation size is aligned. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +static ma_result ma_lpf_reinit__internal(const ma_lpf_config* pConfig, void* pHeap, ma_lpf* pLPF, ma_bool32 isNew) +{ + ma_result result; + ma_uint32 lpf1Count; + ma_uint32 lpf2Count; + ma_uint32 ilpf1; + ma_uint32 ilpf2; + ma_lpf_heap_layout heapLayout; /* Only used if isNew is true. */ + + if (pLPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Only supporting f32 and s16. */ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. */ + if (pLPF->format != ma_format_unknown && pLPF->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. */ + if (pLPF->channels != 0 && pLPF->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + if (pConfig->order > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + ma_lpf_calculate_sub_lpf_counts(pConfig->order, &lpf1Count, &lpf2Count); + + /* The filter order can't change between reinits. 
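+       The order determines how many first and second order sub-filters exist, and with that the
+       heap layout, both of which were fixed at init time.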
*/ + if (!isNew) { + if (pLPF->lpf1Count != lpf1Count || pLPF->lpf2Count != lpf2Count) { + return MA_INVALID_OPERATION; + } + } + + if (isNew) { + result = ma_lpf_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pLPF->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pLPF->pLPF1 = (ma_lpf1*)ma_offset_ptr(pHeap, heapLayout.lpf1Offset); + pLPF->pLPF2 = (ma_lpf2*)ma_offset_ptr(pHeap, heapLayout.lpf2Offset); + } else { + MA_ZERO_OBJECT(&heapLayout); /* To silence a compiler warning. */ + } + + for (ilpf1 = 0; ilpf1 < lpf1Count; ilpf1 += 1) { + ma_lpf1_config lpf1Config = ma_lpf1_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency); + + if (isNew) { + size_t lpf1HeapSizeInBytes; + + result = ma_lpf1_get_heap_size(&lpf1Config, &lpf1HeapSizeInBytes); + if (result == MA_SUCCESS) { + result = ma_lpf1_init_preallocated(&lpf1Config, ma_offset_ptr(pHeap, heapLayout.lpf1Offset + (sizeof(ma_lpf1) * lpf1Count) + (ilpf1 * lpf1HeapSizeInBytes)), &pLPF->pLPF1[ilpf1]); + } + } else { + result = ma_lpf1_reinit(&lpf1Config, &pLPF->pLPF1[ilpf1]); + } + + if (result != MA_SUCCESS) { + ma_uint32 jlpf1; + + for (jlpf1 = 0; jlpf1 < ilpf1; jlpf1 += 1) { + ma_lpf1_uninit(&pLPF->pLPF1[jlpf1], NULL); /* No need for allocation callbacks here since we used a preallocated heap allocation. */ + } + + return result; + } + } + + for (ilpf2 = 0; ilpf2 < lpf2Count; ilpf2 += 1) { + ma_lpf2_config lpf2Config; + double q; + double a; + + /* Tempting to use 0.707107, but won't result in a Butterworth filter if the order is > 2. */ + if (lpf1Count == 1) { + a = (1 + ilpf2*1) * (MA_PI_D/(pConfig->order*1)); /* Odd order. */ + } else { + a = (1 + ilpf2*2) * (MA_PI_D/(pConfig->order*2)); /* Even order. */ + } + q = 1 / (2*ma_cosd(a)); + + lpf2Config = ma_lpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, q); + + if (isNew) { + size_t lpf2HeapSizeInBytes; + + result = ma_lpf2_get_heap_size(&lpf2Config, &lpf2HeapSizeInBytes); + if (result == MA_SUCCESS) { + result = ma_lpf2_init_preallocated(&lpf2Config, ma_offset_ptr(pHeap, heapLayout.lpf2Offset + (sizeof(ma_lpf2) * lpf2Count) + (ilpf2 * lpf2HeapSizeInBytes)), &pLPF->pLPF2[ilpf2]); + } + } else { + result = ma_lpf2_reinit(&lpf2Config, &pLPF->pLPF2[ilpf2]); + } + + if (result != MA_SUCCESS) { + ma_uint32 jlpf1; + ma_uint32 jlpf2; + + for (jlpf1 = 0; jlpf1 < lpf1Count; jlpf1 += 1) { + ma_lpf1_uninit(&pLPF->pLPF1[jlpf1], NULL); /* No need for allocation callbacks here since we used a preallocated heap allocation. */ + } + + for (jlpf2 = 0; jlpf2 < ilpf2; jlpf2 += 1) { + ma_lpf2_uninit(&pLPF->pLPF2[jlpf2], NULL); /* No need for allocation callbacks here since we used a preallocated heap allocation. 
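+                       The heap block itself is released by ma_lpf_init() when this error
+                       propagates, or stays with the caller in the init_preallocated() case.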
*/ + } + + return result; + } + } + + pLPF->lpf1Count = lpf1Count; + pLPF->lpf2Count = lpf2Count; + pLPF->format = pConfig->format; + pLPF->channels = pConfig->channels; + pLPF->sampleRate = pConfig->sampleRate; + + return MA_SUCCESS; +} + +MA_API ma_result ma_lpf_get_heap_size(const ma_lpf_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_lpf_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_lpf_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return result; +} + +MA_API ma_result ma_lpf_init_preallocated(const ma_lpf_config* pConfig, void* pHeap, ma_lpf* pLPF) +{ + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pLPF); + + return ma_lpf_reinit__internal(pConfig, pHeap, pLPF, /*isNew*/MA_TRUE); +} + +MA_API ma_result ma_lpf_init(const ma_lpf_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_lpf* pLPF) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_lpf_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_lpf_init_preallocated(pConfig, pHeap, pLPF); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pLPF->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_lpf_uninit(ma_lpf* pLPF, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_uint32 ilpf1; + ma_uint32 ilpf2; + + if (pLPF == NULL) { + return; + } + + for (ilpf1 = 0; ilpf1 < pLPF->lpf1Count; ilpf1 += 1) { + ma_lpf1_uninit(&pLPF->pLPF1[ilpf1], pAllocationCallbacks); + } + + for (ilpf2 = 0; ilpf2 < pLPF->lpf2Count; ilpf2 += 1) { + ma_lpf2_uninit(&pLPF->pLPF2[ilpf2], pAllocationCallbacks); + } + + if (pLPF->_ownsHeap) { + ma_free(pLPF->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_lpf_reinit(const ma_lpf_config* pConfig, ma_lpf* pLPF) +{ + return ma_lpf_reinit__internal(pConfig, NULL, pLPF, /*isNew*/MA_FALSE); +} + +MA_API ma_result ma_lpf_clear_cache(ma_lpf* pLPF) +{ + ma_uint32 ilpf1; + ma_uint32 ilpf2; + + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + for (ilpf1 = 0; ilpf1 < pLPF->lpf1Count; ilpf1 += 1) { + ma_lpf1_clear_cache(&pLPF->pLPF1[ilpf1]); + } + + for (ilpf2 = 0; ilpf2 < pLPF->lpf2Count; ilpf2 += 1) { + ma_lpf2_clear_cache(&pLPF->pLPF2[ilpf2]); + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_lpf_process_pcm_frame_f32(ma_lpf* pLPF, float* pY, const void* pX) +{ + ma_uint32 ilpf1; + ma_uint32 ilpf2; + + MA_ASSERT(pLPF->format == ma_format_f32); + + MA_MOVE_MEMORY(pY, pX, ma_get_bytes_per_frame(pLPF->format, pLPF->channels)); + + for (ilpf1 = 0; ilpf1 < pLPF->lpf1Count; ilpf1 += 1) { + ma_lpf1_process_pcm_frame_f32(&pLPF->pLPF1[ilpf1], pY, pY); + } + + for (ilpf2 = 0; ilpf2 < pLPF->lpf2Count; ilpf2 += 1) { + ma_lpf2_process_pcm_frame_f32(&pLPF->pLPF2[ilpf2], pY, pY); + } +} + +static MA_INLINE void ma_lpf_process_pcm_frame_s16(ma_lpf* pLPF, ma_int16* pY, const ma_int16* pX) +{ + ma_uint32 ilpf1; + ma_uint32 ilpf2; + + MA_ASSERT(pLPF->format == ma_format_s16); + + MA_MOVE_MEMORY(pY, pX, ma_get_bytes_per_frame(pLPF->format, pLPF->channels)); + + for (ilpf1 = 0; ilpf1 < pLPF->lpf1Count; ilpf1 += 1) { + 
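+        /* A first order stage only exists for odd filter orders (lpf1Count = order % 2); it is applied in-place to the frame copied into pY above. */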
ma_lpf1_process_pcm_frame_s16(&pLPF->pLPF1[ilpf1], pY, pY); + } + + for (ilpf2 = 0; ilpf2 < pLPF->lpf2Count; ilpf2 += 1) { + ma_lpf2_process_pcm_frame_s16(&pLPF->pLPF2[ilpf2], pY, pY); + } +} + +MA_API ma_result ma_lpf_process_pcm_frames(ma_lpf* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_result result; + ma_uint32 ilpf1; + ma_uint32 ilpf2; + + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + /* Faster path for in-place. */ + if (pFramesOut == pFramesIn) { + for (ilpf1 = 0; ilpf1 < pLPF->lpf1Count; ilpf1 += 1) { + result = ma_lpf1_process_pcm_frames(&pLPF->pLPF1[ilpf1], pFramesOut, pFramesOut, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } + + for (ilpf2 = 0; ilpf2 < pLPF->lpf2Count; ilpf2 += 1) { + result = ma_lpf2_process_pcm_frames(&pLPF->pLPF2[ilpf2], pFramesOut, pFramesOut, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } + } + + /* Slightly slower path for copying. */ + if (pFramesOut != pFramesIn) { + ma_uint32 iFrame; + + /* */ if (pLPF->format == ma_format_f32) { + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_lpf_process_pcm_frame_f32(pLPF, pFramesOutF32, pFramesInF32); + pFramesOutF32 += pLPF->channels; + pFramesInF32 += pLPF->channels; + } + } else if (pLPF->format == ma_format_s16) { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_lpf_process_pcm_frame_s16(pLPF, pFramesOutS16, pFramesInS16); + pFramesOutS16 += pLPF->channels; + pFramesInS16 += pLPF->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_OPERATION; /* Should never hit this. */ + } + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_lpf_get_latency(const ma_lpf* pLPF) +{ + if (pLPF == NULL) { + return 0; + } + + return pLPF->lpf2Count*2 + pLPF->lpf1Count; +} + + +/************************************************************************************************************************************************************** + +High-Pass Filtering + +**************************************************************************************************************************************************************/ +MA_API ma_hpf1_config ma_hpf1_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency) +{ + ma_hpf1_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + + return config; +} + +MA_API ma_hpf2_config ma_hpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q) +{ + ma_hpf2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.q = q; + + /* Q cannot be 0 or else it'll result in a division by 0. In this case just default to 0.707107. 
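+       0.707107 is approximately 1/sqrt(2), the Q of a second order Butterworth response.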
*/ + if (config.q == 0) { + config.q = 0.707107; + } + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t r1Offset; +} ma_hpf1_heap_layout; + +static ma_result ma_hpf1_get_heap_layout(const ma_hpf1_config* pConfig, ma_hpf1_heap_layout* pHeapLayout) +{ + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channels == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* R1 */ + pHeapLayout->r1Offset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(ma_biquad_coefficient) * pConfig->channels; + + /* Make sure allocation size is aligned. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +MA_API ma_result ma_hpf1_get_heap_size(const ma_hpf1_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_hpf1_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_hpf1_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_hpf1_init_preallocated(const ma_hpf1_config* pConfig, void* pHeap, ma_hpf1* pLPF) +{ + ma_result result; + ma_hpf1_heap_layout heapLayout; + + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pLPF); + + result = ma_hpf1_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pLPF->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pLPF->pR1 = (ma_biquad_coefficient*)ma_offset_ptr(pHeap, heapLayout.r1Offset); + + return ma_hpf1_reinit(pConfig, pLPF); +} + +MA_API ma_result ma_hpf1_init(const ma_hpf1_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hpf1* pLPF) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_hpf1_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_hpf1_init_preallocated(pConfig, pHeap, pLPF); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pLPF->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_hpf1_uninit(ma_hpf1* pHPF, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pHPF == NULL) { + return; + } + + if (pHPF->_ownsHeap) { + ma_free(pHPF->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_hpf1_reinit(const ma_hpf1_config* pConfig, ma_hpf1* pHPF) +{ + double a; + + if (pHPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Only supporting f32 and s16. */ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. */ + if (pHPF->format != ma_format_unknown && pHPF->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. 
*/ + if (pHPF->channels != 0 && pHPF->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + pHPF->format = pConfig->format; + pHPF->channels = pConfig->channels; + + a = ma_expd(-2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate); + if (pConfig->format == ma_format_f32) { + pHPF->a.f32 = (float)a; + } else { + pHPF->a.s32 = ma_biquad_float_to_fp(a); + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_hpf1_process_pcm_frame_f32(ma_hpf1* pHPF, float* pY, const float* pX) +{ + ma_uint32 c; + const ma_uint32 channels = pHPF->channels; + const float a = 1 - pHPF->a.f32; + const float b = 1 - a; + + MA_ASSUME(channels > 0); + for (c = 0; c < channels; c += 1) { + float r1 = pHPF->pR1[c].f32; + float x = pX[c]; + float y; + + y = b*x - a*r1; + + pY[c] = y; + pHPF->pR1[c].f32 = y; + } +} + +static MA_INLINE void ma_hpf1_process_pcm_frame_s16(ma_hpf1* pHPF, ma_int16* pY, const ma_int16* pX) +{ + ma_uint32 c; + const ma_uint32 channels = pHPF->channels; + const ma_int32 a = ((1 << MA_BIQUAD_FIXED_POINT_SHIFT) - pHPF->a.s32); + const ma_int32 b = ((1 << MA_BIQUAD_FIXED_POINT_SHIFT) - a); + + MA_ASSUME(channels > 0); + for (c = 0; c < channels; c += 1) { + ma_int32 r1 = pHPF->pR1[c].s32; + ma_int32 x = pX[c]; + ma_int32 y; + + y = (b*x - a*r1) >> MA_BIQUAD_FIXED_POINT_SHIFT; + + pY[c] = (ma_int16)y; + pHPF->pR1[c].s32 = (ma_int32)y; + } +} + +MA_API ma_result ma_hpf1_process_pcm_frames(ma_hpf1* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_uint32 n; + + if (pHPF == NULL || pFramesOut == NULL || pFramesIn == NULL) { + return MA_INVALID_ARGS; + } + + /* Note that the logic below needs to support in-place filtering. That is, it must support the case where pFramesOut and pFramesIn are the same. */ + + if (pHPF->format == ma_format_f32) { + /* */ float* pY = ( float*)pFramesOut; + const float* pX = (const float*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_hpf1_process_pcm_frame_f32(pHPF, pY, pX); + pY += pHPF->channels; + pX += pHPF->channels; + } + } else if (pHPF->format == ma_format_s16) { + /* */ ma_int16* pY = ( ma_int16*)pFramesOut; + const ma_int16* pX = (const ma_int16*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_hpf1_process_pcm_frame_s16(pHPF, pY, pX); + pY += pHPF->channels; + pX += pHPF->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; /* Format not supported. Should never hit this because it's checked in ma_biquad_init() and ma_biquad_reinit(). 
*/ + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_hpf1_get_latency(const ma_hpf1* pHPF) +{ + if (pHPF == NULL) { + return 0; + } + + return 1; +} + + +static MA_INLINE ma_biquad_config ma_hpf2__get_biquad_config(const ma_hpf2_config* pConfig) +{ + ma_biquad_config bqConfig; + double q; + double w; + double s; + double c; + double a; + + MA_ASSERT(pConfig != NULL); + + q = pConfig->q; + w = 2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate; + s = ma_sind(w); + c = ma_cosd(w); + a = s / (2*q); + + bqConfig.b0 = (1 + c) / 2; + bqConfig.b1 = -(1 + c); + bqConfig.b2 = (1 + c) / 2; + bqConfig.a0 = 1 + a; + bqConfig.a1 = -2 * c; + bqConfig.a2 = 1 - a; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_hpf2_get_heap_size(const ma_hpf2_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_biquad_config bqConfig; + bqConfig = ma_hpf2__get_biquad_config(pConfig); + + return ma_biquad_get_heap_size(&bqConfig, pHeapSizeInBytes); +} + +MA_API ma_result ma_hpf2_init_preallocated(const ma_hpf2_config* pConfig, void* pHeap, ma_hpf2* pHPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pHPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pHPF); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_hpf2__get_biquad_config(pConfig); + result = ma_biquad_init_preallocated(&bqConfig, pHeap, &pHPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_hpf2_init(const ma_hpf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hpf2* pHPF) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_hpf2_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_hpf2_init_preallocated(pConfig, pHeap, pHPF); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pHPF->bq._ownsHeap = MA_TRUE; /* <-- This will cause the biquad to take ownership of the heap and free it when it's uninitialized. */ + return MA_SUCCESS; +} + +MA_API void ma_hpf2_uninit(ma_hpf2* pHPF, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pHPF == NULL) { + return; + } + + ma_biquad_uninit(&pHPF->bq, pAllocationCallbacks); /* <-- This will free the heap allocation. 
*/ +} + +MA_API ma_result ma_hpf2_reinit(const ma_hpf2_config* pConfig, ma_hpf2* pHPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pHPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_hpf2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pHPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_hpf2_process_pcm_frame_s16(ma_hpf2* pHPF, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pHPF->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_hpf2_process_pcm_frame_f32(ma_hpf2* pHPF, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pHPF->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_hpf2_process_pcm_frames(ma_hpf2* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pHPF == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pHPF->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_hpf2_get_latency(const ma_hpf2* pHPF) +{ + if (pHPF == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pHPF->bq); +} + + +MA_API ma_hpf_config ma_hpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order) +{ + ma_hpf_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.order = ma_min(order, MA_MAX_FILTER_ORDER); + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t hpf1Offset; + size_t hpf2Offset; /* Offset of the first second order filter. Subsequent filters will come straight after, and will each have the same heap size. */ +} ma_hpf_heap_layout; + +static void ma_hpf_calculate_sub_hpf_counts(ma_uint32 order, ma_uint32* pHPF1Count, ma_uint32* pHPF2Count) +{ + MA_ASSERT(pHPF1Count != NULL); + MA_ASSERT(pHPF2Count != NULL); + + *pHPF1Count = order % 2; + *pHPF2Count = order / 2; +} + +static ma_result ma_hpf_get_heap_layout(const ma_hpf_config* pConfig, ma_hpf_heap_layout* pHeapLayout) +{ + ma_result result; + ma_uint32 hpf1Count; + ma_uint32 hpf2Count; + ma_uint32 ihpf1; + ma_uint32 ihpf2; + + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channels == 0) { + return MA_INVALID_ARGS; + } + + if (pConfig->order > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + ma_hpf_calculate_sub_hpf_counts(pConfig->order, &hpf1Count, &hpf2Count); + + pHeapLayout->sizeInBytes = 0; + + /* HPF 1 */ + pHeapLayout->hpf1Offset = pHeapLayout->sizeInBytes; + for (ihpf1 = 0; ihpf1 < hpf1Count; ihpf1 += 1) { + size_t hpf1HeapSizeInBytes; + ma_hpf1_config hpf1Config = ma_hpf1_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency); + + result = ma_hpf1_get_heap_size(&hpf1Config, &hpf1HeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes += sizeof(ma_hpf1) + hpf1HeapSizeInBytes; + } + + /* HPF 2*/ + pHeapLayout->hpf2Offset = pHeapLayout->sizeInBytes; + for (ihpf2 = 0; ihpf2 < hpf2Count; ihpf2 += 1) { + size_t hpf2HeapSizeInBytes; + ma_hpf2_config hpf2Config = ma_hpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, 0.707107); /* <-- The "q" parameter does not matter for the purpose of calculating the heap size. 
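+                  The biquad heap only scales with the channel count (see ma_biquad_get_heap_layout()),
+                  so any non-zero q yields the same size.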
*/ + + result = ma_hpf2_get_heap_size(&hpf2Config, &hpf2HeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes += sizeof(ma_hpf2) + hpf2HeapSizeInBytes; + } + + /* Make sure allocation size is aligned. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +static ma_result ma_hpf_reinit__internal(const ma_hpf_config* pConfig, void* pHeap, ma_hpf* pHPF, ma_bool32 isNew) +{ + ma_result result; + ma_uint32 hpf1Count; + ma_uint32 hpf2Count; + ma_uint32 ihpf1; + ma_uint32 ihpf2; + ma_hpf_heap_layout heapLayout; /* Only used if isNew is true. */ + + if (pHPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Only supporting f32 and s16. */ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. */ + if (pHPF->format != ma_format_unknown && pHPF->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. */ + if (pHPF->channels != 0 && pHPF->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + if (pConfig->order > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + ma_hpf_calculate_sub_hpf_counts(pConfig->order, &hpf1Count, &hpf2Count); + + /* The filter order can't change between reinits. */ + if (!isNew) { + if (pHPF->hpf1Count != hpf1Count || pHPF->hpf2Count != hpf2Count) { + return MA_INVALID_OPERATION; + } + } + + if (isNew) { + result = ma_hpf_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pHPF->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pHPF->pHPF1 = (ma_hpf1*)ma_offset_ptr(pHeap, heapLayout.hpf1Offset); + pHPF->pHPF2 = (ma_hpf2*)ma_offset_ptr(pHeap, heapLayout.hpf2Offset); + } else { + MA_ZERO_OBJECT(&heapLayout); /* To silence a compiler warning. */ + } + + for (ihpf1 = 0; ihpf1 < hpf1Count; ihpf1 += 1) { + ma_hpf1_config hpf1Config = ma_hpf1_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency); + + if (isNew) { + size_t hpf1HeapSizeInBytes; + + result = ma_hpf1_get_heap_size(&hpf1Config, &hpf1HeapSizeInBytes); + if (result == MA_SUCCESS) { + result = ma_hpf1_init_preallocated(&hpf1Config, ma_offset_ptr(pHeap, heapLayout.hpf1Offset + (sizeof(ma_hpf1) * hpf1Count) + (ihpf1 * hpf1HeapSizeInBytes)), &pHPF->pHPF1[ihpf1]); + } + } else { + result = ma_hpf1_reinit(&hpf1Config, &pHPF->pHPF1[ihpf1]); + } + + if (result != MA_SUCCESS) { + ma_uint32 jhpf1; + + for (jhpf1 = 0; jhpf1 < ihpf1; jhpf1 += 1) { + ma_hpf1_uninit(&pHPF->pHPF1[jhpf1], NULL); /* No need for allocation callbacks here since we used a preallocated heap allocation. */ + } + + return result; + } + } + + for (ihpf2 = 0; ihpf2 < hpf2Count; ihpf2 += 1) { + ma_hpf2_config hpf2Config; + double q; + double a; + + /* Tempting to use 0.707107, but won't result in a Butterworth filter if the order is > 2. */ + if (hpf1Count == 1) { + a = (1 + ihpf2*1) * (MA_PI_D/(pConfig->order*1)); /* Odd order. */ + } else { + a = (1 + ihpf2*2) * (MA_PI_D/(pConfig->order*2)); /* Even order. 
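+                  Here a is the Butterworth pole angle (2*k + 1) * pi / (2*N) for the k-th second
+                  order section (k = ihpf2) of an order N (pConfig->order) filter, and q = 1/(2*cos(a))
+                  below. For example N = 2 gives q ~= 0.707107, and N = 4 gives q ~= 0.541 and q ~= 1.307.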
*/ + } + q = 1 / (2*ma_cosd(a)); + + hpf2Config = ma_hpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, q); + + if (isNew) { + size_t hpf2HeapSizeInBytes; + + result = ma_hpf2_get_heap_size(&hpf2Config, &hpf2HeapSizeInBytes); + if (result == MA_SUCCESS) { + result = ma_hpf2_init_preallocated(&hpf2Config, ma_offset_ptr(pHeap, heapLayout.hpf2Offset + (sizeof(ma_hpf2) * hpf2Count) + (ihpf2 * hpf2HeapSizeInBytes)), &pHPF->pHPF2[ihpf2]); + } + } else { + result = ma_hpf2_reinit(&hpf2Config, &pHPF->pHPF2[ihpf2]); + } + + if (result != MA_SUCCESS) { + ma_uint32 jhpf1; + ma_uint32 jhpf2; + + for (jhpf1 = 0; jhpf1 < hpf1Count; jhpf1 += 1) { + ma_hpf1_uninit(&pHPF->pHPF1[jhpf1], NULL); /* No need for allocation callbacks here since we used a preallocated heap allocation. */ + } + + for (jhpf2 = 0; jhpf2 < ihpf2; jhpf2 += 1) { + ma_hpf2_uninit(&pHPF->pHPF2[jhpf2], NULL); /* No need for allocation callbacks here since we used a preallocated heap allocation. */ + } + + return result; + } + } + + pHPF->hpf1Count = hpf1Count; + pHPF->hpf2Count = hpf2Count; + pHPF->format = pConfig->format; + pHPF->channels = pConfig->channels; + pHPF->sampleRate = pConfig->sampleRate; + + return MA_SUCCESS; +} + +MA_API ma_result ma_hpf_get_heap_size(const ma_hpf_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_hpf_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_hpf_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return result; +} + +MA_API ma_result ma_hpf_init_preallocated(const ma_hpf_config* pConfig, void* pHeap, ma_hpf* pLPF) +{ + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pLPF); + + return ma_hpf_reinit__internal(pConfig, pHeap, pLPF, /*isNew*/MA_TRUE); +} + +MA_API ma_result ma_hpf_init(const ma_hpf_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hpf* pHPF) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_hpf_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_hpf_init_preallocated(pConfig, pHeap, pHPF); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pHPF->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_hpf_uninit(ma_hpf* pHPF, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_uint32 ihpf1; + ma_uint32 ihpf2; + + if (pHPF == NULL) { + return; + } + + for (ihpf1 = 0; ihpf1 < pHPF->hpf1Count; ihpf1 += 1) { + ma_hpf1_uninit(&pHPF->pHPF1[ihpf1], pAllocationCallbacks); + } + + for (ihpf2 = 0; ihpf2 < pHPF->hpf2Count; ihpf2 += 1) { + ma_hpf2_uninit(&pHPF->pHPF2[ihpf2], pAllocationCallbacks); + } + + if (pHPF->_ownsHeap) { + ma_free(pHPF->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_hpf_reinit(const ma_hpf_config* pConfig, ma_hpf* pHPF) +{ + return ma_hpf_reinit__internal(pConfig, NULL, pHPF, /*isNew*/MA_FALSE); +} + +MA_API ma_result ma_hpf_process_pcm_frames(ma_hpf* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_result result; + ma_uint32 ihpf1; + ma_uint32 ihpf2; + + if (pHPF == NULL) { + return MA_INVALID_ARGS; + } + + /* Faster path for 
in-place. */ + if (pFramesOut == pFramesIn) { + for (ihpf1 = 0; ihpf1 < pHPF->hpf1Count; ihpf1 += 1) { + result = ma_hpf1_process_pcm_frames(&pHPF->pHPF1[ihpf1], pFramesOut, pFramesOut, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } + + for (ihpf2 = 0; ihpf2 < pHPF->hpf2Count; ihpf2 += 1) { + result = ma_hpf2_process_pcm_frames(&pHPF->pHPF2[ihpf2], pFramesOut, pFramesOut, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } + } + + /* Slightly slower path for copying. */ + if (pFramesOut != pFramesIn) { + ma_uint32 iFrame; + + /* */ if (pHPF->format == ma_format_f32) { + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + MA_COPY_MEMORY(pFramesOutF32, pFramesInF32, ma_get_bytes_per_frame(pHPF->format, pHPF->channels)); + + for (ihpf1 = 0; ihpf1 < pHPF->hpf1Count; ihpf1 += 1) { + ma_hpf1_process_pcm_frame_f32(&pHPF->pHPF1[ihpf1], pFramesOutF32, pFramesOutF32); + } + + for (ihpf2 = 0; ihpf2 < pHPF->hpf2Count; ihpf2 += 1) { + ma_hpf2_process_pcm_frame_f32(&pHPF->pHPF2[ihpf2], pFramesOutF32, pFramesOutF32); + } + + pFramesOutF32 += pHPF->channels; + pFramesInF32 += pHPF->channels; + } + } else if (pHPF->format == ma_format_s16) { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + MA_COPY_MEMORY(pFramesOutS16, pFramesInS16, ma_get_bytes_per_frame(pHPF->format, pHPF->channels)); + + for (ihpf1 = 0; ihpf1 < pHPF->hpf1Count; ihpf1 += 1) { + ma_hpf1_process_pcm_frame_s16(&pHPF->pHPF1[ihpf1], pFramesOutS16, pFramesOutS16); + } + + for (ihpf2 = 0; ihpf2 < pHPF->hpf2Count; ihpf2 += 1) { + ma_hpf2_process_pcm_frame_s16(&pHPF->pHPF2[ihpf2], pFramesOutS16, pFramesOutS16); + } + + pFramesOutS16 += pHPF->channels; + pFramesInS16 += pHPF->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_OPERATION; /* Should never hit this. */ + } + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_hpf_get_latency(const ma_hpf* pHPF) +{ + if (pHPF == NULL) { + return 0; + } + + return pHPF->hpf2Count*2 + pHPF->hpf1Count; +} + + +/************************************************************************************************************************************************************** + +Band-Pass Filtering + +**************************************************************************************************************************************************************/ +MA_API ma_bpf2_config ma_bpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q) +{ + ma_bpf2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.q = q; + + /* Q cannot be 0 or else it'll result in a division by 0. In this case just default to 0.707107. 
*/ + if (config.q == 0) { + config.q = 0.707107; + } + + return config; +} + + +static MA_INLINE ma_biquad_config ma_bpf2__get_biquad_config(const ma_bpf2_config* pConfig) +{ + ma_biquad_config bqConfig; + double q; + double w; + double s; + double c; + double a; + + MA_ASSERT(pConfig != NULL); + + q = pConfig->q; + w = 2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate; + s = ma_sind(w); + c = ma_cosd(w); + a = s / (2*q); + + bqConfig.b0 = q * a; + bqConfig.b1 = 0; + bqConfig.b2 = -q * a; + bqConfig.a0 = 1 + a; + bqConfig.a1 = -2 * c; + bqConfig.a2 = 1 - a; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_bpf2_get_heap_size(const ma_bpf2_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_biquad_config bqConfig; + bqConfig = ma_bpf2__get_biquad_config(pConfig); + + return ma_biquad_get_heap_size(&bqConfig, pHeapSizeInBytes); +} + +MA_API ma_result ma_bpf2_init_preallocated(const ma_bpf2_config* pConfig, void* pHeap, ma_bpf2* pBPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pBPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pBPF); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_bpf2__get_biquad_config(pConfig); + result = ma_biquad_init_preallocated(&bqConfig, pHeap, &pBPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_bpf2_init(const ma_bpf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_bpf2* pBPF) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_bpf2_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_bpf2_init_preallocated(pConfig, pHeap, pBPF); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pBPF->bq._ownsHeap = MA_TRUE; /* <-- This will cause the biquad to take ownership of the heap and free it when it's uninitialized. */ + return MA_SUCCESS; +} + +MA_API void ma_bpf2_uninit(ma_bpf2* pBPF, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pBPF == NULL) { + return; + } + + ma_biquad_uninit(&pBPF->bq, pAllocationCallbacks); /* <-- This will free the heap allocation. 
*/ +} + +MA_API ma_result ma_bpf2_reinit(const ma_bpf2_config* pConfig, ma_bpf2* pBPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pBPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_bpf2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pBPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_bpf2_process_pcm_frame_s16(ma_bpf2* pBPF, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pBPF->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_bpf2_process_pcm_frame_f32(ma_bpf2* pBPF, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pBPF->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_bpf2_process_pcm_frames(ma_bpf2* pBPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pBPF == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pBPF->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_bpf2_get_latency(const ma_bpf2* pBPF) +{ + if (pBPF == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pBPF->bq); +} + + +MA_API ma_bpf_config ma_bpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order) +{ + ma_bpf_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.order = ma_min(order, MA_MAX_FILTER_ORDER); + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t bpf2Offset; +} ma_bpf_heap_layout; + +static ma_result ma_bpf_get_heap_layout(const ma_bpf_config* pConfig, ma_bpf_heap_layout* pHeapLayout) +{ + ma_result result; + ma_uint32 bpf2Count; + ma_uint32 ibpf2; + + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->order > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + /* We must have an even number of order. */ + if ((pConfig->order & 0x1) != 0) { + return MA_INVALID_ARGS; + } + + bpf2Count = pConfig->channels / 2; + + pHeapLayout->sizeInBytes = 0; + + /* BPF 2 */ + pHeapLayout->bpf2Offset = pHeapLayout->sizeInBytes; + for (ibpf2 = 0; ibpf2 < bpf2Count; ibpf2 += 1) { + size_t bpf2HeapSizeInBytes; + ma_bpf2_config bpf2Config = ma_bpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, 0.707107); /* <-- The "q" parameter does not matter for the purpose of calculating the heap size. */ + + result = ma_bpf2_get_heap_size(&bpf2Config, &bpf2HeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes += sizeof(ma_bpf2) + bpf2HeapSizeInBytes; + } + + /* Make sure allocation size is aligned. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +static ma_result ma_bpf_reinit__internal(const ma_bpf_config* pConfig, void* pHeap, ma_bpf* pBPF, ma_bool32 isNew) +{ + ma_result result; + ma_uint32 bpf2Count; + ma_uint32 ibpf2; + ma_bpf_heap_layout heapLayout; /* Only used if isNew is true. */ + + if (pBPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Only supporting f32 and s16. */ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. 
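Reinitialization reuses the existing heap and per-channel state, which is why the format, channel count and filter order must all match the original configuration.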
*/ + if (pBPF->format != ma_format_unknown && pBPF->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. */ + if (pBPF->channels != 0 && pBPF->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + if (pConfig->order > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + /* We must have an even number of order. */ + if ((pConfig->order & 0x1) != 0) { + return MA_INVALID_ARGS; + } + + bpf2Count = pConfig->order / 2; + + /* The filter order can't change between reinits. */ + if (!isNew) { + if (pBPF->bpf2Count != bpf2Count) { + return MA_INVALID_OPERATION; + } + } + + if (isNew) { + result = ma_bpf_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pBPF->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pBPF->pBPF2 = (ma_bpf2*)ma_offset_ptr(pHeap, heapLayout.bpf2Offset); + } else { + MA_ZERO_OBJECT(&heapLayout); + } + + for (ibpf2 = 0; ibpf2 < bpf2Count; ibpf2 += 1) { + ma_bpf2_config bpf2Config; + double q; + + /* TODO: Calculate Q to make this a proper Butterworth filter. */ + q = 0.707107; + + bpf2Config = ma_bpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, q); + + if (isNew) { + size_t bpf2HeapSizeInBytes; + + result = ma_bpf2_get_heap_size(&bpf2Config, &bpf2HeapSizeInBytes); + if (result == MA_SUCCESS) { + result = ma_bpf2_init_preallocated(&bpf2Config, ma_offset_ptr(pHeap, heapLayout.bpf2Offset + (sizeof(ma_bpf2) * bpf2Count) + (ibpf2 * bpf2HeapSizeInBytes)), &pBPF->pBPF2[ibpf2]); + } + } else { + result = ma_bpf2_reinit(&bpf2Config, &pBPF->pBPF2[ibpf2]); + } + + if (result != MA_SUCCESS) { + return result; + } + } + + pBPF->bpf2Count = bpf2Count; + pBPF->format = pConfig->format; + pBPF->channels = pConfig->channels; + + return MA_SUCCESS; +} + + +MA_API ma_result ma_bpf_get_heap_size(const ma_bpf_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_bpf_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_bpf_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_bpf_init_preallocated(const ma_bpf_config* pConfig, void* pHeap, ma_bpf* pBPF) +{ + if (pBPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pBPF); + + return ma_bpf_reinit__internal(pConfig, pHeap, pBPF, /*isNew*/MA_TRUE); +} + +MA_API ma_result ma_bpf_init(const ma_bpf_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_bpf* pBPF) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_bpf_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_bpf_init_preallocated(pConfig, pHeap, pBPF); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pBPF->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_bpf_uninit(ma_bpf* pBPF, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_uint32 ibpf2; + + if (pBPF == NULL) { + return; + } + + for (ibpf2 = 0; ibpf2 < pBPF->bpf2Count; ibpf2 += 1) { + ma_bpf2_uninit(&pBPF->pBPF2[ibpf2], pAllocationCallbacks); + } + + 
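    /* The heap is only freed here if it was allocated internally by ma_bpf_init(); a heap supplied to ma_bpf_init_preallocated() remains owned by the caller. */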
if (pBPF->_ownsHeap) { + ma_free(pBPF->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_bpf_reinit(const ma_bpf_config* pConfig, ma_bpf* pBPF) +{ + return ma_bpf_reinit__internal(pConfig, NULL, pBPF, /*isNew*/MA_FALSE); +} + +MA_API ma_result ma_bpf_process_pcm_frames(ma_bpf* pBPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_result result; + ma_uint32 ibpf2; + + if (pBPF == NULL) { + return MA_INVALID_ARGS; + } + + /* Faster path for in-place. */ + if (pFramesOut == pFramesIn) { + for (ibpf2 = 0; ibpf2 < pBPF->bpf2Count; ibpf2 += 1) { + result = ma_bpf2_process_pcm_frames(&pBPF->pBPF2[ibpf2], pFramesOut, pFramesOut, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } + } + + /* Slightly slower path for copying. */ + if (pFramesOut != pFramesIn) { + ma_uint32 iFrame; + + /* */ if (pBPF->format == ma_format_f32) { + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + MA_COPY_MEMORY(pFramesOutF32, pFramesInF32, ma_get_bytes_per_frame(pBPF->format, pBPF->channels)); + + for (ibpf2 = 0; ibpf2 < pBPF->bpf2Count; ibpf2 += 1) { + ma_bpf2_process_pcm_frame_f32(&pBPF->pBPF2[ibpf2], pFramesOutF32, pFramesOutF32); + } + + pFramesOutF32 += pBPF->channels; + pFramesInF32 += pBPF->channels; + } + } else if (pBPF->format == ma_format_s16) { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + MA_COPY_MEMORY(pFramesOutS16, pFramesInS16, ma_get_bytes_per_frame(pBPF->format, pBPF->channels)); + + for (ibpf2 = 0; ibpf2 < pBPF->bpf2Count; ibpf2 += 1) { + ma_bpf2_process_pcm_frame_s16(&pBPF->pBPF2[ibpf2], pFramesOutS16, pFramesOutS16); + } + + pFramesOutS16 += pBPF->channels; + pFramesInS16 += pBPF->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_OPERATION; /* Should never hit this. 
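Only f32 and s16 pass the format validation at initialization time.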
*/ + } + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_bpf_get_latency(const ma_bpf* pBPF) +{ + if (pBPF == NULL) { + return 0; + } + + return pBPF->bpf2Count*2; +} + + +/************************************************************************************************************************************************************** + +Notching Filter + +**************************************************************************************************************************************************************/ +MA_API ma_notch2_config ma_notch2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double q, double frequency) +{ + ma_notch2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.q = q; + config.frequency = frequency; + + if (config.q == 0) { + config.q = 0.707107; + } + + return config; +} + + +static MA_INLINE ma_biquad_config ma_notch2__get_biquad_config(const ma_notch2_config* pConfig) +{ + ma_biquad_config bqConfig; + double q; + double w; + double s; + double c; + double a; + + MA_ASSERT(pConfig != NULL); + + q = pConfig->q; + w = 2 * MA_PI_D * pConfig->frequency / pConfig->sampleRate; + s = ma_sind(w); + c = ma_cosd(w); + a = s / (2*q); + + bqConfig.b0 = 1; + bqConfig.b1 = -2 * c; + bqConfig.b2 = 1; + bqConfig.a0 = 1 + a; + bqConfig.a1 = -2 * c; + bqConfig.a2 = 1 - a; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_notch2_get_heap_size(const ma_notch2_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_biquad_config bqConfig; + bqConfig = ma_notch2__get_biquad_config(pConfig); + + return ma_biquad_get_heap_size(&bqConfig, pHeapSizeInBytes); +} + +MA_API ma_result ma_notch2_init_preallocated(const ma_notch2_config* pConfig, void* pHeap, ma_notch2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFilter); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_notch2__get_biquad_config(pConfig); + result = ma_biquad_init_preallocated(&bqConfig, pHeap, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_notch2_init(const ma_notch2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_notch2* pFilter) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_notch2_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_notch2_init_preallocated(pConfig, pHeap, pFilter); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pFilter->bq._ownsHeap = MA_TRUE; /* <-- This will cause the biquad to take ownership of the heap and free it when it's uninitialized. */ + return MA_SUCCESS; +} + +MA_API void ma_notch2_uninit(ma_notch2* pFilter, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pFilter == NULL) { + return; + } + + ma_biquad_uninit(&pFilter->bq, pAllocationCallbacks); /* <-- This will free the heap allocation. 
*/ +} + +MA_API ma_result ma_notch2_reinit(const ma_notch2_config* pConfig, ma_notch2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_notch2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_notch2_process_pcm_frame_s16(ma_notch2* pFilter, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pFilter->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_notch2_process_pcm_frame_f32(ma_notch2* pFilter, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pFilter->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_notch2_process_pcm_frames(ma_notch2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pFilter->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_notch2_get_latency(const ma_notch2* pFilter) +{ + if (pFilter == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pFilter->bq); +} + + + +/************************************************************************************************************************************************************** + +Peaking EQ Filter + +**************************************************************************************************************************************************************/ +MA_API ma_peak2_config ma_peak2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency) +{ + ma_peak2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.gainDB = gainDB; + config.q = q; + config.frequency = frequency; + + if (config.q == 0) { + config.q = 0.707107; + } + + return config; +} + + +static MA_INLINE ma_biquad_config ma_peak2__get_biquad_config(const ma_peak2_config* pConfig) +{ + ma_biquad_config bqConfig; + double q; + double w; + double s; + double c; + double a; + double A; + + MA_ASSERT(pConfig != NULL); + + q = pConfig->q; + w = 2 * MA_PI_D * pConfig->frequency / pConfig->sampleRate; + s = ma_sind(w); + c = ma_cosd(w); + a = s / (2*q); + A = ma_powd(10, (pConfig->gainDB / 40)); + + bqConfig.b0 = 1 + (a * A); + bqConfig.b1 = -2 * c; + bqConfig.b2 = 1 - (a * A); + bqConfig.a0 = 1 + (a / A); + bqConfig.a1 = -2 * c; + bqConfig.a2 = 1 - (a / A); + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_peak2_get_heap_size(const ma_peak2_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_biquad_config bqConfig; + bqConfig = ma_peak2__get_biquad_config(pConfig); + + return ma_biquad_get_heap_size(&bqConfig, pHeapSizeInBytes); +} + +MA_API ma_result ma_peak2_init_preallocated(const ma_peak2_config* pConfig, void* pHeap, ma_peak2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFilter); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_peak2__get_biquad_config(pConfig); + result = ma_biquad_init_preallocated(&bqConfig, pHeap, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_peak2_init(const ma_peak2_config* pConfig, 
const ma_allocation_callbacks* pAllocationCallbacks, ma_peak2* pFilter) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_peak2_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_peak2_init_preallocated(pConfig, pHeap, pFilter); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pFilter->bq._ownsHeap = MA_TRUE; /* <-- This will cause the biquad to take ownership of the heap and free it when it's uninitialized. */ + return MA_SUCCESS; +} + +MA_API void ma_peak2_uninit(ma_peak2* pFilter, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pFilter == NULL) { + return; + } + + ma_biquad_uninit(&pFilter->bq, pAllocationCallbacks); /* <-- This will free the heap allocation. */ +} + +MA_API ma_result ma_peak2_reinit(const ma_peak2_config* pConfig, ma_peak2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_peak2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_peak2_process_pcm_frame_s16(ma_peak2* pFilter, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pFilter->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_peak2_process_pcm_frame_f32(ma_peak2* pFilter, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pFilter->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_peak2_process_pcm_frames(ma_peak2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pFilter->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_peak2_get_latency(const ma_peak2* pFilter) +{ + if (pFilter == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pFilter->bq); +} + + +/************************************************************************************************************************************************************** + +Low Shelf Filter + +**************************************************************************************************************************************************************/ +MA_API ma_loshelf2_config ma_loshelf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double shelfSlope, double frequency) +{ + ma_loshelf2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.gainDB = gainDB; + config.shelfSlope = shelfSlope; + config.frequency = frequency; + + return config; +} + + +static MA_INLINE ma_biquad_config ma_loshelf2__get_biquad_config(const ma_loshelf2_config* pConfig) +{ + ma_biquad_config bqConfig; + double w; + double s; + double c; + double A; + double S; + double a; + double sqrtA; + + MA_ASSERT(pConfig != NULL); + + w = 2 * MA_PI_D * pConfig->frequency / pConfig->sampleRate; + s = ma_sind(w); + c = ma_cosd(w); + A = ma_powd(10, (pConfig->gainDB / 40)); + S = pConfig->shelfSlope; + a = s/2 * ma_sqrtd((A + 1/A) * (1/S - 1) + 2); + sqrtA = 2*ma_sqrtd(A)*a; + + bqConfig.b0 = A * ((A + 1) - (A - 1)*c 
+ sqrtA); + bqConfig.b1 = 2 * A * ((A - 1) - (A + 1)*c); + bqConfig.b2 = A * ((A + 1) - (A - 1)*c - sqrtA); + bqConfig.a0 = (A + 1) + (A - 1)*c + sqrtA; + bqConfig.a1 = -2 * ((A - 1) + (A + 1)*c); + bqConfig.a2 = (A + 1) + (A - 1)*c - sqrtA; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_loshelf2_get_heap_size(const ma_loshelf2_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_biquad_config bqConfig; + bqConfig = ma_loshelf2__get_biquad_config(pConfig); + + return ma_biquad_get_heap_size(&bqConfig, pHeapSizeInBytes); +} + +MA_API ma_result ma_loshelf2_init_preallocated(const ma_loshelf2_config* pConfig, void* pHeap, ma_loshelf2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFilter); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_loshelf2__get_biquad_config(pConfig); + result = ma_biquad_init_preallocated(&bqConfig, pHeap, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_loshelf2_init(const ma_loshelf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_loshelf2* pFilter) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_loshelf2_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_loshelf2_init_preallocated(pConfig, pHeap, pFilter); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pFilter->bq._ownsHeap = MA_TRUE; /* <-- This will cause the biquad to take ownership of the heap and free it when it's uninitialized. */ + return MA_SUCCESS; +} + +MA_API void ma_loshelf2_uninit(ma_loshelf2* pFilter, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pFilter == NULL) { + return; + } + + ma_biquad_uninit(&pFilter->bq, pAllocationCallbacks); /* <-- This will free the heap allocation. 
*/ +} + +MA_API ma_result ma_loshelf2_reinit(const ma_loshelf2_config* pConfig, ma_loshelf2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_loshelf2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_loshelf2_process_pcm_frame_s16(ma_loshelf2* pFilter, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pFilter->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_loshelf2_process_pcm_frame_f32(ma_loshelf2* pFilter, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pFilter->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_loshelf2_process_pcm_frames(ma_loshelf2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pFilter->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_loshelf2_get_latency(const ma_loshelf2* pFilter) +{ + if (pFilter == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pFilter->bq); +} + + +/************************************************************************************************************************************************************** + +High Shelf Filter + +**************************************************************************************************************************************************************/ +MA_API ma_hishelf2_config ma_hishelf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double shelfSlope, double frequency) +{ + ma_hishelf2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.gainDB = gainDB; + config.shelfSlope = shelfSlope; + config.frequency = frequency; + + return config; +} + + +static MA_INLINE ma_biquad_config ma_hishelf2__get_biquad_config(const ma_hishelf2_config* pConfig) +{ + ma_biquad_config bqConfig; + double w; + double s; + double c; + double A; + double S; + double a; + double sqrtA; + + MA_ASSERT(pConfig != NULL); + + w = 2 * MA_PI_D * pConfig->frequency / pConfig->sampleRate; + s = ma_sind(w); + c = ma_cosd(w); + A = ma_powd(10, (pConfig->gainDB / 40)); + S = pConfig->shelfSlope; + a = s/2 * ma_sqrtd((A + 1/A) * (1/S - 1) + 2); + sqrtA = 2*ma_sqrtd(A)*a; + + bqConfig.b0 = A * ((A + 1) + (A - 1)*c + sqrtA); + bqConfig.b1 = -2 * A * ((A - 1) + (A + 1)*c); + bqConfig.b2 = A * ((A + 1) + (A - 1)*c - sqrtA); + bqConfig.a0 = (A + 1) - (A - 1)*c + sqrtA; + bqConfig.a1 = 2 * ((A - 1) - (A + 1)*c); + bqConfig.a2 = (A + 1) - (A - 1)*c - sqrtA; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_hishelf2_get_heap_size(const ma_hishelf2_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_biquad_config bqConfig; + bqConfig = ma_hishelf2__get_biquad_config(pConfig); + + return ma_biquad_get_heap_size(&bqConfig, pHeapSizeInBytes); +} + +MA_API ma_result ma_hishelf2_init_preallocated(const ma_hishelf2_config* pConfig, void* pHeap, ma_hishelf2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFilter); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = 
ma_hishelf2__get_biquad_config(pConfig); + result = ma_biquad_init_preallocated(&bqConfig, pHeap, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_hishelf2_init(const ma_hishelf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hishelf2* pFilter) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_hishelf2_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_hishelf2_init_preallocated(pConfig, pHeap, pFilter); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pFilter->bq._ownsHeap = MA_TRUE; /* <-- This will cause the biquad to take ownership of the heap and free it when it's uninitialized. */ + return MA_SUCCESS; +} + +MA_API void ma_hishelf2_uninit(ma_hishelf2* pFilter, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pFilter == NULL) { + return; + } + + ma_biquad_uninit(&pFilter->bq, pAllocationCallbacks); /* <-- This will free the heap allocation. */ +} + +MA_API ma_result ma_hishelf2_reinit(const ma_hishelf2_config* pConfig, ma_hishelf2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_hishelf2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_hishelf2_process_pcm_frame_s16(ma_hishelf2* pFilter, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pFilter->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_hishelf2_process_pcm_frame_f32(ma_hishelf2* pFilter, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pFilter->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_hishelf2_process_pcm_frames(ma_hishelf2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pFilter->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_hishelf2_get_latency(const ma_hishelf2* pFilter) +{ + if (pFilter == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pFilter->bq); +} + + + +/* +Delay +*/ +MA_API ma_delay_config ma_delay_config_init(ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 delayInFrames, float decay) +{ + ma_delay_config config; + + MA_ZERO_OBJECT(&config); + config.channels = channels; + config.sampleRate = sampleRate; + config.delayInFrames = delayInFrames; + config.delayStart = (decay == 0) ? MA_TRUE : MA_FALSE; /* Delay the start if it looks like we're not configuring an echo. 
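With no decay there is no feedback, so this behaves as a pure delay line whose output should only start after delayInFrames. With a non-zero decay the input is mixed into the buffer before it is read back, so the dry signal is heard immediately and the repeats die away like an echo.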
*/ + config.wet = 1; + config.dry = 1; + config.decay = decay; + + return config; +} + + +MA_API ma_result ma_delay_init(const ma_delay_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_delay* pDelay) +{ + if (pDelay == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pDelay); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->decay < 0 || pConfig->decay > 1) { + return MA_INVALID_ARGS; + } + + pDelay->config = *pConfig; + pDelay->bufferSizeInFrames = pConfig->delayInFrames; + pDelay->cursor = 0; + + pDelay->pBuffer = (float*)ma_malloc((size_t)(pDelay->bufferSizeInFrames * ma_get_bytes_per_frame(ma_format_f32, pConfig->channels)), pAllocationCallbacks); + if (pDelay->pBuffer == NULL) { + return MA_OUT_OF_MEMORY; + } + + ma_silence_pcm_frames(pDelay->pBuffer, pDelay->bufferSizeInFrames, ma_format_f32, pConfig->channels); + + return MA_SUCCESS; +} + +MA_API void ma_delay_uninit(ma_delay* pDelay, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pDelay == NULL) { + return; + } + + ma_free(pDelay->pBuffer, pAllocationCallbacks); +} + +MA_API ma_result ma_delay_process_pcm_frames(ma_delay* pDelay, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount) +{ + ma_uint32 iFrame; + ma_uint32 iChannel; + float* pFramesOutF32 = (float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + if (pDelay == NULL || pFramesOut == NULL || pFramesIn == NULL) { + return MA_INVALID_ARGS; + } + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < pDelay->config.channels; iChannel += 1) { + ma_uint32 iBuffer = (pDelay->cursor * pDelay->config.channels) + iChannel; + + if (pDelay->config.delayStart) { + /* Delayed start. */ + + /* Read */ + pFramesOutF32[iChannel] = pDelay->pBuffer[iBuffer] * pDelay->config.wet; + + /* Feedback */ + pDelay->pBuffer[iBuffer] = (pDelay->pBuffer[iBuffer] * pDelay->config.decay) + (pFramesInF32[iChannel] * pDelay->config.dry); + } else { + /* Immediate start */ + + /* Feedback */ + pDelay->pBuffer[iBuffer] = (pDelay->pBuffer[iBuffer] * pDelay->config.decay) + (pFramesInF32[iChannel] * pDelay->config.dry); + + /* Read */ + pFramesOutF32[iChannel] = pDelay->pBuffer[iBuffer] * pDelay->config.wet; + } + } + + pDelay->cursor = (pDelay->cursor + 1) % pDelay->bufferSizeInFrames; + + pFramesOutF32 += pDelay->config.channels; + pFramesInF32 += pDelay->config.channels; + } + + return MA_SUCCESS; +} + +MA_API void ma_delay_set_wet(ma_delay* pDelay, float value) +{ + if (pDelay == NULL) { + return; + } + + pDelay->config.wet = value; +} + +MA_API float ma_delay_get_wet(const ma_delay* pDelay) +{ + if (pDelay == NULL) { + return 0; + } + + return pDelay->config.wet; +} + +MA_API void ma_delay_set_dry(ma_delay* pDelay, float value) +{ + if (pDelay == NULL) { + return; + } + + pDelay->config.dry = value; +} + +MA_API float ma_delay_get_dry(const ma_delay* pDelay) +{ + if (pDelay == NULL) { + return 0; + } + + return pDelay->config.dry; +} + +MA_API void ma_delay_set_decay(ma_delay* pDelay, float value) +{ + if (pDelay == NULL) { + return; + } + + pDelay->config.decay = value; +} + +MA_API float ma_delay_get_decay(const ma_delay* pDelay) +{ + if (pDelay == NULL) { + return 0; + } + + return pDelay->config.decay; +} + + +MA_API ma_gainer_config ma_gainer_config_init(ma_uint32 channels, ma_uint32 smoothTimeInFrames) +{ + ma_gainer_config config; + + MA_ZERO_OBJECT(&config); + config.channels = channels; + config.smoothTimeInFrames = smoothTimeInFrames; + + return config; +} + + 
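+/*
+Illustrative sketch only (not part of the library above): the effects in this section all follow
+the same config_init -> init -> process_pcm_frames -> uninit lifecycle. The hypothetical helper
+below runs the delay defined above into a notch filter. All numeric values are arbitrary example
+values, and pFramesIn/pFramesOut/frameCount are assumed to be interleaved f32 frames provided by
+the caller.
+*/
+#if 0
+static ma_result example_delay_then_notch(float* pFramesOut, const float* pFramesIn, ma_uint32 frameCount)
+{
+    ma_result result;
+    ma_delay delay;
+    ma_notch2 notch;
+
+    /* 24000 frames at 48000 Hz is half a second between repeats; each repeat at quarter volume. */
+    ma_delay_config delayConfig = ma_delay_config_init(2, 48000, 24000, 0.25f);
+
+    /* A notch with a Q of 2 centred on 60 Hz, e.g. for mains hum. */
+    ma_notch2_config notchConfig = ma_notch2_config_init(ma_format_f32, 2, 48000, 2.0, 60);
+
+    result = ma_delay_init(&delayConfig, NULL, &delay);   /* NULL = default allocation callbacks. */
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    result = ma_notch2_init(&notchConfig, NULL, &notch);
+    if (result != MA_SUCCESS) {
+        ma_delay_uninit(&delay, NULL);
+        return result;
+    }
+
+    /* Delay from the input into the output buffer, then notch the result in-place. */
+    result = ma_delay_process_pcm_frames(&delay, pFramesOut, pFramesIn, frameCount);
+    if (result == MA_SUCCESS) {
+        result = ma_notch2_process_pcm_frames(&notch, pFramesOut, pFramesOut, frameCount);
+    }
+
+    ma_notch2_uninit(&notch, NULL);
+    ma_delay_uninit(&delay, NULL);
+
+    return result;
+}
+#endif
+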
+typedef struct +{ + size_t sizeInBytes; + size_t oldGainsOffset; + size_t newGainsOffset; +} ma_gainer_heap_layout; + +static ma_result ma_gainer_get_heap_layout(const ma_gainer_config* pConfig, ma_gainer_heap_layout* pHeapLayout) +{ + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channels == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* Old gains. */ + pHeapLayout->oldGainsOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(float) * pConfig->channels; + + /* New gains. */ + pHeapLayout->newGainsOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(float) * pConfig->channels; + + /* Alignment. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + + +MA_API ma_result ma_gainer_get_heap_size(const ma_gainer_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_gainer_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_gainer_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + + +MA_API ma_result ma_gainer_init_preallocated(const ma_gainer_config* pConfig, void* pHeap, ma_gainer* pGainer) +{ + ma_result result; + ma_gainer_heap_layout heapLayout; + ma_uint32 iChannel; + + if (pGainer == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pGainer); + + if (pConfig == NULL || pHeap == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_gainer_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pGainer->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pGainer->pOldGains = (float*)ma_offset_ptr(pHeap, heapLayout.oldGainsOffset); + pGainer->pNewGains = (float*)ma_offset_ptr(pHeap, heapLayout.newGainsOffset); + pGainer->masterVolume = 1; + + pGainer->config = *pConfig; + pGainer->t = (ma_uint32)-1; /* No interpolation by default. */ + + for (iChannel = 0; iChannel < pConfig->channels; iChannel += 1) { + pGainer->pOldGains[iChannel] = 1; + pGainer->pNewGains[iChannel] = 1; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_gainer_init(const ma_gainer_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_gainer* pGainer) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_gainer_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; /* Failed to retrieve the size of the heap allocation. 
*/ + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_gainer_init_preallocated(pConfig, pHeap, pGainer); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pGainer->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_gainer_uninit(ma_gainer* pGainer, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pGainer == NULL) { + return; + } + + if (pGainer->_ownsHeap) { + ma_free(pGainer->_pHeap, pAllocationCallbacks); + } +} + +static float ma_gainer_calculate_current_gain(const ma_gainer* pGainer, ma_uint32 channel) +{ + float a = (float)pGainer->t / pGainer->config.smoothTimeInFrames; + return ma_mix_f32_fast(pGainer->pOldGains[channel], pGainer->pNewGains[channel], a); +} + +static /*__attribute__((noinline))*/ ma_result ma_gainer_process_pcm_frames_internal(ma_gainer * pGainer, void* MA_RESTRICT pFramesOut, const void* MA_RESTRICT pFramesIn, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint32 iChannel; + ma_uint64 interpolatedFrameCount; + + MA_ASSERT(pGainer != NULL); + + /* + We don't necessarily need to apply a linear interpolation for the entire frameCount frames. When + linear interpolation is not needed we can do a simple volume adjustment which will be more + efficient than a lerp with an alpha value of 1. + + To do this, all we need to do is determine how many frames need to have a lerp applied. Then we + just process that number of frames with linear interpolation. After that we run on an optimized + path which just applies the new gains without a lerp. + */ + if (pGainer->t >= pGainer->config.smoothTimeInFrames) { + interpolatedFrameCount = 0; + } else { + interpolatedFrameCount = pGainer->t - pGainer->config.smoothTimeInFrames; + if (interpolatedFrameCount > frameCount) { + interpolatedFrameCount = frameCount; + } + } + + /* + Start off with our interpolated frames. When we do this, we'll adjust frameCount and our pointers + so that the fast path can work naturally without consideration of the interpolated path. + */ + if (interpolatedFrameCount > 0) { + /* We can allow the input and output buffers to be null in which case we'll just update the internal timer. */ + if (pFramesOut != NULL && pFramesIn != NULL) { + /* + All we're really doing here is moving the old gains towards the new gains. We don't want to + be modifying the gains inside the ma_gainer object because that will break things. Instead + we can make a copy here on the stack. For extreme channel counts we can fall back to a slower + implementation which just uses a standard lerp. + */ + float* pFramesOutF32 = (float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + float a = (float)pGainer->t / pGainer->config.smoothTimeInFrames; + float d = 1.0f / pGainer->config.smoothTimeInFrames; + + if (pGainer->config.channels <= 32) { + float pRunningGain[32]; + float pRunningGainDelta[32]; /* Could this be heap-allocated as part of the ma_gainer object? */ + + /* Initialize the running gain. */ + for (iChannel = 0; iChannel < pGainer->config.channels; iChannel += 1) { + float t = (pGainer->pNewGains[iChannel] - pGainer->pOldGains[iChannel]) * pGainer->masterVolume; + pRunningGainDelta[iChannel] = t * d; + pRunningGain[iChannel] = (pGainer->pOldGains[iChannel] * pGainer->masterVolume) + (t * a); + } + + iFrame = 0; + + /* Optimized paths for common channel counts. 
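Each branch keeps a per-channel running gain together with a per-frame delta, so the per-sample work is just a multiply by the running gain and an add to step it towards the new gain.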
This is mostly just experimenting with some SIMD ideas. It's not necessarily final. */ + if (pGainer->config.channels == 2) { +#if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_uint64 unrolledLoopCount = interpolatedFrameCount >> 1; + + /* Expand some arrays so we can have a clean SIMD loop below. */ + __m128 runningGainDelta0 = _mm_set_ps(pRunningGainDelta[1], pRunningGainDelta[0], pRunningGainDelta[1], pRunningGainDelta[0]); + __m128 runningGain0 = _mm_set_ps(pRunningGain[1] + pRunningGainDelta[1], pRunningGain[0] + pRunningGainDelta[0], pRunningGain[1], pRunningGain[0]); + + for (; iFrame < unrolledLoopCount; iFrame += 1) { + _mm_storeu_ps(&pFramesOutF32[iFrame*4 + 0], _mm_mul_ps(_mm_loadu_ps(&pFramesInF32[iFrame*4 + 0]), runningGain0)); + runningGain0 = _mm_add_ps(runningGain0, runningGainDelta0); + } + + iFrame = unrolledLoopCount << 1; + } else +#endif + { + /* + Two different scalar implementations here. Clang (and I assume GCC) will vectorize + both of these, but the bottom version results in a nicer vectorization with less + instructions emitted. The problem, however, is that the bottom version runs slower + when compiled with MSVC. The top version will be partially vectorized by MSVC. + */ +#if defined(_MSC_VER) && !defined(__clang__) + ma_uint64 unrolledLoopCount = interpolatedFrameCount >> 1; + + /* Expand some arrays so we can have a clean 4x SIMD operation in the loop. */ + pRunningGainDelta[2] = pRunningGainDelta[0]; + pRunningGainDelta[3] = pRunningGainDelta[1]; + pRunningGain[2] = pRunningGain[0] + pRunningGainDelta[0]; + pRunningGain[3] = pRunningGain[1] + pRunningGainDelta[1]; + + for (; iFrame < unrolledLoopCount; iFrame += 1) { + pFramesOutF32[iFrame*4 + 0] = pFramesInF32[iFrame*4 + 0] * pRunningGain[0]; + pFramesOutF32[iFrame*4 + 1] = pFramesInF32[iFrame*4 + 1] * pRunningGain[1]; + pFramesOutF32[iFrame*4 + 2] = pFramesInF32[iFrame*4 + 2] * pRunningGain[2]; + pFramesOutF32[iFrame*4 + 3] = pFramesInF32[iFrame*4 + 3] * pRunningGain[3]; + + /* Move the running gain forward towards the new gain. */ + pRunningGain[0] += pRunningGainDelta[0]; + pRunningGain[1] += pRunningGainDelta[1]; + pRunningGain[2] += pRunningGainDelta[2]; + pRunningGain[3] += pRunningGainDelta[3]; + } + + iFrame = unrolledLoopCount << 1; +#else + for (; iFrame < interpolatedFrameCount; iFrame += 1) { + for (iChannel = 0; iChannel < 2; iChannel += 1) { + pFramesOutF32[iFrame*2 + iChannel] = pFramesInF32[iFrame*2 + iChannel] * pRunningGain[iChannel]; + } + + for (iChannel = 0; iChannel < 2; iChannel += 1) { + pRunningGain[iChannel] += pRunningGainDelta[iChannel]; + } + } +#endif + } + } else if (pGainer->config.channels == 6) { +#if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + /* + For 6 channels things are a bit more complicated because 6 isn't cleanly divisible by 4. We need to do 2 frames + at a time, meaning we'll be doing 12 samples in a group. Like the stereo case we'll need to expand some arrays + so we can do clean 4x SIMD operations. + */ + ma_uint64 unrolledLoopCount = interpolatedFrameCount >> 1; + + /* Expand some arrays so we can have a clean SIMD loop below. 
*/ + __m128 runningGainDelta0 = _mm_set_ps(pRunningGainDelta[3], pRunningGainDelta[2], pRunningGainDelta[1], pRunningGainDelta[0]); + __m128 runningGainDelta1 = _mm_set_ps(pRunningGainDelta[1], pRunningGainDelta[0], pRunningGainDelta[5], pRunningGainDelta[4]); + __m128 runningGainDelta2 = _mm_set_ps(pRunningGainDelta[5], pRunningGainDelta[4], pRunningGainDelta[3], pRunningGainDelta[2]); + + __m128 runningGain0 = _mm_set_ps(pRunningGain[3], pRunningGain[2], pRunningGain[1], pRunningGain[0]); + __m128 runningGain1 = _mm_set_ps(pRunningGain[1] + pRunningGainDelta[1], pRunningGain[0] + pRunningGainDelta[0], pRunningGain[5], pRunningGain[4]); + __m128 runningGain2 = _mm_set_ps(pRunningGain[5] + pRunningGainDelta[5], pRunningGain[4] + pRunningGainDelta[4], pRunningGain[3] + pRunningGainDelta[3], pRunningGain[2] + pRunningGainDelta[2]); + + for (; iFrame < unrolledLoopCount; iFrame += 1) { + _mm_storeu_ps(&pFramesOutF32[iFrame*12 + 0], _mm_mul_ps(_mm_loadu_ps(&pFramesInF32[iFrame*12 + 0]), runningGain0)); + _mm_storeu_ps(&pFramesOutF32[iFrame*12 + 4], _mm_mul_ps(_mm_loadu_ps(&pFramesInF32[iFrame*12 + 4]), runningGain1)); + _mm_storeu_ps(&pFramesOutF32[iFrame*12 + 8], _mm_mul_ps(_mm_loadu_ps(&pFramesInF32[iFrame*12 + 8]), runningGain2)); + + runningGain0 = _mm_add_ps(runningGain0, runningGainDelta0); + runningGain1 = _mm_add_ps(runningGain1, runningGainDelta1); + runningGain2 = _mm_add_ps(runningGain2, runningGainDelta2); + } + + iFrame = unrolledLoopCount << 1; + } else +#endif + { + for (; iFrame < interpolatedFrameCount; iFrame += 1) { + for (iChannel = 0; iChannel < 6; iChannel += 1) { + pFramesOutF32[iFrame*6 + iChannel] = pFramesInF32[iFrame*6 + iChannel] * pRunningGain[iChannel]; + } + + /* Move the running gain forward towards the new gain. */ + for (iChannel = 0; iChannel < 6; iChannel += 1) { + pRunningGain[iChannel] += pRunningGainDelta[iChannel]; + } + } + } + } else if (pGainer->config.channels == 8) { + /* For 8 channels we can just go over frame by frame and do all eight channels as 2 separate 4x SIMD operations. */ +#if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + __m128 runningGainDelta0 = _mm_loadu_ps(&pRunningGainDelta[0]); + __m128 runningGainDelta1 = _mm_loadu_ps(&pRunningGainDelta[4]); + __m128 runningGain0 = _mm_loadu_ps(&pRunningGain[0]); + __m128 runningGain1 = _mm_loadu_ps(&pRunningGain[4]); + + for (; iFrame < interpolatedFrameCount; iFrame += 1) { + _mm_storeu_ps(&pFramesOutF32[iFrame*8 + 0], _mm_mul_ps(_mm_loadu_ps(&pFramesInF32[iFrame*8 + 0]), runningGain0)); + _mm_storeu_ps(&pFramesOutF32[iFrame*8 + 4], _mm_mul_ps(_mm_loadu_ps(&pFramesInF32[iFrame*8 + 4]), runningGain1)); + + runningGain0 = _mm_add_ps(runningGain0, runningGainDelta0); + runningGain1 = _mm_add_ps(runningGain1, runningGainDelta1); + } + } else +#endif + { + /* This is crafted so that it auto-vectorizes when compiled with Clang. */ + for (; iFrame < interpolatedFrameCount; iFrame += 1) { + for (iChannel = 0; iChannel < 8; iChannel += 1) { + pFramesOutF32[iFrame*8 + iChannel] = pFramesInF32[iFrame*8 + iChannel] * pRunningGain[iChannel]; + } + + /* Move the running gain forward towards the new gain. 
*/ + for (iChannel = 0; iChannel < 8; iChannel += 1) { + pRunningGain[iChannel] += pRunningGainDelta[iChannel]; + } + } + } + } + + for (; iFrame < interpolatedFrameCount; iFrame += 1) { + for (iChannel = 0; iChannel < pGainer->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pGainer->config.channels + iChannel] = pFramesInF32[iFrame*pGainer->config.channels + iChannel] * pRunningGain[iChannel]; + pRunningGain[iChannel] += pRunningGainDelta[iChannel]; + } + } + } else { + /* Slower path for extreme channel counts where we can't fit enough on the stack. We could also move this to the heap as part of the ma_gainer object which might even be better since it'll only be updated when the gains actually change. */ + for (iFrame = 0; iFrame < interpolatedFrameCount; iFrame += 1) { + for (iChannel = 0; iChannel < pGainer->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pGainer->config.channels + iChannel] = pFramesInF32[iFrame*pGainer->config.channels + iChannel] * ma_mix_f32_fast(pGainer->pOldGains[iChannel], pGainer->pNewGains[iChannel], a) * pGainer->masterVolume; + } + + a += d; + } + } + } + + /* Make sure the timer is updated. */ + pGainer->t = (ma_uint32)ma_min(pGainer->t + interpolatedFrameCount, pGainer->config.smoothTimeInFrames); + + /* Adjust our arguments so the next part can work normally. */ + frameCount -= interpolatedFrameCount; + pFramesOut = ma_offset_ptr(pFramesOut, interpolatedFrameCount * sizeof(float)); + pFramesIn = ma_offset_ptr(pFramesIn, interpolatedFrameCount * sizeof(float)); + } + + /* All we need to do here is apply the new gains using an optimized path. */ + if (pFramesOut != NULL && pFramesIn != NULL) { + if (pGainer->config.channels <= 32) { + float gains[32]; + for (iChannel = 0; iChannel < pGainer->config.channels; iChannel += 1) { + gains[iChannel] = pGainer->pNewGains[iChannel] * pGainer->masterVolume; + } + + ma_copy_and_apply_volume_factor_per_channel_f32((float*)pFramesOut, (const float*)pFramesIn, frameCount, pGainer->config.channels, gains); + } else { + /* Slow path. Too many channels to fit on the stack. Need to apply a master volume as a separate path. */ + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < pGainer->config.channels; iChannel += 1) { + ((float*)pFramesOut)[iFrame*pGainer->config.channels + iChannel] = ((const float*)pFramesIn)[iFrame*pGainer->config.channels + iChannel] * pGainer->pNewGains[iChannel] * pGainer->masterVolume; + } + } + } + } + + /* Now that some frames have been processed we need to make sure future changes to the gain are interpolated. */ + if (pGainer->t == (ma_uint32)-1) { + pGainer->t = (ma_uint32)ma_min(pGainer->config.smoothTimeInFrames, frameCount); + } + +#if 0 + if (pGainer->t >= pGainer->config.smoothTimeInFrames) { + /* Fast path. No gain calculation required. */ + ma_copy_and_apply_volume_factor_per_channel_f32(pFramesOutF32, pFramesInF32, frameCount, pGainer->config.channels, pGainer->pNewGains); + ma_apply_volume_factor_f32(pFramesOutF32, frameCount * pGainer->config.channels, pGainer->masterVolume); + + /* Now that some frames have been processed we need to make sure future changes to the gain are interpolated. */ + if (pGainer->t == (ma_uint32)-1) { + pGainer->t = pGainer->config.smoothTimeInFrames; + } + } else { + /* Slow path. Need to interpolate the gain for each channel individually. */ + + /* We can allow the input and output buffers to be null in which case we'll just update the internal timer. 
*/ + if (pFramesOut != NULL && pFramesIn != NULL) { + float a = (float)pGainer->t / pGainer->config.smoothTimeInFrames; + float d = 1.0f / pGainer->config.smoothTimeInFrames; + ma_uint32 channelCount = pGainer->config.channels; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channelCount; iChannel += 1) { + pFramesOutF32[iChannel] = pFramesInF32[iChannel] * ma_mix_f32_fast(pGainer->pOldGains[iChannel], pGainer->pNewGains[iChannel], a) * pGainer->masterVolume; + } + + pFramesOutF32 += channelCount; + pFramesInF32 += channelCount; + + a += d; + if (a > 1) { + a = 1; + } + } + } + + pGainer->t = (ma_uint32)ma_min(pGainer->t + frameCount, pGainer->config.smoothTimeInFrames); + +#if 0 /* Reference implementation. */ + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + /* We can allow the input and output buffers to be null in which case we'll just update the internal timer. */ + if (pFramesOut != NULL && pFramesIn != NULL) { + for (iChannel = 0; iChannel < pGainer->config.channels; iChannel += 1) { + pFramesOutF32[iFrame * pGainer->config.channels + iChannel] = pFramesInF32[iFrame * pGainer->config.channels + iChannel] * ma_gainer_calculate_current_gain(pGainer, iChannel) * pGainer->masterVolume; + } + } + + /* Move interpolation time forward, but don't go beyond our smoothing time. */ + pGainer->t = ma_min(pGainer->t + 1, pGainer->config.smoothTimeInFrames); + } +#endif + } +#endif + + return MA_SUCCESS; +} + +MA_API ma_result ma_gainer_process_pcm_frames(ma_gainer* pGainer, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pGainer == NULL) { + return MA_INVALID_ARGS; + } + + /* + ma_gainer_process_pcm_frames_internal() marks pFramesOut and pFramesIn with MA_RESTRICT which + helps with auto-vectorization. + */ + return ma_gainer_process_pcm_frames_internal(pGainer, pFramesOut, pFramesIn, frameCount); +} + +static void ma_gainer_set_gain_by_index(ma_gainer* pGainer, float newGain, ma_uint32 iChannel) +{ + pGainer->pOldGains[iChannel] = ma_gainer_calculate_current_gain(pGainer, iChannel); + pGainer->pNewGains[iChannel] = newGain; +} + +static void ma_gainer_reset_smoothing_time(ma_gainer* pGainer) +{ + if (pGainer->t == (ma_uint32)-1) { + pGainer->t = pGainer->config.smoothTimeInFrames; /* No smoothing required for initial gains setting. */ + } else { + pGainer->t = 0; + } +} + +MA_API ma_result ma_gainer_set_gain(ma_gainer* pGainer, float newGain) +{ + ma_uint32 iChannel; + + if (pGainer == NULL) { + return MA_INVALID_ARGS; + } + + for (iChannel = 0; iChannel < pGainer->config.channels; iChannel += 1) { + ma_gainer_set_gain_by_index(pGainer, newGain, iChannel); + } + + /* The smoothing time needs to be reset to ensure we always interpolate by the configured smoothing time, but only if it's not the first setting. */ + ma_gainer_reset_smoothing_time(pGainer); + + return MA_SUCCESS; +} + +MA_API ma_result ma_gainer_set_gains(ma_gainer* pGainer, float* pNewGains) +{ + ma_uint32 iChannel; + + if (pGainer == NULL || pNewGains == NULL) { + return MA_INVALID_ARGS; + } + + for (iChannel = 0; iChannel < pGainer->config.channels; iChannel += 1) { + ma_gainer_set_gain_by_index(pGainer, pNewGains[iChannel], iChannel); + } + + /* The smoothing time needs to be reset to ensure we always interpolate by the configured smoothing time, but only if it's not the first setting. 
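On the very first setting, before any frames have been processed, the new gains take effect immediately with no fade.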
*/ + ma_gainer_reset_smoothing_time(pGainer); + + return MA_SUCCESS; +} + +MA_API ma_result ma_gainer_set_master_volume(ma_gainer* pGainer, float volume) +{ + if (pGainer == NULL) { + return MA_INVALID_ARGS; + } + + pGainer->masterVolume = volume; + + return MA_SUCCESS; +} + +MA_API ma_result ma_gainer_get_master_volume(const ma_gainer* pGainer, float* pVolume) +{ + if (pGainer == NULL || pVolume == NULL) { + return MA_INVALID_ARGS; + } + + *pVolume = pGainer->masterVolume; + + return MA_SUCCESS; +} + + +MA_API ma_panner_config ma_panner_config_init(ma_format format, ma_uint32 channels) +{ + ma_panner_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.mode = ma_pan_mode_balance; /* Set to balancing mode by default because it's consistent with other audio engines and most likely what the caller is expecting. */ + config.pan = 0; + + return config; +} + + +MA_API ma_result ma_panner_init(const ma_panner_config* pConfig, ma_panner* pPanner) +{ + if (pPanner == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pPanner); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + pPanner->format = pConfig->format; + pPanner->channels = pConfig->channels; + pPanner->mode = pConfig->mode; + pPanner->pan = pConfig->pan; + + return MA_SUCCESS; +} + +static void ma_stereo_balance_pcm_frames_f32(float* pFramesOut, const float* pFramesIn, ma_uint64 frameCount, float pan) +{ + ma_uint64 iFrame; + + if (pan > 0) { + float factor = 1.0f - pan; + if (pFramesOut == pFramesIn) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + pFramesOut[iFrame*2 + 0] = pFramesIn[iFrame*2 + 0] * factor; + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + pFramesOut[iFrame*2 + 0] = pFramesIn[iFrame*2 + 0] * factor; + pFramesOut[iFrame*2 + 1] = pFramesIn[iFrame*2 + 1]; + } + } + } else { + float factor = 1.0f + pan; + if (pFramesOut == pFramesIn) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + pFramesOut[iFrame*2 + 1] = pFramesIn[iFrame*2 + 1] * factor; + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + pFramesOut[iFrame*2 + 0] = pFramesIn[iFrame*2 + 0]; + pFramesOut[iFrame*2 + 1] = pFramesIn[iFrame*2 + 1] * factor; + } + } + } +} + +static void ma_stereo_balance_pcm_frames(void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount, ma_format format, float pan) +{ + if (pan == 0) { + /* Fast path. No panning required. */ + if (pFramesOut == pFramesIn) { + /* No-op */ + } else { + ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, format, 2); + } + + return; + } + + switch (format) { + case ma_format_f32: ma_stereo_balance_pcm_frames_f32((float*)pFramesOut, (float*)pFramesIn, frameCount, pan); break; + + /* Unknown format. Just copy. 
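Only f32 has a dedicated balancing path, so other formats are passed through with no attenuation applied.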
*/ + default: + { + ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, format, 2); + } break; + } +} + + +static void ma_stereo_pan_pcm_frames_f32(float* pFramesOut, const float* pFramesIn, ma_uint64 frameCount, float pan) +{ + ma_uint64 iFrame; + + if (pan > 0) { + float factorL0 = 1.0f - pan; + float factorL1 = 0.0f + pan; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float sample0 = (pFramesIn[iFrame*2 + 0] * factorL0); + float sample1 = (pFramesIn[iFrame*2 + 0] * factorL1) + pFramesIn[iFrame*2 + 1]; + + pFramesOut[iFrame*2 + 0] = sample0; + pFramesOut[iFrame*2 + 1] = sample1; + } + } else { + float factorR0 = 0.0f - pan; + float factorR1 = 1.0f + pan; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float sample0 = pFramesIn[iFrame*2 + 0] + (pFramesIn[iFrame*2 + 1] * factorR0); + float sample1 = (pFramesIn[iFrame*2 + 1] * factorR1); + + pFramesOut[iFrame*2 + 0] = sample0; + pFramesOut[iFrame*2 + 1] = sample1; + } + } +} + +static void ma_stereo_pan_pcm_frames(void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount, ma_format format, float pan) +{ + if (pan == 0) { + /* Fast path. No panning required. */ + if (pFramesOut == pFramesIn) { + /* No-op */ + } else { + ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, format, 2); + } + + return; + } + + switch (format) { + case ma_format_f32: ma_stereo_pan_pcm_frames_f32((float*)pFramesOut, (float*)pFramesIn, frameCount, pan); break; + + /* Unknown format. Just copy. */ + default: + { + ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, format, 2); + } break; + } +} + +MA_API ma_result ma_panner_process_pcm_frames(ma_panner* pPanner, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pPanner == NULL || pFramesOut == NULL || pFramesIn == NULL) { + return MA_INVALID_ARGS; + } + + if (pPanner->channels == 2) { + /* Stereo case. For now assume channel 0 is left and channel right is 1, but should probably add support for a channel map. */ + if (pPanner->mode == ma_pan_mode_balance) { + ma_stereo_balance_pcm_frames(pFramesOut, pFramesIn, frameCount, pPanner->format, pPanner->pan); + } else { + ma_stereo_pan_pcm_frames(pFramesOut, pFramesIn, frameCount, pPanner->format, pPanner->pan); + } + } else { + if (pPanner->channels == 1) { + /* Panning has no effect on mono streams. */ + ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, pPanner->format, pPanner->channels); + } else { + /* For now we're not going to support non-stereo set ups. Not sure how I want to handle this case just yet. 
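The frames are simply copied through, so for these channel counts the panner is effectively a passthrough.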
*/ + ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, pPanner->format, pPanner->channels); + } + } + + return MA_SUCCESS; +} + +MA_API void ma_panner_set_mode(ma_panner* pPanner, ma_pan_mode mode) +{ + if (pPanner == NULL) { + return; + } + + pPanner->mode = mode; +} + +MA_API ma_pan_mode ma_panner_get_mode(const ma_panner* pPanner) +{ + if (pPanner == NULL) { + return ma_pan_mode_balance; + } + + return pPanner->mode; +} + +MA_API void ma_panner_set_pan(ma_panner* pPanner, float pan) +{ + if (pPanner == NULL) { + return; + } + + pPanner->pan = ma_clamp(pan, -1.0f, 1.0f); +} + +MA_API float ma_panner_get_pan(const ma_panner* pPanner) +{ + if (pPanner == NULL) { + return 0; + } + + return pPanner->pan; +} + + + + +MA_API ma_fader_config ma_fader_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate) +{ + ma_fader_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + + return config; +} + + +MA_API ma_result ma_fader_init(const ma_fader_config* pConfig, ma_fader* pFader) +{ + if (pFader == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFader); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Only f32 is supported for now. */ + if (pConfig->format != ma_format_f32) { + return MA_INVALID_ARGS; + } + + pFader->config = *pConfig; + pFader->volumeBeg = 1; + pFader->volumeEnd = 1; + pFader->lengthInFrames = 0; + pFader->cursorInFrames = 0; + + return MA_SUCCESS; +} + +MA_API ma_result ma_fader_process_pcm_frames(ma_fader* pFader, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pFader == NULL) { + return MA_INVALID_ARGS; + } + + /* If the cursor is still negative we need to just copy the absolute number of those frames, but no more than frameCount. */ + if (pFader->cursorInFrames < 0) { + ma_uint64 absCursorInFrames = (ma_uint64)0 - pFader->cursorInFrames; + if (absCursorInFrames > frameCount) { + absCursorInFrames = frameCount; + } + + ma_copy_pcm_frames(pFramesOut, pFramesIn, absCursorInFrames, pFader->config.format, pFader->config.channels); + + pFader->cursorInFrames += absCursorInFrames; + frameCount -= absCursorInFrames; + pFramesOut = ma_offset_ptr(pFramesOut, ma_get_bytes_per_frame(pFader->config.format, pFader->config.channels)*absCursorInFrames); + pFramesIn = ma_offset_ptr(pFramesIn, ma_get_bytes_per_frame(pFader->config.format, pFader->config.channels)*absCursorInFrames); + } + + if (pFader->cursorInFrames >= 0) { + /* + For now we need to clamp frameCount so that the cursor never overflows 32-bits. This is required for + the conversion to a float which we use for the linear interpolation. This might be changed later. + */ + if (frameCount + pFader->cursorInFrames > UINT_MAX) { + frameCount = UINT_MAX - pFader->cursorInFrames; + } + + /* Optimized path if volumeBeg and volumeEnd are equal. */ + if (pFader->volumeBeg == pFader->volumeEnd) { + if (pFader->volumeBeg == 1) { + /* Straight copy. */ + ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, pFader->config.format, pFader->config.channels); + } else { + /* Copy with volume. */ + ma_copy_and_apply_volume_and_clip_pcm_frames(pFramesOut, pFramesIn, frameCount, pFader->config.format, pFader->config.channels, pFader->volumeBeg); + } + } else { + /* Slower path. Volumes are different, so may need to do an interpolation. */ + if ((ma_uint64)pFader->cursorInFrames >= pFader->lengthInFrames) { + /* Fast path. 
We've gone past the end of the fade period so just apply the end volume to all samples. */ + ma_copy_and_apply_volume_and_clip_pcm_frames(pFramesOut, pFramesIn, frameCount, pFader->config.format, pFader->config.channels, pFader->volumeEnd); + } else { + /* Slow path. This is where we do the actual fading. */ + ma_uint64 iFrame; + ma_uint32 iChannel; + + /* For now we only support f32. Support for other formats might be added later. */ + if (pFader->config.format == ma_format_f32) { + const float* pFramesInF32 = (const float*)pFramesIn; + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float a = (ma_uint32)ma_min(pFader->cursorInFrames + iFrame, pFader->lengthInFrames) / (float)((ma_uint32)pFader->lengthInFrames); /* Safe cast due to the frameCount clamp at the top of this function. */ + float volume = ma_mix_f32_fast(pFader->volumeBeg, pFader->volumeEnd, a); + + for (iChannel = 0; iChannel < pFader->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pFader->config.channels + iChannel] = pFramesInF32[iFrame*pFader->config.channels + iChannel] * volume; + } + } + } else { + return MA_NOT_IMPLEMENTED; + } + } + } + } + + pFader->cursorInFrames += frameCount; + + return MA_SUCCESS; +} + +MA_API void ma_fader_get_data_format(const ma_fader* pFader, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate) +{ + if (pFader == NULL) { + return; + } + + if (pFormat != NULL) { + *pFormat = pFader->config.format; + } + + if (pChannels != NULL) { + *pChannels = pFader->config.channels; + } + + if (pSampleRate != NULL) { + *pSampleRate = pFader->config.sampleRate; + } +} + +MA_API void ma_fader_set_fade(ma_fader* pFader, float volumeBeg, float volumeEnd, ma_uint64 lengthInFrames) +{ + ma_fader_set_fade_ex(pFader, volumeBeg, volumeEnd, lengthInFrames, 0); +} + +MA_API void ma_fader_set_fade_ex(ma_fader* pFader, float volumeBeg, float volumeEnd, ma_uint64 lengthInFrames, ma_int64 startOffsetInFrames) +{ + if (pFader == NULL) { + return; + } + + /* If the volume is negative, use current volume. */ + if (volumeBeg < 0) { + volumeBeg = ma_fader_get_current_volume(pFader); + } + + /* + The length needs to be clamped to 32-bits due to how we convert it to a float for linear + interpolation reasons. I might change this requirement later, but for now it's not important. + */ + if (lengthInFrames > UINT_MAX) { + lengthInFrames = UINT_MAX; + } + + /* The start offset needs to be clamped to ensure it doesn't overflow a signed number. */ + if (startOffsetInFrames > INT_MAX) { + startOffsetInFrames = INT_MAX; + } + + pFader->volumeBeg = volumeBeg; + pFader->volumeEnd = volumeEnd; + pFader->lengthInFrames = lengthInFrames; + pFader->cursorInFrames = -startOffsetInFrames; +} + +MA_API float ma_fader_get_current_volume(const ma_fader* pFader) +{ + if (pFader == NULL) { + return 0.0f; + } + + /* Any frames prior to the start of the fade period will be at unfaded volume. */ + if (pFader->cursorInFrames < 0) { + return 1.0f; + } + + /* The current volume depends on the position of the cursor. */ + if (pFader->cursorInFrames == 0) { + return pFader->volumeBeg; + } else if ((ma_uint64)pFader->cursorInFrames >= pFader->lengthInFrames) { /* Safe case because the < 0 case was checked above. */ + return pFader->volumeEnd; + } else { + /* The cursor is somewhere inside the fading period. We can figure this out with a simple linear interpoluation between volumeBeg and volumeEnd based on our cursor position. 
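+For example, when fading from volumeBeg = 0 to volumeEnd = 1 over lengthInFrames = 48000, a cursor of
+12000 frames gives a mix factor of 12000/48000 = 0.25 and therefore a current volume of 0.25.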
*/ + return ma_mix_f32_fast(pFader->volumeBeg, pFader->volumeEnd, (ma_uint32)pFader->cursorInFrames / (float)((ma_uint32)pFader->lengthInFrames)); /* Safe cast to uint32 because we clamp it in ma_fader_process_pcm_frames(). */ + } +} + + + + + +MA_API ma_vec3f ma_vec3f_init_3f(float x, float y, float z) +{ + ma_vec3f v; + + v.x = x; + v.y = y; + v.z = z; + + return v; +} + +MA_API ma_vec3f ma_vec3f_sub(ma_vec3f a, ma_vec3f b) +{ + return ma_vec3f_init_3f( + a.x - b.x, + a.y - b.y, + a.z - b.z + ); +} + +MA_API ma_vec3f ma_vec3f_neg(ma_vec3f a) +{ + return ma_vec3f_init_3f( + -a.x, + -a.y, + -a.z + ); +} + +MA_API float ma_vec3f_dot(ma_vec3f a, ma_vec3f b) +{ + return a.x*b.x + a.y*b.y + a.z*b.z; +} + +MA_API float ma_vec3f_len2(ma_vec3f v) +{ + return ma_vec3f_dot(v, v); +} + +MA_API float ma_vec3f_len(ma_vec3f v) +{ + return (float)ma_sqrtd(ma_vec3f_len2(v)); +} + + + +MA_API float ma_vec3f_dist(ma_vec3f a, ma_vec3f b) +{ + return ma_vec3f_len(ma_vec3f_sub(a, b)); +} + +MA_API ma_vec3f ma_vec3f_normalize(ma_vec3f v) +{ + float invLen; + float len2 = ma_vec3f_len2(v); + if (len2 == 0) { + return ma_vec3f_init_3f(0, 0, 0); + } + + invLen = ma_rsqrtf(len2); + v.x *= invLen; + v.y *= invLen; + v.z *= invLen; + + return v; +} + +MA_API ma_vec3f ma_vec3f_cross(ma_vec3f a, ma_vec3f b) +{ + return ma_vec3f_init_3f( + a.y*b.z - a.z*b.y, + a.z*b.x - a.x*b.z, + a.x*b.y - a.y*b.x + ); +} + + +MA_API void ma_atomic_vec3f_init(ma_atomic_vec3f* v, ma_vec3f value) +{ + v->v = value; + v->lock = 0; /* Important this is initialized to 0. */ +} + +MA_API void ma_atomic_vec3f_set(ma_atomic_vec3f* v, ma_vec3f value) +{ + ma_spinlock_lock(&v->lock); + { + v->v = value; + } + ma_spinlock_unlock(&v->lock); +} + +MA_API ma_vec3f ma_atomic_vec3f_get(ma_atomic_vec3f* v) +{ + ma_vec3f r; + + ma_spinlock_lock(&v->lock); + { + r = v->v; + } + ma_spinlock_unlock(&v->lock); + + return r; +} + + + +static void ma_channel_map_apply_f32(float* pFramesOut, const ma_channel* pChannelMapOut, ma_uint32 channelsOut, const float* pFramesIn, const ma_channel* pChannelMapIn, ma_uint32 channelsIn, ma_uint64 frameCount, ma_channel_mix_mode mode, ma_mono_expansion_mode monoExpansionMode); +static ma_bool32 ma_is_spatial_channel_position(ma_channel channelPosition); + + +#ifndef MA_DEFAULT_SPEED_OF_SOUND +#define MA_DEFAULT_SPEED_OF_SOUND 343.3f +#endif + +/* +These vectors represent the direction that speakers are facing from the center point. They're used +for panning in the spatializer. Must be normalized. 
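+
+For example, MA_CHANNEL_FRONT_LEFT uses (-0.7071, 0, -0.7071), the unit vector pointing 45 degrees to
+the left of straight ahead (forward being -Z in the default right handed setup), whereas channels with
+no meaningful direction (NONE, MONO, LFE and the AUX channels) simply point straight ahead.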
+*/ +static ma_vec3f g_maChannelDirections[MA_CHANNEL_POSITION_COUNT] = { + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_NONE */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_MONO */ + {-0.7071f, 0.0f, -0.7071f }, /* MA_CHANNEL_FRONT_LEFT */ + {+0.7071f, 0.0f, -0.7071f }, /* MA_CHANNEL_FRONT_RIGHT */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_FRONT_CENTER */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_LFE */ + {-0.7071f, 0.0f, +0.7071f }, /* MA_CHANNEL_BACK_LEFT */ + {+0.7071f, 0.0f, +0.7071f }, /* MA_CHANNEL_BACK_RIGHT */ + {-0.3162f, 0.0f, -0.9487f }, /* MA_CHANNEL_FRONT_LEFT_CENTER */ + {+0.3162f, 0.0f, -0.9487f }, /* MA_CHANNEL_FRONT_RIGHT_CENTER */ + { 0.0f, 0.0f, +1.0f }, /* MA_CHANNEL_BACK_CENTER */ + {-1.0f, 0.0f, 0.0f }, /* MA_CHANNEL_SIDE_LEFT */ + {+1.0f, 0.0f, 0.0f }, /* MA_CHANNEL_SIDE_RIGHT */ + { 0.0f, +1.0f, 0.0f }, /* MA_CHANNEL_TOP_CENTER */ + {-0.5774f, +0.5774f, -0.5774f }, /* MA_CHANNEL_TOP_FRONT_LEFT */ + { 0.0f, +0.7071f, -0.7071f }, /* MA_CHANNEL_TOP_FRONT_CENTER */ + {+0.5774f, +0.5774f, -0.5774f }, /* MA_CHANNEL_TOP_FRONT_RIGHT */ + {-0.5774f, +0.5774f, +0.5774f }, /* MA_CHANNEL_TOP_BACK_LEFT */ + { 0.0f, +0.7071f, +0.7071f }, /* MA_CHANNEL_TOP_BACK_CENTER */ + {+0.5774f, +0.5774f, +0.5774f }, /* MA_CHANNEL_TOP_BACK_RIGHT */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_0 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_1 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_2 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_3 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_4 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_5 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_6 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_7 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_8 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_9 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_10 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_11 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_12 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_13 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_14 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_15 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_16 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_17 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_18 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_19 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_20 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_21 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_22 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_23 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_24 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_25 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_26 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_27 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_28 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_29 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_30 */ + { 0.0f, 0.0f, -1.0f } /* MA_CHANNEL_AUX_31 */ +}; + +static ma_vec3f ma_get_channel_direction(ma_channel channel) +{ + if (channel >= MA_CHANNEL_POSITION_COUNT) { + return ma_vec3f_init_3f(0, 0, -1); + } else { + return g_maChannelDirections[channel]; + } +} + + + +static float ma_attenuation_inverse(float distance, float minDistance, float maxDistance, float rolloff) +{ + if (minDistance >= maxDistance) { + return 1; /* To avoid division by zero. Do not attenuate. */ + } + + return minDistance / (minDistance + rolloff * (ma_clamp(distance, minDistance, maxDistance) - minDistance)); +} + +static float ma_attenuation_linear(float distance, float minDistance, float maxDistance, float rolloff) +{ + if (minDistance >= maxDistance) { + return 1; /* To avoid division by zero. 
Do not attenuate. */ + } + + return 1 - rolloff * (ma_clamp(distance, minDistance, maxDistance) - minDistance) / (maxDistance - minDistance); +} + +static float ma_attenuation_exponential(float distance, float minDistance, float maxDistance, float rolloff) +{ + if (minDistance >= maxDistance) { + return 1; /* To avoid division by zero. Do not attenuate. */ + } + + return (float)ma_powd(ma_clamp(distance, minDistance, maxDistance) / minDistance, -rolloff); +} + + +/* +Doppler Effect calculation taken from the OpenAL spec, with two main differences: + +1) The source to listener vector will have already been calculated at an earlier step so we can +just use that directly. We need only the position of the source relative to the origin. + +2) We don't scale by a frequency because we actually just want the ratio which we'll plug straight +into the resampler directly. +*/ +static float ma_doppler_pitch(ma_vec3f relativePosition, ma_vec3f sourceVelocity, ma_vec3f listenVelocity, float speedOfSound, float dopplerFactor) +{ + float len; + float vls; + float vss; + + len = ma_vec3f_len(relativePosition); + + /* + There's a case where the position of the source will be right on top of the listener in which + case the length will be 0 and we'll end up with a division by zero. We can just return a ratio + of 1.0 in this case. This is not considered in the OpenAL spec, but is necessary. + */ + if (len == 0) { + return 1.0; + } + + vls = ma_vec3f_dot(relativePosition, listenVelocity) / len; + vss = ma_vec3f_dot(relativePosition, sourceVelocity) / len; + + vls = ma_min(vls, speedOfSound / dopplerFactor); + vss = ma_min(vss, speedOfSound / dopplerFactor); + + return (speedOfSound - dopplerFactor*vls) / (speedOfSound - dopplerFactor*vss); +} + + +static void ma_get_default_channel_map_for_spatializer(ma_channel* pChannelMap, size_t channelMapCap, ma_uint32 channelCount) +{ + /* + Special case for stereo. Want to default the left and right speakers to side left and side + right so that they're facing directly down the X axis rather than slightly forward. Not + doing this will result in sounds being quieter when behind the listener. This might + actually be good for some scenarios, but I don't think it's an appropriate default because + it can be a bit unexpected. + */ + if (channelCount == 2) { + pChannelMap[0] = MA_CHANNEL_SIDE_LEFT; + pChannelMap[1] = MA_CHANNEL_SIDE_RIGHT; + } else { + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, channelCount); + } +} + + +MA_API ma_spatializer_listener_config ma_spatializer_listener_config_init(ma_uint32 channelsOut) +{ + ma_spatializer_listener_config config; + + MA_ZERO_OBJECT(&config); + config.channelsOut = channelsOut; + config.pChannelMapOut = NULL; + config.handedness = ma_handedness_right; + config.worldUp = ma_vec3f_init_3f(0, 1, 0); + config.coneInnerAngleInRadians = 6.283185f; /* 360 degrees. */ + config.coneOuterAngleInRadians = 6.283185f; /* 360 degrees. */ + config.coneOuterGain = 0; + config.speedOfSound = 343.3f; /* Same as OpenAL. Used for doppler effect. 
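+343.3 is roughly the speed of sound in dry air at around 20 degrees celsius, in metres per second.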
*/ + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t channelMapOutOffset; +} ma_spatializer_listener_heap_layout; + +static ma_result ma_spatializer_listener_get_heap_layout(const ma_spatializer_listener_config* pConfig, ma_spatializer_listener_heap_layout* pHeapLayout) +{ + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channelsOut == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* Channel map. We always need this, even for passthroughs. */ + pHeapLayout->channelMapOutOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(sizeof(*pConfig->pChannelMapOut) * pConfig->channelsOut); + + return MA_SUCCESS; +} + + +MA_API ma_result ma_spatializer_listener_get_heap_size(const ma_spatializer_listener_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_spatializer_listener_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_spatializer_listener_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_spatializer_listener_init_preallocated(const ma_spatializer_listener_config* pConfig, void* pHeap, ma_spatializer_listener* pListener) +{ + ma_result result; + ma_spatializer_listener_heap_layout heapLayout; + + if (pListener == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pListener); + + result = ma_spatializer_listener_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pListener->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pListener->config = *pConfig; + ma_atomic_vec3f_init(&pListener->position, ma_vec3f_init_3f(0, 0, 0)); + ma_atomic_vec3f_init(&pListener->direction, ma_vec3f_init_3f(0, 0, -1)); + ma_atomic_vec3f_init(&pListener->velocity, ma_vec3f_init_3f(0, 0, 0)); + pListener->isEnabled = MA_TRUE; + + /* Swap the forward direction if we're left handed (it was initialized based on right handed). */ + if (pListener->config.handedness == ma_handedness_left) { + ma_vec3f negDir = ma_vec3f_neg(ma_spatializer_listener_get_direction(pListener)); + ma_spatializer_listener_set_direction(pListener, negDir.x, negDir.y, negDir.z); + } + + + /* We must always have a valid channel map. */ + pListener->config.pChannelMapOut = (ma_channel*)ma_offset_ptr(pHeap, heapLayout.channelMapOutOffset); + + /* Use a slightly different default channel map for stereo. 
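+If the caller did not provide a channel map we fall back to ma_get_default_channel_map_for_spatializer()
+above, which maps stereo to SIDE_LEFT/SIDE_RIGHT so the speakers face straight down the X axis; otherwise
+the caller's map is copied.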
*/ + if (pConfig->pChannelMapOut == NULL) { + ma_get_default_channel_map_for_spatializer(pListener->config.pChannelMapOut, pConfig->channelsOut, pConfig->channelsOut); + } else { + ma_channel_map_copy_or_default(pListener->config.pChannelMapOut, pConfig->channelsOut, pConfig->pChannelMapOut, pConfig->channelsOut); + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_spatializer_listener_init(const ma_spatializer_listener_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_spatializer_listener* pListener) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_spatializer_listener_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_spatializer_listener_init_preallocated(pConfig, pHeap, pListener); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pListener->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_spatializer_listener_uninit(ma_spatializer_listener* pListener, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pListener == NULL) { + return; + } + + if (pListener->_ownsHeap) { + ma_free(pListener->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_channel* ma_spatializer_listener_get_channel_map(ma_spatializer_listener* pListener) +{ + if (pListener == NULL) { + return NULL; + } + + return pListener->config.pChannelMapOut; +} + +MA_API void ma_spatializer_listener_set_cone(ma_spatializer_listener* pListener, float innerAngleInRadians, float outerAngleInRadians, float outerGain) +{ + if (pListener == NULL) { + return; + } + + pListener->config.coneInnerAngleInRadians = innerAngleInRadians; + pListener->config.coneOuterAngleInRadians = outerAngleInRadians; + pListener->config.coneOuterGain = outerGain; +} + +MA_API void ma_spatializer_listener_get_cone(const ma_spatializer_listener* pListener, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain) +{ + if (pListener == NULL) { + return; + } + + if (pInnerAngleInRadians != NULL) { + *pInnerAngleInRadians = pListener->config.coneInnerAngleInRadians; + } + + if (pOuterAngleInRadians != NULL) { + *pOuterAngleInRadians = pListener->config.coneOuterAngleInRadians; + } + + if (pOuterGain != NULL) { + *pOuterGain = pListener->config.coneOuterGain; + } +} + +MA_API void ma_spatializer_listener_set_position(ma_spatializer_listener* pListener, float x, float y, float z) +{ + if (pListener == NULL) { + return; + } + + ma_atomic_vec3f_set(&pListener->position, ma_vec3f_init_3f(x, y, z)); +} + +MA_API ma_vec3f ma_spatializer_listener_get_position(const ma_spatializer_listener* pListener) +{ + if (pListener == NULL) { + return ma_vec3f_init_3f(0, 0, 0); + } + + return ma_atomic_vec3f_get((ma_atomic_vec3f*)&pListener->position); /* Naughty const-cast. It's just for atomically loading the vec3 which should be safe. 
*/ +} + +MA_API void ma_spatializer_listener_set_direction(ma_spatializer_listener* pListener, float x, float y, float z) +{ + if (pListener == NULL) { + return; + } + + ma_atomic_vec3f_set(&pListener->direction, ma_vec3f_init_3f(x, y, z)); +} + +MA_API ma_vec3f ma_spatializer_listener_get_direction(const ma_spatializer_listener* pListener) +{ + if (pListener == NULL) { + return ma_vec3f_init_3f(0, 0, -1); + } + + return ma_atomic_vec3f_get((ma_atomic_vec3f*)&pListener->direction); /* Naughty const-cast. It's just for atomically loading the vec3 which should be safe. */ +} + +MA_API void ma_spatializer_listener_set_velocity(ma_spatializer_listener* pListener, float x, float y, float z) +{ + if (pListener == NULL) { + return; + } + + ma_atomic_vec3f_set(&pListener->velocity, ma_vec3f_init_3f(x, y, z)); +} + +MA_API ma_vec3f ma_spatializer_listener_get_velocity(const ma_spatializer_listener* pListener) +{ + if (pListener == NULL) { + return ma_vec3f_init_3f(0, 0, 0); + } + + return ma_atomic_vec3f_get((ma_atomic_vec3f*)&pListener->velocity); /* Naughty const-cast. It's just for atomically loading the vec3 which should be safe. */ +} + +MA_API void ma_spatializer_listener_set_speed_of_sound(ma_spatializer_listener* pListener, float speedOfSound) +{ + if (pListener == NULL) { + return; + } + + pListener->config.speedOfSound = speedOfSound; +} + +MA_API float ma_spatializer_listener_get_speed_of_sound(const ma_spatializer_listener* pListener) +{ + if (pListener == NULL) { + return 0; + } + + return pListener->config.speedOfSound; +} + +MA_API void ma_spatializer_listener_set_world_up(ma_spatializer_listener* pListener, float x, float y, float z) +{ + if (pListener == NULL) { + return; + } + + pListener->config.worldUp = ma_vec3f_init_3f(x, y, z); +} + +MA_API ma_vec3f ma_spatializer_listener_get_world_up(const ma_spatializer_listener* pListener) +{ + if (pListener == NULL) { + return ma_vec3f_init_3f(0, 1, 0); + } + + return pListener->config.worldUp; +} + +MA_API void ma_spatializer_listener_set_enabled(ma_spatializer_listener* pListener, ma_bool32 isEnabled) +{ + if (pListener == NULL) { + return; + } + + pListener->isEnabled = isEnabled; +} + +MA_API ma_bool32 ma_spatializer_listener_is_enabled(const ma_spatializer_listener* pListener) +{ + if (pListener == NULL) { + return MA_FALSE; + } + + return pListener->isEnabled; +} + + + + +MA_API ma_spatializer_config ma_spatializer_config_init(ma_uint32 channelsIn, ma_uint32 channelsOut) +{ + ma_spatializer_config config; + + MA_ZERO_OBJECT(&config); + config.channelsIn = channelsIn; + config.channelsOut = channelsOut; + config.pChannelMapIn = NULL; + config.attenuationModel = ma_attenuation_model_inverse; + config.positioning = ma_positioning_absolute; + config.handedness = ma_handedness_right; + config.minGain = 0; + config.maxGain = 1; + config.minDistance = 1; + config.maxDistance = MA_FLT_MAX; + config.rolloff = 1; + config.coneInnerAngleInRadians = 6.283185f; /* 360 degrees. */ + config.coneOuterAngleInRadians = 6.283185f; /* 360 degress. */ + config.coneOuterGain = 0.0f; + config.dopplerFactor = 1; + config.directionalAttenuationFactor = 1; + config.minSpatializationChannelGain = 0.2f; + config.gainSmoothTimeInFrames = 360; /* 7.5ms @ 48K. 
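+(360 frames / 48000 frames per second = 0.0075 seconds.)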
*/ + + return config; +} + + +static ma_gainer_config ma_spatializer_gainer_config_init(const ma_spatializer_config* pConfig) +{ + MA_ASSERT(pConfig != NULL); + return ma_gainer_config_init(pConfig->channelsOut, pConfig->gainSmoothTimeInFrames); +} + +static ma_result ma_spatializer_validate_config(const ma_spatializer_config* pConfig) +{ + MA_ASSERT(pConfig != NULL); + + if (pConfig->channelsIn == 0 || pConfig->channelsOut == 0) { + return MA_INVALID_ARGS; + } + + return MA_SUCCESS; +} + +typedef struct +{ + size_t sizeInBytes; + size_t channelMapInOffset; + size_t newChannelGainsOffset; + size_t gainerOffset; +} ma_spatializer_heap_layout; + +static ma_result ma_spatializer_get_heap_layout(const ma_spatializer_config* pConfig, ma_spatializer_heap_layout* pHeapLayout) +{ + ma_result result; + + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_spatializer_validate_config(pConfig); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes = 0; + + /* Channel map. */ + pHeapLayout->channelMapInOffset = MA_SIZE_MAX; /* <-- MA_SIZE_MAX indicates no allocation necessary. */ + if (pConfig->pChannelMapIn != NULL) { + pHeapLayout->channelMapInOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(sizeof(*pConfig->pChannelMapIn) * pConfig->channelsIn); + } + + /* New channel gains for output. */ + pHeapLayout->newChannelGainsOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(sizeof(float) * pConfig->channelsOut); + + /* Gainer. */ + { + size_t gainerHeapSizeInBytes; + ma_gainer_config gainerConfig; + + gainerConfig = ma_spatializer_gainer_config_init(pConfig); + + result = ma_gainer_get_heap_size(&gainerConfig, &gainerHeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->gainerOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(gainerHeapSizeInBytes); + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_spatializer_get_heap_size(const ma_spatializer_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_spatializer_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; /* Safety. 
*/ + + result = ma_spatializer_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + + +MA_API ma_result ma_spatializer_init_preallocated(const ma_spatializer_config* pConfig, void* pHeap, ma_spatializer* pSpatializer) +{ + ma_result result; + ma_spatializer_heap_layout heapLayout; + ma_gainer_config gainerConfig; + + if (pSpatializer == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pSpatializer); + + if (pConfig == NULL || pHeap == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_spatializer_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pSpatializer->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pSpatializer->channelsIn = pConfig->channelsIn; + pSpatializer->channelsOut = pConfig->channelsOut; + pSpatializer->attenuationModel = pConfig->attenuationModel; + pSpatializer->positioning = pConfig->positioning; + pSpatializer->handedness = pConfig->handedness; + pSpatializer->minGain = pConfig->minGain; + pSpatializer->maxGain = pConfig->maxGain; + pSpatializer->minDistance = pConfig->minDistance; + pSpatializer->maxDistance = pConfig->maxDistance; + pSpatializer->rolloff = pConfig->rolloff; + pSpatializer->coneInnerAngleInRadians = pConfig->coneInnerAngleInRadians; + pSpatializer->coneOuterAngleInRadians = pConfig->coneOuterAngleInRadians; + pSpatializer->coneOuterGain = pConfig->coneOuterGain; + pSpatializer->dopplerFactor = pConfig->dopplerFactor; + pSpatializer->minSpatializationChannelGain = pConfig->minSpatializationChannelGain; + pSpatializer->directionalAttenuationFactor = pConfig->directionalAttenuationFactor; + pSpatializer->gainSmoothTimeInFrames = pConfig->gainSmoothTimeInFrames; + ma_atomic_vec3f_init(&pSpatializer->position, ma_vec3f_init_3f(0, 0, 0)); + ma_atomic_vec3f_init(&pSpatializer->direction, ma_vec3f_init_3f(0, 0, -1)); + ma_atomic_vec3f_init(&pSpatializer->velocity, ma_vec3f_init_3f(0, 0, 0)); + pSpatializer->dopplerPitch = 1; + + /* Swap the forward direction if we're left handed (it was initialized based on right handed). */ + if (pSpatializer->handedness == ma_handedness_left) { + ma_vec3f negDir = ma_vec3f_neg(ma_spatializer_get_direction(pSpatializer)); + ma_spatializer_set_direction(pSpatializer, negDir.x, negDir.y, negDir.z); + } + + /* Channel map. This will be on the heap. */ + if (pConfig->pChannelMapIn != NULL) { + pSpatializer->pChannelMapIn = (ma_channel*)ma_offset_ptr(pHeap, heapLayout.channelMapInOffset); + ma_channel_map_copy_or_default(pSpatializer->pChannelMapIn, pSpatializer->channelsIn, pConfig->pChannelMapIn, pSpatializer->channelsIn); + } + + /* New channel gains for output channels. */ + pSpatializer->pNewChannelGainsOut = (float*)ma_offset_ptr(pHeap, heapLayout.newChannelGainsOffset); + + /* Gainer. */ + gainerConfig = ma_spatializer_gainer_config_init(pConfig); + + result = ma_gainer_init_preallocated(&gainerConfig, ma_offset_ptr(pHeap, heapLayout.gainerOffset), &pSpatializer->gainer); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the gainer. */ + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_spatializer_init(const ma_spatializer_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_spatializer* pSpatializer) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + /* We'll need a heap allocation to retrieve the size. 
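+In other words, we first query the required heap size, allocate that many bytes (if any), and then
+initialize against the preallocated heap, freeing it again if initialization fails.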
*/ + result = ma_spatializer_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_spatializer_init_preallocated(pConfig, pHeap, pSpatializer); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pSpatializer->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_spatializer_uninit(ma_spatializer* pSpatializer, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pSpatializer == NULL) { + return; + } + + ma_gainer_uninit(&pSpatializer->gainer, pAllocationCallbacks); + + if (pSpatializer->_ownsHeap) { + ma_free(pSpatializer->_pHeap, pAllocationCallbacks); + } +} + +static float ma_calculate_angular_gain(ma_vec3f dirA, ma_vec3f dirB, float coneInnerAngleInRadians, float coneOuterAngleInRadians, float coneOuterGain) +{ + /* + Angular attenuation. + + Unlike distance gain, the math for this is not specified by the OpenAL spec so we'll just go ahead and figure + this out for ourselves at the expense of possibly being inconsistent with other implementations. + + To do cone attenuation, I'm just using the same math that we'd use to implement a basic spotlight in OpenGL. We + just need to get the direction from the source to the listener and then do a dot product against that and the + direction of the spotlight. Then we just compare that dot product against the cosine of the inner and outer + angles. If the dot product is greater than the the outer angle, we just use coneOuterGain. If it's less than + the inner angle, we just use a gain of 1. Otherwise we linearly interpolate between 1 and coneOuterGain. + */ + if (coneInnerAngleInRadians < 6.283185f) { + float angularGain = 1; + float cutoffInner = (float)ma_cosd(coneInnerAngleInRadians*0.5f); + float cutoffOuter = (float)ma_cosd(coneOuterAngleInRadians*0.5f); + float d; + + d = ma_vec3f_dot(dirA, dirB); + + if (d > cutoffInner) { + /* It's inside the inner angle. */ + angularGain = 1; + } else { + /* It's outside the inner angle. */ + if (d > cutoffOuter) { + /* It's between the inner and outer angle. We need to linearly interpolate between 1 and coneOuterGain. */ + angularGain = ma_mix_f32(coneOuterGain, 1, (d - cutoffOuter) / (cutoffInner - cutoffOuter)); + } else { + /* It's outside the outer angle. */ + angularGain = coneOuterGain; + } + } + + /*printf("d = %f; cutoffInner = %f; cutoffOuter = %f; angularGain = %f\n", d, cutoffInner, cutoffOuter, angularGain);*/ + return angularGain; + } else { + /* Inner angle is 360 degrees so no need to do any attenuation. */ + return 1; + } +} + +MA_API ma_result ma_spatializer_process_pcm_frames(ma_spatializer* pSpatializer, ma_spatializer_listener* pListener, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_channel* pChannelMapIn = pSpatializer->pChannelMapIn; + ma_channel* pChannelMapOut = pListener->config.pChannelMapOut; + + if (pSpatializer == NULL) { + return MA_INVALID_ARGS; + } + + /* If we're not spatializing we need to run an optimized path. */ + if (ma_atomic_load_i32(&pSpatializer->attenuationModel) == ma_attenuation_model_none) { + if (ma_spatializer_listener_is_enabled(pListener)) { + /* No attenuation is required, but we'll need to do some channel conversion. 
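+When the input and output channel counts match this is just a straight copy; otherwise the frames are
+run through ma_channel_map_apply_f32() using the listener's output channel map.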
*/ + if (pSpatializer->channelsIn == pSpatializer->channelsOut) { + ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, ma_format_f32, pSpatializer->channelsIn); + } else { + ma_channel_map_apply_f32((float*)pFramesOut, pChannelMapOut, pSpatializer->channelsOut, (const float*)pFramesIn, pChannelMapIn, pSpatializer->channelsIn, frameCount, ma_channel_mix_mode_rectangular, ma_mono_expansion_mode_default); /* Safe casts to float* because f32 is the only supported format. */ + } + } else { + /* The listener is disabled. Output silence. */ + ma_silence_pcm_frames(pFramesOut, frameCount, ma_format_f32, pSpatializer->channelsOut); + } + + /* + We're not doing attenuation so don't bother with doppler for now. I'm not sure if this is + the correct thinking so might need to review this later. + */ + pSpatializer->dopplerPitch = 1; + } else { + /* + Let's first determine which listener the sound is closest to. Need to keep in mind that we + might not have a world or any listeners, in which case we just spatializer based on the + listener being positioned at the origin (0, 0, 0). + */ + ma_vec3f relativePosNormalized; + ma_vec3f relativePos; /* The position relative to the listener. */ + ma_vec3f relativeDir; /* The direction of the sound, relative to the listener. */ + ma_vec3f listenerVel; /* The volocity of the listener. For doppler pitch calculation. */ + float speedOfSound; + float distance = 0; + float gain = 1; + ma_uint32 iChannel; + const ma_uint32 channelsOut = pSpatializer->channelsOut; + const ma_uint32 channelsIn = pSpatializer->channelsIn; + float minDistance = ma_spatializer_get_min_distance(pSpatializer); + float maxDistance = ma_spatializer_get_max_distance(pSpatializer); + float rolloff = ma_spatializer_get_rolloff(pSpatializer); + float dopplerFactor = ma_spatializer_get_doppler_factor(pSpatializer); + + /* + We'll need the listener velocity for doppler pitch calculations. The speed of sound is + defined by the listener, so we'll grab that here too. + */ + if (pListener != NULL) { + listenerVel = ma_spatializer_listener_get_velocity(pListener); + speedOfSound = pListener->config.speedOfSound; + } else { + listenerVel = ma_vec3f_init_3f(0, 0, 0); + speedOfSound = MA_DEFAULT_SPEED_OF_SOUND; + } + + if (pListener == NULL || ma_spatializer_get_positioning(pSpatializer) == ma_positioning_relative) { + /* There's no listener or we're using relative positioning. */ + relativePos = ma_spatializer_get_position(pSpatializer); + relativeDir = ma_spatializer_get_direction(pSpatializer); + } else { + /* + We've found a listener and we're using absolute positioning. We need to transform the + sound's position and direction so that it's relative to listener. Later on we'll use + this for determining the factors to apply to each channel to apply the panning effect. + */ + ma_spatializer_get_relative_position_and_direction(pSpatializer, pListener, &relativePos, &relativeDir); + } + + distance = ma_vec3f_len(relativePos); + + /* We've gathered the data, so now we can apply some spatialization. 
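+With d being the distance clamped between minDistance and maxDistance, the models below compute the gain
+as follows: inverse is minDistance / (minDistance + rolloff * (d - minDistance)), linear is
+1 - rolloff * (d - minDistance) / (maxDistance - minDistance), and exponential is (d / minDistance)^(-rolloff).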
*/ + switch (ma_spatializer_get_attenuation_model(pSpatializer)) { + case ma_attenuation_model_inverse: + { + gain = ma_attenuation_inverse(distance, minDistance, maxDistance, rolloff); + } break; + case ma_attenuation_model_linear: + { + gain = ma_attenuation_linear(distance, minDistance, maxDistance, rolloff); + } break; + case ma_attenuation_model_exponential: + { + gain = ma_attenuation_exponential(distance, minDistance, maxDistance, rolloff); + } break; + case ma_attenuation_model_none: + default: + { + gain = 1; + } break; + } + + /* Normalize the position. */ + if (distance > 0.001f) { + float distanceInv = 1/distance; + relativePosNormalized = relativePos; + relativePosNormalized.x *= distanceInv; + relativePosNormalized.y *= distanceInv; + relativePosNormalized.z *= distanceInv; + } else { + distance = 0; + relativePosNormalized = ma_vec3f_init_3f(0, 0, 0); + } + + /* + Angular attenuation. + + Unlike distance gain, the math for this is not specified by the OpenAL spec so we'll just go ahead and figure + this out for ourselves at the expense of possibly being inconsistent with other implementations. + + To do cone attenuation, I'm just using the same math that we'd use to implement a basic spotlight in OpenGL. We + just need to get the direction from the source to the listener and then do a dot product against that and the + direction of the spotlight. Then we just compare that dot product against the cosine of the inner and outer + angles. If the dot product is greater than the the outer angle, we just use coneOuterGain. If it's less than + the inner angle, we just use a gain of 1. Otherwise we linearly interpolate between 1 and coneOuterGain. + */ + if (distance > 0) { + /* Source anglular gain. */ + float spatializerConeInnerAngle; + float spatializerConeOuterAngle; + float spatializerConeOuterGain; + ma_spatializer_get_cone(pSpatializer, &spatializerConeInnerAngle, &spatializerConeOuterAngle, &spatializerConeOuterGain); + + gain *= ma_calculate_angular_gain(relativeDir, ma_vec3f_neg(relativePosNormalized), spatializerConeInnerAngle, spatializerConeOuterAngle, spatializerConeOuterGain); + + /* + We're supporting angular gain on the listener as well for those who want to reduce the volume of sounds that + are positioned behind the listener. On default settings, this will have no effect. + */ + if (pListener != NULL && pListener->config.coneInnerAngleInRadians < 6.283185f) { + ma_vec3f listenerDirection; + float listenerInnerAngle; + float listenerOuterAngle; + float listenerOuterGain; + + if (pListener->config.handedness == ma_handedness_right) { + listenerDirection = ma_vec3f_init_3f(0, 0, -1); + } else { + listenerDirection = ma_vec3f_init_3f(0, 0, +1); + } + + listenerInnerAngle = pListener->config.coneInnerAngleInRadians; + listenerOuterAngle = pListener->config.coneOuterAngleInRadians; + listenerOuterGain = pListener->config.coneOuterGain; + + gain *= ma_calculate_angular_gain(listenerDirection, relativePosNormalized, listenerInnerAngle, listenerOuterAngle, listenerOuterGain); + } + } else { + /* The sound is right on top of the listener. Don't do any angular attenuation. */ + } + + + /* Clamp the gain. */ + gain = ma_clamp(gain, ma_spatializer_get_min_gain(pSpatializer), ma_spatializer_get_max_gain(pSpatializer)); + + /* + The gain needs to be applied per-channel here. 
The spatialization code below will be changing the per-channel + gains which will then eventually be passed into the gainer which will deal with smoothing the gain transitions + to avoid harsh changes in gain. + */ + for (iChannel = 0; iChannel < channelsOut; iChannel += 1) { + pSpatializer->pNewChannelGainsOut[iChannel] = gain; + } + + /* + Convert to our output channel count. If the listener is disabled we just output silence here. We cannot ignore + the whole section of code here because we need to update some internal spatialization state. + */ + if (ma_spatializer_listener_is_enabled(pListener)) { + ma_channel_map_apply_f32((float*)pFramesOut, pChannelMapOut, channelsOut, (const float*)pFramesIn, pChannelMapIn, channelsIn, frameCount, ma_channel_mix_mode_rectangular, ma_mono_expansion_mode_default); + } else { + ma_silence_pcm_frames(pFramesOut, frameCount, ma_format_f32, pSpatializer->channelsOut); + } + + + /* + Panning. This is where we'll apply the gain and convert to the output channel count. We have an optimized path for + when we're converting to a mono stream. In that case we don't really need to do any panning - we just apply the + gain to the final output. + */ + /*printf("distance=%f; gain=%f\n", distance, gain);*/ + + /* We must have a valid channel map here to ensure we spatialize properly. */ + MA_ASSERT(pChannelMapOut != NULL); + + /* + We're not converting to mono so we'll want to apply some panning. This is where the feeling of something being + to the left, right, infront or behind the listener is calculated. I'm just using a basic model here. Note that + the code below is not based on any specific algorithm. I'm just implementing this off the top of my head and + seeing how it goes. There might be better ways to do this. + + To determine the direction of the sound relative to a speaker I'm using dot products. Each speaker is given a + direction. For example, the left channel in a stereo system will be -1 on the X axis and the right channel will + be +1 on the X axis. A dot product is performed against the direction vector of the channel and the normalized + position of the sound. + */ + + /* + Calculate our per-channel gains. We do this based on the normalized relative position of the sound and it's + relation to the direction of the channel. + */ + if (distance > 0) { + ma_vec3f unitPos = relativePos; + float distanceInv = 1/distance; + unitPos.x *= distanceInv; + unitPos.y *= distanceInv; + unitPos.z *= distanceInv; + + for (iChannel = 0; iChannel < channelsOut; iChannel += 1) { + ma_channel channelOut; + float d; + float dMin; + + channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannel); + if (ma_is_spatial_channel_position(channelOut)) { + d = ma_mix_f32_fast(1, ma_vec3f_dot(unitPos, ma_get_channel_direction(channelOut)), ma_spatializer_get_directional_attenuation_factor(pSpatializer)); + } else { + d = 1; /* It's not a spatial channel so there's no real notion of direction. */ + } + + /* + In my testing, if the panning effect is too aggressive it makes spatialization feel uncomfortable. + The "dMin" variable below is used to control the aggressiveness of the panning effect. When set to + 0, panning will be most extreme and any sounds that are positioned on the opposite side of the + speaker will be completely silent from that speaker. Not only does this feel uncomfortable, it + doesn't even remotely represent the real world at all because sounds that come from your right side + are still clearly audible from your left side. 
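+As a rough illustration, with the default minSpatializationChannelGain of 0.2 a sound positioned directly
+opposite a speaker still contributes a gain of 0.2 to that speaker rather than dropping to complete silence.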
Setting "dMin" to 1 will result in no panning at + all, which is also not ideal. By setting it to something greater than 0, the spatialization effect + becomes much less dramatic and a lot more bearable. + + Summary: 0 = more extreme panning; 1 = no panning. + */ + dMin = pSpatializer->minSpatializationChannelGain; + + /* + At this point, "d" will be positive if the sound is on the same side as the channel and negative if + it's on the opposite side. It will be in the range of -1..1. There's two ways I can think of to + calculate a panning value. The first is to simply convert it to 0..1, however this has a problem + which I'm not entirely happy with. Considering a stereo system, when a sound is positioned right + in front of the listener it'll result in each speaker getting a gain of 0.5. I don't know if I like + the idea of having a scaling factor of 0.5 being applied to a sound when it's sitting right in front + of the listener. I would intuitively expect that to be played at full volume, or close to it. + + The second idea I think of is to only apply a reduction in gain when the sound is on the opposite + side of the speaker. That is, reduce the gain only when the dot product is negative. The problem + with this is that there will not be any attenuation as the sound sweeps around the 180 degrees + where the dot product is positive. The idea with this option is that you leave the gain at 1 when + the sound is being played on the same side as the speaker and then you just reduce the volume when + the sound is on the other side. + + The summarize, I think the first option should give a better sense of spatialization, but the second + option is better for preserving the sound's power. + + UPDATE: In my testing, I find the first option to sound better. You can feel the sense of space a + bit better, but you can also hear the reduction in volume when it's right in front. + */ +#if 1 + { + /* + Scale the dot product from -1..1 to 0..1. Will result in a sound directly in front losing power + by being played at 0.5 gain. + */ + d = (d + 1) * 0.5f; /* -1..1 to 0..1 */ + d = ma_max(d, dMin); + pSpatializer->pNewChannelGainsOut[iChannel] *= d; + } +#else + { + /* + Only reduce the volume of the sound if it's on the opposite side. This path keeps the volume more + consistent, but comes at the expense of a worse sense of space and positioning. + */ + if (d < 0) { + d += 1; /* Move into the positive range. */ + d = ma_max(d, dMin); + channelGainsOut[iChannel] *= d; + } + } +#endif + } + } else { + /* Assume the sound is right on top of us. Don't do any panning. */ + } + + /* Now we need to apply the volume to each channel. This needs to run through the gainer to ensure we get a smooth volume transition. */ + ma_gainer_set_gains(&pSpatializer->gainer, pSpatializer->pNewChannelGainsOut); + ma_gainer_process_pcm_frames(&pSpatializer->gainer, pFramesOut, pFramesOut, frameCount); + + /* + Before leaving we'll want to update our doppler pitch so that the caller can apply some + pitch shifting if they desire. Note that we need to negate the relative position here + because the doppler calculation needs to be source-to-listener, but ours is listener-to- + source. 
+ */ + if (dopplerFactor > 0) { + pSpatializer->dopplerPitch = ma_doppler_pitch(ma_vec3f_sub(ma_spatializer_listener_get_position(pListener), ma_spatializer_get_position(pSpatializer)), ma_spatializer_get_velocity(pSpatializer), listenerVel, speedOfSound, dopplerFactor); + } else { + pSpatializer->dopplerPitch = 1; + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_spatializer_set_master_volume(ma_spatializer* pSpatializer, float volume) +{ + if (pSpatializer == NULL) { + return MA_INVALID_ARGS; + } + + return ma_gainer_set_master_volume(&pSpatializer->gainer, volume); +} + +MA_API ma_result ma_spatializer_get_master_volume(const ma_spatializer* pSpatializer, float* pVolume) +{ + if (pSpatializer == NULL) { + return MA_INVALID_ARGS; + } + + return ma_gainer_get_master_volume(&pSpatializer->gainer, pVolume); +} + +MA_API ma_uint32 ma_spatializer_get_input_channels(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 0; + } + + return pSpatializer->channelsIn; +} + +MA_API ma_uint32 ma_spatializer_get_output_channels(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 0; + } + + return pSpatializer->channelsOut; +} + +MA_API void ma_spatializer_set_attenuation_model(ma_spatializer* pSpatializer, ma_attenuation_model attenuationModel) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_i32(&pSpatializer->attenuationModel, attenuationModel); +} + +MA_API ma_attenuation_model ma_spatializer_get_attenuation_model(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return ma_attenuation_model_none; + } + + return (ma_attenuation_model)ma_atomic_load_i32(&pSpatializer->attenuationModel); +} + +MA_API void ma_spatializer_set_positioning(ma_spatializer* pSpatializer, ma_positioning positioning) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_i32(&pSpatializer->positioning, positioning); +} + +MA_API ma_positioning ma_spatializer_get_positioning(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return ma_positioning_absolute; + } + + return (ma_positioning)ma_atomic_load_i32(&pSpatializer->positioning); +} + +MA_API void ma_spatializer_set_rolloff(ma_spatializer* pSpatializer, float rolloff) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_f32(&pSpatializer->rolloff, rolloff); +} + +MA_API float ma_spatializer_get_rolloff(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 0; + } + + return ma_atomic_load_f32(&pSpatializer->rolloff); +} + +MA_API void ma_spatializer_set_min_gain(ma_spatializer* pSpatializer, float minGain) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_f32(&pSpatializer->minGain, minGain); +} + +MA_API float ma_spatializer_get_min_gain(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 0; + } + + return ma_atomic_load_f32(&pSpatializer->minGain); +} + +MA_API void ma_spatializer_set_max_gain(ma_spatializer* pSpatializer, float maxGain) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_f32(&pSpatializer->maxGain, maxGain); +} + +MA_API float ma_spatializer_get_max_gain(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 0; + } + + return ma_atomic_load_f32(&pSpatializer->maxGain); +} + +MA_API void ma_spatializer_set_min_distance(ma_spatializer* pSpatializer, float minDistance) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_f32(&pSpatializer->minDistance, minDistance); +} + +MA_API float 
ma_spatializer_get_min_distance(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 0; + } + + return ma_atomic_load_f32(&pSpatializer->minDistance); +} + +MA_API void ma_spatializer_set_max_distance(ma_spatializer* pSpatializer, float maxDistance) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_f32(&pSpatializer->maxDistance, maxDistance); +} + +MA_API float ma_spatializer_get_max_distance(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 0; + } + + return ma_atomic_load_f32(&pSpatializer->maxDistance); +} + +MA_API void ma_spatializer_set_cone(ma_spatializer* pSpatializer, float innerAngleInRadians, float outerAngleInRadians, float outerGain) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_f32(&pSpatializer->coneInnerAngleInRadians, innerAngleInRadians); + ma_atomic_exchange_f32(&pSpatializer->coneOuterAngleInRadians, outerAngleInRadians); + ma_atomic_exchange_f32(&pSpatializer->coneOuterGain, outerGain); +} + +MA_API void ma_spatializer_get_cone(const ma_spatializer* pSpatializer, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain) +{ + if (pSpatializer == NULL) { + return; + } + + if (pInnerAngleInRadians != NULL) { + *pInnerAngleInRadians = ma_atomic_load_f32(&pSpatializer->coneInnerAngleInRadians); + } + + if (pOuterAngleInRadians != NULL) { + *pOuterAngleInRadians = ma_atomic_load_f32(&pSpatializer->coneOuterAngleInRadians); + } + + if (pOuterGain != NULL) { + *pOuterGain = ma_atomic_load_f32(&pSpatializer->coneOuterGain); + } +} + +MA_API void ma_spatializer_set_doppler_factor(ma_spatializer* pSpatializer, float dopplerFactor) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_f32(&pSpatializer->dopplerFactor, dopplerFactor); +} + +MA_API float ma_spatializer_get_doppler_factor(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 1; + } + + return ma_atomic_load_f32(&pSpatializer->dopplerFactor); +} + +MA_API void ma_spatializer_set_directional_attenuation_factor(ma_spatializer* pSpatializer, float directionalAttenuationFactor) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_f32(&pSpatializer->directionalAttenuationFactor, directionalAttenuationFactor); +} + +MA_API float ma_spatializer_get_directional_attenuation_factor(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 1; + } + + return ma_atomic_load_f32(&pSpatializer->directionalAttenuationFactor); +} + +MA_API void ma_spatializer_set_position(ma_spatializer* pSpatializer, float x, float y, float z) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_vec3f_set(&pSpatializer->position, ma_vec3f_init_3f(x, y, z)); +} + +MA_API ma_vec3f ma_spatializer_get_position(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return ma_vec3f_init_3f(0, 0, 0); + } + + return ma_atomic_vec3f_get((ma_atomic_vec3f*)&pSpatializer->position); /* Naughty const-cast. It's just for atomically loading the vec3 which should be safe. 
*/ +} + +MA_API void ma_spatializer_set_direction(ma_spatializer* pSpatializer, float x, float y, float z) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_vec3f_set(&pSpatializer->direction, ma_vec3f_init_3f(x, y, z)); +} + +MA_API ma_vec3f ma_spatializer_get_direction(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return ma_vec3f_init_3f(0, 0, -1); + } + + return ma_atomic_vec3f_get((ma_atomic_vec3f*)&pSpatializer->direction); /* Naughty const-cast. It's just for atomically loading the vec3 which should be safe. */ +} + +MA_API void ma_spatializer_set_velocity(ma_spatializer* pSpatializer, float x, float y, float z) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_vec3f_set(&pSpatializer->velocity, ma_vec3f_init_3f(x, y, z)); +} + +MA_API ma_vec3f ma_spatializer_get_velocity(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return ma_vec3f_init_3f(0, 0, 0); + } + + return ma_atomic_vec3f_get((ma_atomic_vec3f*)&pSpatializer->velocity); /* Naughty const-cast. It's just for atomically loading the vec3 which should be safe. */ +} + +MA_API void ma_spatializer_get_relative_position_and_direction(const ma_spatializer* pSpatializer, const ma_spatializer_listener* pListener, ma_vec3f* pRelativePos, ma_vec3f* pRelativeDir) +{ + if (pRelativePos != NULL) { + pRelativePos->x = 0; + pRelativePos->y = 0; + pRelativePos->z = 0; + } + + if (pRelativeDir != NULL) { + pRelativeDir->x = 0; + pRelativeDir->y = 0; + pRelativeDir->z = -1; + } + + if (pSpatializer == NULL) { + return; + } + + if (pListener == NULL || ma_spatializer_get_positioning(pSpatializer) == ma_positioning_relative) { + /* There's no listener or we're using relative positioning. */ + if (pRelativePos != NULL) { + *pRelativePos = ma_spatializer_get_position(pSpatializer); + } + if (pRelativeDir != NULL) { + *pRelativeDir = ma_spatializer_get_direction(pSpatializer); + } + } else { + ma_vec3f spatializerPosition; + ma_vec3f spatializerDirection; + ma_vec3f listenerPosition; + ma_vec3f listenerDirection; + ma_vec3f v; + ma_vec3f axisX; + ma_vec3f axisY; + ma_vec3f axisZ; + float m[4][4]; + + spatializerPosition = ma_spatializer_get_position(pSpatializer); + spatializerDirection = ma_spatializer_get_direction(pSpatializer); + listenerPosition = ma_spatializer_listener_get_position(pListener); + listenerDirection = ma_spatializer_listener_get_direction(pListener); + + /* + We need to calcualte the right vector from our forward and up vectors. This is done with + a cross product. + */ + axisZ = ma_vec3f_normalize(listenerDirection); /* Normalization required here because we can't trust the caller. */ + axisX = ma_vec3f_normalize(ma_vec3f_cross(axisZ, pListener->config.worldUp)); /* Normalization required here because the world up vector may not be perpendicular with the forward vector. */ + + /* + The calculation of axisX above can result in a zero-length vector if the listener is + looking straight up on the Y axis. We'll need to fall back to a +X in this case so that + the calculations below don't fall apart. This is where a quaternion based listener and + sound orientation would come in handy. + */ + if (ma_vec3f_len2(axisX) == 0) { + axisX = ma_vec3f_init_3f(1, 0, 0); + } + + axisY = ma_vec3f_cross(axisX, axisZ); /* No normalization is required here because axisX and axisZ are unit length and perpendicular. 
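+(The length of a cross product is |a||b|sin(theta), which is 1 x 1 x sin(90 degrees) = 1 here.)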
*/ + + /* + We need to swap the X axis if we're left handed because otherwise the cross product above + will have resulted in it pointing in the wrong direction (right handed was assumed in the + cross products above). + */ + if (pListener->config.handedness == ma_handedness_left) { + axisX = ma_vec3f_neg(axisX); + } + + /* Lookat. */ + m[0][0] = axisX.x; m[1][0] = axisX.y; m[2][0] = axisX.z; m[3][0] = -ma_vec3f_dot(axisX, listenerPosition); + m[0][1] = axisY.x; m[1][1] = axisY.y; m[2][1] = axisY.z; m[3][1] = -ma_vec3f_dot(axisY, listenerPosition); + m[0][2] = -axisZ.x; m[1][2] = -axisZ.y; m[2][2] = -axisZ.z; m[3][2] = -ma_vec3f_dot(ma_vec3f_neg(axisZ), listenerPosition); + m[0][3] = 0; m[1][3] = 0; m[2][3] = 0; m[3][3] = 1; + + /* + Multiply the lookat matrix by the spatializer position to transform it to listener + space. This allows calculations to work based on the sound being relative to the + origin which makes things simpler. + */ + if (pRelativePos != NULL) { + v = spatializerPosition; + pRelativePos->x = m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z + m[3][0] * 1; + pRelativePos->y = m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z + m[3][1] * 1; + pRelativePos->z = m[0][2] * v.x + m[1][2] * v.y + m[2][2] * v.z + m[3][2] * 1; + } + + /* + The direction of the sound needs to also be transformed so that it's relative to the + rotation of the listener. + */ + if (pRelativeDir != NULL) { + v = spatializerDirection; + pRelativeDir->x = m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z; + pRelativeDir->y = m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z; + pRelativeDir->z = m[0][2] * v.x + m[1][2] * v.y + m[2][2] * v.z; + } + } +} + + + + +/************************************************************************************************************************************************************** + +Resampling + +**************************************************************************************************************************************************************/ +MA_API ma_linear_resampler_config ma_linear_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +{ + ma_linear_resampler_config config; + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRateIn = sampleRateIn; + config.sampleRateOut = sampleRateOut; + config.lpfOrder = ma_min(MA_DEFAULT_RESAMPLER_LPF_ORDER, MA_MAX_FILTER_ORDER); + config.lpfNyquistFactor = 1; + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t x0Offset; + size_t x1Offset; + size_t lpfOffset; +} ma_linear_resampler_heap_layout; + + +static void ma_linear_resampler_adjust_timer_for_new_rate(ma_linear_resampler* pResampler, ma_uint32 oldSampleRateOut, ma_uint32 newSampleRateOut) +{ + /* + So what's happening here? Basically we need to adjust the fractional component of the time advance based on the new rate. The old time advance will + be based on the old sample rate, but we are needing to adjust it to that it's based on the new sample rate. + */ + ma_uint32 oldRateTimeWhole = pResampler->inTimeFrac / oldSampleRateOut; /* <-- This should almost never be anything other than 0, but leaving it here to make this more general and robust just in case. */ + ma_uint32 oldRateTimeFract = pResampler->inTimeFrac % oldSampleRateOut; + + pResampler->inTimeFrac = + (oldRateTimeWhole * newSampleRateOut) + + ((oldRateTimeFract * newSampleRateOut) / oldSampleRateOut); + + /* Make sure the fractional part is less than the output sample rate. 
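To make the rescaling concrete, an illustrative example: with an old output rate of 48000 and inTimeFrac = 24000 (exactly half way between two input samples), switching to 44100 gives 24000 * 44100 / 48000 = 22050, which is again exactly half of the new rate, so the resampler's phase is preserved across the rate change. The wrapper name below is purely illustrative.

static void ma_timer_rescale_worked_example(void)
{
    ma_uint32 oldSampleRateOut = 48000;
    ma_uint32 newSampleRateOut = 44100;
    ma_uint32 inTimeFrac       = 24000;   /* Half way between two input samples at the old rate. */

    ma_uint32 whole = inTimeFrac / oldSampleRateOut;   /* = 0     */
    ma_uint32 frac  = inTimeFrac % oldSampleRateOut;   /* = 24000 */

    inTimeFrac = (whole * newSampleRateOut) + ((frac * newSampleRateOut) / oldSampleRateOut);   /* = 22050, still exactly half of 44100. */
    (void)inTimeFrac;
}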
*/ + pResampler->inTimeInt += pResampler->inTimeFrac / pResampler->config.sampleRateOut; + pResampler->inTimeFrac = pResampler->inTimeFrac % pResampler->config.sampleRateOut; +} + +static ma_result ma_linear_resampler_set_rate_internal(ma_linear_resampler* pResampler, void* pHeap, ma_linear_resampler_heap_layout* pHeapLayout, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_bool32 isResamplerAlreadyInitialized) +{ + ma_result result; + ma_uint32 gcf; + ma_uint32 lpfSampleRate; + double lpfCutoffFrequency; + ma_lpf_config lpfConfig; + ma_uint32 oldSampleRateOut; /* Required for adjusting time advance down the bottom. */ + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (sampleRateIn == 0 || sampleRateOut == 0) { + return MA_INVALID_ARGS; + } + + oldSampleRateOut = pResampler->config.sampleRateOut; + + pResampler->config.sampleRateIn = sampleRateIn; + pResampler->config.sampleRateOut = sampleRateOut; + + /* Simplify the sample rate. */ + gcf = ma_gcf_u32(pResampler->config.sampleRateIn, pResampler->config.sampleRateOut); + pResampler->config.sampleRateIn /= gcf; + pResampler->config.sampleRateOut /= gcf; + + /* Always initialize the low-pass filter, even when the order is 0. */ + if (pResampler->config.lpfOrder > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + lpfSampleRate = (ma_uint32)(ma_max(pResampler->config.sampleRateIn, pResampler->config.sampleRateOut)); + lpfCutoffFrequency = ( double)(ma_min(pResampler->config.sampleRateIn, pResampler->config.sampleRateOut) * 0.5 * pResampler->config.lpfNyquistFactor); + + lpfConfig = ma_lpf_config_init(pResampler->config.format, pResampler->config.channels, lpfSampleRate, lpfCutoffFrequency, pResampler->config.lpfOrder); + + /* + If the resampler is alreay initialized we don't want to do a fresh initialization of the low-pass filter because it will result in the cached frames + getting cleared. Instead we re-initialize the filter which will maintain any cached frames. + */ + if (isResamplerAlreadyInitialized) { + result = ma_lpf_reinit(&lpfConfig, &pResampler->lpf); + } else { + result = ma_lpf_init_preallocated(&lpfConfig, ma_offset_ptr(pHeap, pHeapLayout->lpfOffset), &pResampler->lpf); + } + + if (result != MA_SUCCESS) { + return result; + } + + + pResampler->inAdvanceInt = pResampler->config.sampleRateIn / pResampler->config.sampleRateOut; + pResampler->inAdvanceFrac = pResampler->config.sampleRateIn % pResampler->config.sampleRateOut; + + /* Our timer was based on the old rate. We need to adjust it so that it's based on the new rate. 
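For illustration, some concrete advance values (not part of the commit): 44100 -> 48000 simplifies by a gcf of 300 to 147/160, giving inAdvanceInt = 0 and inAdvanceFrac = 147, i.e. each output frame moves the input read position forward by 147/160 of an input frame; the reverse conversion, 160/147, gives inAdvanceInt = 1 and inAdvanceFrac = 13. The wrapper name is illustrative.

static void ma_advance_values_worked_example(void)
{
    ma_uint32 sampleRateIn  = 147;   /* 44100 / 300 */
    ma_uint32 sampleRateOut = 160;   /* 48000 / 300 */

    ma_uint32 inAdvanceInt  = sampleRateIn / sampleRateOut;   /* = 0   */
    ma_uint32 inAdvanceFrac = sampleRateIn % sampleRateOut;   /* = 147 */

    (void)inAdvanceInt;
    (void)inAdvanceFrac;
}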
*/ + ma_linear_resampler_adjust_timer_for_new_rate(pResampler, oldSampleRateOut, pResampler->config.sampleRateOut); + + return MA_SUCCESS; +} + +static ma_result ma_linear_resampler_get_heap_layout(const ma_linear_resampler_config* pConfig, ma_linear_resampler_heap_layout* pHeapLayout) +{ + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + if (pConfig->channels == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* x0 */ + pHeapLayout->x0Offset = pHeapLayout->sizeInBytes; + if (pConfig->format == ma_format_f32) { + pHeapLayout->sizeInBytes += sizeof(float) * pConfig->channels; + } else { + pHeapLayout->sizeInBytes += sizeof(ma_int16) * pConfig->channels; + } + + /* x1 */ + pHeapLayout->x1Offset = pHeapLayout->sizeInBytes; + if (pConfig->format == ma_format_f32) { + pHeapLayout->sizeInBytes += sizeof(float) * pConfig->channels; + } else { + pHeapLayout->sizeInBytes += sizeof(ma_int16) * pConfig->channels; + } + + /* LPF */ + pHeapLayout->lpfOffset = ma_align_64(pHeapLayout->sizeInBytes); + { + ma_result result; + size_t lpfHeapSizeInBytes; + ma_lpf_config lpfConfig = ma_lpf_config_init(pConfig->format, pConfig->channels, 1, 1, pConfig->lpfOrder); /* Sample rate and cutoff frequency do not matter. */ + + result = ma_lpf_get_heap_size(&lpfConfig, &lpfHeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes += lpfHeapSizeInBytes; + } + + /* Make sure allocation size is aligned. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +MA_API ma_result ma_linear_resampler_get_heap_size(const ma_linear_resampler_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_linear_resampler_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_linear_resampler_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_linear_resampler_init_preallocated(const ma_linear_resampler_config* pConfig, void* pHeap, ma_linear_resampler* pResampler) +{ + ma_result result; + ma_linear_resampler_heap_layout heapLayout; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pResampler); + + result = ma_linear_resampler_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pResampler->config = *pConfig; + + pResampler->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + if (pConfig->format == ma_format_f32) { + pResampler->x0.f32 = (float*)ma_offset_ptr(pHeap, heapLayout.x0Offset); + pResampler->x1.f32 = (float*)ma_offset_ptr(pHeap, heapLayout.x1Offset); + } else { + pResampler->x0.s16 = (ma_int16*)ma_offset_ptr(pHeap, heapLayout.x0Offset); + pResampler->x1.s16 = (ma_int16*)ma_offset_ptr(pHeap, heapLayout.x1Offset); + } + + /* Setting the rate will set up the filter and time advances for us. 
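The heap layout above is what allows callers to use the _preallocated path with memory they manage themselves. A hedged sketch of that two-step pattern follows; the function name and the choice of NULL for default allocation callbacks are illustrative, and error handling is abbreviated.

static ma_result ma_linear_resampler_init_with_own_heap_example(ma_linear_resampler* pResampler, void** ppHeap)
{
    ma_result result;
    size_t heapSizeInBytes;
    ma_linear_resampler_config config = ma_linear_resampler_config_init(ma_format_f32, 2, 44100, 48000);

    result = ma_linear_resampler_get_heap_size(&config, &heapSizeInBytes);
    if (result != MA_SUCCESS) {
        return result;
    }

    *ppHeap = ma_malloc(heapSizeInBytes, NULL);   /* NULL = default allocation callbacks. The caller owns this memory. */
    if (*ppHeap == NULL) {
        return MA_OUT_OF_MEMORY;
    }

    return ma_linear_resampler_init_preallocated(&config, *ppHeap, pResampler);
}

When done, uninitialize with ma_linear_resampler_uninit and then release the heap yourself with ma_free, since the _preallocated variant never takes ownership of it.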
*/
+    result = ma_linear_resampler_set_rate_internal(pResampler, pHeap, &heapLayout, pConfig->sampleRateIn, pConfig->sampleRateOut, /* isResamplerAlreadyInitialized = */ MA_FALSE);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    pResampler->inTimeInt  = 1;  /* Set this to one to force an input sample to always be loaded for the first output frame. */
+    pResampler->inTimeFrac = 0;
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_linear_resampler_init(const ma_linear_resampler_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_linear_resampler* pResampler)
+{
+    ma_result result;
+    size_t heapSizeInBytes;
+    void* pHeap;
+
+    result = ma_linear_resampler_get_heap_size(pConfig, &heapSizeInBytes);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    if (heapSizeInBytes > 0) {
+        pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks);
+        if (pHeap == NULL) {
+            return MA_OUT_OF_MEMORY;
+        }
+    } else {
+        pHeap = NULL;
+    }
+
+    result = ma_linear_resampler_init_preallocated(pConfig, pHeap, pResampler);
+    if (result != MA_SUCCESS) {
+        ma_free(pHeap, pAllocationCallbacks);
+        return result;
+    }
+
+    pResampler->_ownsHeap = MA_TRUE;
+    return MA_SUCCESS;
+}
+
+MA_API void ma_linear_resampler_uninit(ma_linear_resampler* pResampler, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    if (pResampler == NULL) {
+        return;
+    }
+
+    ma_lpf_uninit(&pResampler->lpf, pAllocationCallbacks);
+
+    if (pResampler->_ownsHeap) {
+        ma_free(pResampler->_pHeap, pAllocationCallbacks);
+    }
+}
+
+static MA_INLINE ma_int16 ma_linear_resampler_mix_s16(ma_int16 x, ma_int16 y, ma_int32 a, const ma_int32 shift)
+{
+    ma_int32 b;
+    ma_int32 c;
+    ma_int32 r;
+
+    MA_ASSERT(a <= (1<<shift));
+
+    b = x * ((1<<shift) - a);
+    c = y * a;
+    r = b + c;
+
+    return (ma_int16)(r >> shift);
+}
+
+static void ma_linear_resampler_interpolate_frame_s16(ma_linear_resampler* pResampler, ma_int16* MA_RESTRICT pFrameOut)
+{
+    ma_uint32 c;
+    ma_uint32 a;
+    const ma_uint32 channels = pResampler->config.channels;
+    const ma_uint32 shift = 12;
+
+    MA_ASSERT(pResampler != NULL);
+    MA_ASSERT(pFrameOut  != NULL);
+
+    a = (pResampler->inTimeFrac << shift) / pResampler->config.sampleRateOut;
+
+    MA_ASSUME(channels > 0);
+    for (c = 0; c < channels; c += 1) {
+        ma_int16 s = ma_linear_resampler_mix_s16(pResampler->x0.s16[c], pResampler->x1.s16[c], a, shift);
+        pFrameOut[c] = s;
+    }
+}
+
+
+static void ma_linear_resampler_interpolate_frame_f32(ma_linear_resampler* pResampler, float* MA_RESTRICT pFrameOut)
+{
+    ma_uint32 c;
+    float a;
+    const ma_uint32 channels = pResampler->config.channels;
+
+    MA_ASSERT(pResampler != NULL);
+    MA_ASSERT(pFrameOut  != NULL);
+
+    a = (float)pResampler->inTimeFrac / pResampler->config.sampleRateOut;
+
+    MA_ASSUME(channels > 0);
+    for (c = 0; c < channels; c += 1) {
+        float s = ma_mix_f32_fast(pResampler->x0.f32[c], pResampler->x1.f32[c], a);
+        pFrameOut[c] = s;
+    }
+}
+
+static ma_result ma_linear_resampler_process_pcm_frames_s16_downsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
+{
+    const ma_int16* pFramesInS16;
+    /* */ ma_int16* pFramesOutS16;
+    ma_uint64 frameCountIn;
+    ma_uint64 frameCountOut;
+    ma_uint64 framesProcessedIn;
+    ma_uint64 framesProcessedOut;
+
+    MA_ASSERT(pResampler     != NULL);
+    MA_ASSERT(pFrameCountIn  != NULL);
+    MA_ASSERT(pFrameCountOut != NULL);
+
+    pFramesInS16       = (const ma_int16*)pFramesIn;
+    pFramesOutS16      = (      ma_int16*)pFramesOut;
+    frameCountIn       = *pFrameCountIn;
+    frameCountOut      = *pFrameCountOut;
+    framesProcessedIn  = 0;
+    framesProcessedOut = 0;
+
+    while (framesProcessedOut < 
frameCountOut) { + /* Before interpolating we need to load the buffers. When doing this we need to ensure we run every input sample through the filter. */ + while (pResampler->inTimeInt > 0 && frameCountIn > framesProcessedIn) { + ma_uint32 iChannel; + + if (pFramesInS16 != NULL) { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel]; + pResampler->x1.s16[iChannel] = pFramesInS16[iChannel]; + } + pFramesInS16 += pResampler->config.channels; + } else { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel]; + pResampler->x1.s16[iChannel] = 0; + } + } + + /* Filter. Do not apply filtering if sample rates are the same or else you'll get dangerous glitching. */ + if (pResampler->config.sampleRateIn != pResampler->config.sampleRateOut) { + ma_lpf_process_pcm_frame_s16(&pResampler->lpf, pResampler->x1.s16, pResampler->x1.s16); + } + + framesProcessedIn += 1; + pResampler->inTimeInt -= 1; + } + + if (pResampler->inTimeInt > 0) { + break; /* Ran out of input data. */ + } + + /* Getting here means the frames have been loaded and filtered and we can generate the next output frame. */ + if (pFramesOutS16 != NULL) { + MA_ASSERT(pResampler->inTimeInt == 0); + ma_linear_resampler_interpolate_frame_s16(pResampler, pFramesOutS16); + + pFramesOutS16 += pResampler->config.channels; + } + + framesProcessedOut += 1; + + /* Advance time forward. */ + pResampler->inTimeInt += pResampler->inAdvanceInt; + pResampler->inTimeFrac += pResampler->inAdvanceFrac; + if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) { + pResampler->inTimeFrac -= pResampler->config.sampleRateOut; + pResampler->inTimeInt += 1; + } + } + + *pFrameCountIn = framesProcessedIn; + *pFrameCountOut = framesProcessedOut; + + return MA_SUCCESS; +} + +static ma_result ma_linear_resampler_process_pcm_frames_s16_upsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + const ma_int16* pFramesInS16; + /* */ ma_int16* pFramesOutS16; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + + MA_ASSERT(pResampler != NULL); + MA_ASSERT(pFrameCountIn != NULL); + MA_ASSERT(pFrameCountOut != NULL); + + pFramesInS16 = (const ma_int16*)pFramesIn; + pFramesOutS16 = ( ma_int16*)pFramesOut; + frameCountIn = *pFrameCountIn; + frameCountOut = *pFrameCountOut; + framesProcessedIn = 0; + framesProcessedOut = 0; + + while (framesProcessedOut < frameCountOut) { + /* Before interpolating we need to load the buffers. */ + while (pResampler->inTimeInt > 0 && frameCountIn > framesProcessedIn) { + ma_uint32 iChannel; + + if (pFramesInS16 != NULL) { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel]; + pResampler->x1.s16[iChannel] = pFramesInS16[iChannel]; + } + pFramesInS16 += pResampler->config.channels; + } else { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel]; + pResampler->x1.s16[iChannel] = 0; + } + } + + framesProcessedIn += 1; + pResampler->inTimeInt -= 1; + } + + if (pResampler->inTimeInt > 0) { + break; /* Ran out of input data. */ + } + + /* Getting here means the frames have been loaded and we can generate the next output frame. 
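For reference, the s16 interpolation above is just a fixed-point lerp with a 12-bit weight: half way between x = 1000 and y = 3000 the weight is a = (1 << 12) / 2 = 2048, so b = 1000 * 2048, c = 3000 * 2048 and (b + c) >> 12 = 2000, the expected midpoint. An illustrative sketch; the wrapper name is not part of the commit.

static void ma_s16_mix_worked_example(void)
{
    /* (1000 * (4096 - 2048) + 3000 * 2048) >> 12 = (2048000 + 6144000) >> 12 = 2000 */
    ma_int16 mid = ma_linear_resampler_mix_s16(1000, 3000, 2048, 12);
    (void)mid;
}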
*/ + if (pFramesOutS16 != NULL) { + MA_ASSERT(pResampler->inTimeInt == 0); + ma_linear_resampler_interpolate_frame_s16(pResampler, pFramesOutS16); + + /* Filter. Do not apply filtering if sample rates are the same or else you'll get dangerous glitching. */ + if (pResampler->config.sampleRateIn != pResampler->config.sampleRateOut) { + ma_lpf_process_pcm_frame_s16(&pResampler->lpf, pFramesOutS16, pFramesOutS16); + } + + pFramesOutS16 += pResampler->config.channels; + } + + framesProcessedOut += 1; + + /* Advance time forward. */ + pResampler->inTimeInt += pResampler->inAdvanceInt; + pResampler->inTimeFrac += pResampler->inAdvanceFrac; + if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) { + pResampler->inTimeFrac -= pResampler->config.sampleRateOut; + pResampler->inTimeInt += 1; + } + } + + *pFrameCountIn = framesProcessedIn; + *pFrameCountOut = framesProcessedOut; + + return MA_SUCCESS; +} + +static ma_result ma_linear_resampler_process_pcm_frames_s16(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + MA_ASSERT(pResampler != NULL); + + if (pResampler->config.sampleRateIn > pResampler->config.sampleRateOut) { + return ma_linear_resampler_process_pcm_frames_s16_downsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + return ma_linear_resampler_process_pcm_frames_s16_upsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } +} + + +static ma_result ma_linear_resampler_process_pcm_frames_f32_downsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + const float* pFramesInF32; + /* */ float* pFramesOutF32; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + + MA_ASSERT(pResampler != NULL); + MA_ASSERT(pFrameCountIn != NULL); + MA_ASSERT(pFrameCountOut != NULL); + + pFramesInF32 = (const float*)pFramesIn; + pFramesOutF32 = ( float*)pFramesOut; + frameCountIn = *pFrameCountIn; + frameCountOut = *pFrameCountOut; + framesProcessedIn = 0; + framesProcessedOut = 0; + + while (framesProcessedOut < frameCountOut) { + /* Before interpolating we need to load the buffers. When doing this we need to ensure we run every input sample through the filter. */ + while (pResampler->inTimeInt > 0 && frameCountIn > framesProcessedIn) { + ma_uint32 iChannel; + + if (pFramesInF32 != NULL) { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel]; + pResampler->x1.f32[iChannel] = pFramesInF32[iChannel]; + } + pFramesInF32 += pResampler->config.channels; + } else { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel]; + pResampler->x1.f32[iChannel] = 0; + } + } + + /* Filter. Do not apply filtering if sample rates are the same or else you'll get dangerous glitching. */ + if (pResampler->config.sampleRateIn != pResampler->config.sampleRateOut) { + ma_lpf_process_pcm_frame_f32(&pResampler->lpf, pResampler->x1.f32, pResampler->x1.f32); + } + + framesProcessedIn += 1; + pResampler->inTimeInt -= 1; + } + + if (pResampler->inTimeInt > 0) { + break; /* Ran out of input data. */ + } + + /* Getting here means the frames have been loaded and filtered and we can generate the next output frame. 
*/ + if (pFramesOutF32 != NULL) { + MA_ASSERT(pResampler->inTimeInt == 0); + ma_linear_resampler_interpolate_frame_f32(pResampler, pFramesOutF32); + + pFramesOutF32 += pResampler->config.channels; + } + + framesProcessedOut += 1; + + /* Advance time forward. */ + pResampler->inTimeInt += pResampler->inAdvanceInt; + pResampler->inTimeFrac += pResampler->inAdvanceFrac; + if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) { + pResampler->inTimeFrac -= pResampler->config.sampleRateOut; + pResampler->inTimeInt += 1; + } + } + + *pFrameCountIn = framesProcessedIn; + *pFrameCountOut = framesProcessedOut; + + return MA_SUCCESS; +} + +static ma_result ma_linear_resampler_process_pcm_frames_f32_upsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + const float* pFramesInF32; + /* */ float* pFramesOutF32; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + + MA_ASSERT(pResampler != NULL); + MA_ASSERT(pFrameCountIn != NULL); + MA_ASSERT(pFrameCountOut != NULL); + + pFramesInF32 = (const float*)pFramesIn; + pFramesOutF32 = ( float*)pFramesOut; + frameCountIn = *pFrameCountIn; + frameCountOut = *pFrameCountOut; + framesProcessedIn = 0; + framesProcessedOut = 0; + + while (framesProcessedOut < frameCountOut) { + /* Before interpolating we need to load the buffers. */ + while (pResampler->inTimeInt > 0 && frameCountIn > framesProcessedIn) { + ma_uint32 iChannel; + + if (pFramesInF32 != NULL) { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel]; + pResampler->x1.f32[iChannel] = pFramesInF32[iChannel]; + } + pFramesInF32 += pResampler->config.channels; + } else { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel]; + pResampler->x1.f32[iChannel] = 0; + } + } + + framesProcessedIn += 1; + pResampler->inTimeInt -= 1; + } + + if (pResampler->inTimeInt > 0) { + break; /* Ran out of input data. */ + } + + /* Getting here means the frames have been loaded and we can generate the next output frame. */ + if (pFramesOutF32 != NULL) { + MA_ASSERT(pResampler->inTimeInt == 0); + ma_linear_resampler_interpolate_frame_f32(pResampler, pFramesOutF32); + + /* Filter. Do not apply filtering if sample rates are the same or else you'll get dangerous glitching. */ + if (pResampler->config.sampleRateIn != pResampler->config.sampleRateOut) { + ma_lpf_process_pcm_frame_f32(&pResampler->lpf, pFramesOutF32, pFramesOutF32); + } + + pFramesOutF32 += pResampler->config.channels; + } + + framesProcessedOut += 1; + + /* Advance time forward. 
*/ + pResampler->inTimeInt += pResampler->inAdvanceInt; + pResampler->inTimeFrac += pResampler->inAdvanceFrac; + if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) { + pResampler->inTimeFrac -= pResampler->config.sampleRateOut; + pResampler->inTimeInt += 1; + } + } + + *pFrameCountIn = framesProcessedIn; + *pFrameCountOut = framesProcessedOut; + + return MA_SUCCESS; +} + +static ma_result ma_linear_resampler_process_pcm_frames_f32(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + MA_ASSERT(pResampler != NULL); + + if (pResampler->config.sampleRateIn > pResampler->config.sampleRateOut) { + return ma_linear_resampler_process_pcm_frames_f32_downsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + return ma_linear_resampler_process_pcm_frames_f32_upsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } +} + + +MA_API ma_result ma_linear_resampler_process_pcm_frames(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + /* */ if (pResampler->config.format == ma_format_s16) { + return ma_linear_resampler_process_pcm_frames_s16(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else if (pResampler->config.format == ma_format_f32) { + return ma_linear_resampler_process_pcm_frames_f32(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + /* Should never get here. Getting here means the format is not supported and you didn't check the return value of ma_linear_resampler_init(). */ + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; + } +} + + +MA_API ma_result ma_linear_resampler_set_rate(ma_linear_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +{ + return ma_linear_resampler_set_rate_internal(pResampler, NULL, NULL, sampleRateIn, sampleRateOut, /* isResamplerAlreadyInitialized = */ MA_TRUE); +} + +MA_API ma_result ma_linear_resampler_set_rate_ratio(ma_linear_resampler* pResampler, float ratioInOut) +{ + ma_uint32 n; + ma_uint32 d; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (ratioInOut <= 0) { + return MA_INVALID_ARGS; + } + + d = 1000000; + n = (ma_uint32)(ratioInOut * d); + + if (n == 0) { + return MA_INVALID_ARGS; /* Ratio too small. */ + } + + MA_ASSERT(n != 0); + + return ma_linear_resampler_set_rate(pResampler, n, d); +} + +MA_API ma_uint64 ma_linear_resampler_get_input_latency(const ma_linear_resampler* pResampler) +{ + if (pResampler == NULL) { + return 0; + } + + return 1 + ma_lpf_get_latency(&pResampler->lpf); +} + +MA_API ma_uint64 ma_linear_resampler_get_output_latency(const ma_linear_resampler* pResampler) +{ + if (pResampler == NULL) { + return 0; + } + + return ma_linear_resampler_get_input_latency(pResampler) * pResampler->config.sampleRateOut / pResampler->config.sampleRateIn; +} + +MA_API ma_result ma_linear_resampler_get_required_input_frame_count(const ma_linear_resampler* pResampler, ma_uint64 outputFrameCount, ma_uint64* pInputFrameCount) +{ + ma_uint64 inputFrameCount; + + if (pInputFrameCount == NULL) { + return MA_INVALID_ARGS; + } + + *pInputFrameCount = 0; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (outputFrameCount == 0) { + return MA_SUCCESS; + } + + /* Any whole input frames are consumed before the first output frame is generated. 
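ma_linear_resampler_process_pcm_frames treats both frame counts as in/out parameters: on input they are the capacities of the two buffers, on return they are the number of frames actually consumed and produced. ma_linear_resampler_get_required_input_frame_count can be used to size the input buffer for a desired output count. A hedged usage sketch; the function name, buffer sizes and rates are illustrative only.

static ma_result ma_resample_block_example(ma_linear_resampler* pResampler, const float* pIn, float* pOut)
{
    ma_uint64 frameCountIn  = 441;   /* Capacity of pIn, in frames.  */
    ma_uint64 frameCountOut = 480;   /* Capacity of pOut, in frames. */
    ma_result result;

    result = ma_linear_resampler_process_pcm_frames(pResampler, pIn, &frameCountIn, pOut, &frameCountOut);
    if (result != MA_SUCCESS) {
        return result;
    }

    /* frameCountIn now holds the number of input frames consumed and frameCountOut the number of output frames written. */
    return MA_SUCCESS;
}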
*/ + inputFrameCount = pResampler->inTimeInt; + outputFrameCount -= 1; + + /* The rest of the output frames can be calculated in constant time. */ + inputFrameCount += outputFrameCount * pResampler->inAdvanceInt; + inputFrameCount += (pResampler->inTimeFrac + (outputFrameCount * pResampler->inAdvanceFrac)) / pResampler->config.sampleRateOut; + + *pInputFrameCount = inputFrameCount; + + return MA_SUCCESS; +} + +MA_API ma_result ma_linear_resampler_get_expected_output_frame_count(const ma_linear_resampler* pResampler, ma_uint64 inputFrameCount, ma_uint64* pOutputFrameCount) +{ + ma_uint64 outputFrameCount; + ma_uint64 preliminaryInputFrameCountFromFrac; + ma_uint64 preliminaryInputFrameCount; + + if (pOutputFrameCount == NULL) { + return MA_INVALID_ARGS; + } + + *pOutputFrameCount = 0; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + /* + The first step is to get a preliminary output frame count. This will either be exactly equal to what we need, or less by 1. We need to + determine how many input frames will be consumed by this value. If it's greater than our original input frame count it means we won't + be able to generate an extra frame because we will have run out of input data. Otherwise we will have enough input for the generation + of an extra output frame. This add-by-one logic is necessary due to how the data loading logic works when processing frames. + */ + outputFrameCount = (inputFrameCount * pResampler->config.sampleRateOut) / pResampler->config.sampleRateIn; + + /* + We need to determine how many *whole* input frames will have been processed to generate our preliminary output frame count. This is + used in the logic below to determine whether or not we need to add an extra output frame. + */ + preliminaryInputFrameCountFromFrac = (pResampler->inTimeFrac + outputFrameCount*pResampler->inAdvanceFrac) / pResampler->config.sampleRateOut; + preliminaryInputFrameCount = (pResampler->inTimeInt + outputFrameCount*pResampler->inAdvanceInt ) + preliminaryInputFrameCountFromFrac; + + /* + If the total number of *whole* input frames that would be required to generate our preliminary output frame count is greather than + the amount of whole input frames we have available as input we need to *not* add an extra output frame as there won't be enough data + to actually process. Otherwise we need to add the extra output frame. + */ + if (preliminaryInputFrameCount <= inputFrameCount) { + outputFrameCount += 1; + } + + *pOutputFrameCount = outputFrameCount; + + return MA_SUCCESS; +} + +MA_API ma_result ma_linear_resampler_reset(ma_linear_resampler* pResampler) +{ + ma_uint32 iChannel; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + /* Timers need to be cleared back to zero. */ + pResampler->inTimeInt = 1; /* Set this to one to force an input sample to always be loaded for the first output frame. */ + pResampler->inTimeFrac = 0; + + /* Cached samples need to be cleared. */ + if (pResampler->config.format == ma_format_f32) { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = 0; + pResampler->x1.f32[iChannel] = 0; + } + } else { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.s16[iChannel] = 0; + pResampler->x1.s16[iChannel] = 0; + } + } + + /* The low pass filter needs to have it's cache reset. */ + ma_lpf_clear_cache(&pResampler->lpf); + + return MA_SUCCESS; +} + + + +/* Linear resampler backend vtable. 
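To put numbers on the frame-count math above (illustrative only, assuming a freshly initialized 44100 -> 48000 resampler, which simplifies to 147/160 with inTimeInt = 1 and inTimeFrac = 0): 160 output frames require 1 + 159*0 + (0 + 159*147)/160 = 1 + 146 = 147 input frames, and feeding those 147 input frames through the expected-output calculation yields 160 again, so the two queries stay consistent. The wrapper name below is not part of the commit.

static void ma_frame_count_worked_example(ma_linear_resampler* pResampler)
{
    /* Assumes pResampler was just initialized with sampleRateIn = 44100 and sampleRateOut = 48000. */
    ma_uint64 requiredInput  = 0;
    ma_uint64 expectedOutput = 0;

    ma_linear_resampler_get_required_input_frame_count(pResampler, 160, &requiredInput);     /* = 147 */
    ma_linear_resampler_get_expected_output_frame_count(pResampler, 147, &expectedOutput);   /* = 160 */

    (void)requiredInput;
    (void)expectedOutput;
}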
*/ +static ma_linear_resampler_config ma_resampling_backend_get_config__linear(const ma_resampler_config* pConfig) +{ + ma_linear_resampler_config linearConfig; + + linearConfig = ma_linear_resampler_config_init(pConfig->format, pConfig->channels, pConfig->sampleRateIn, pConfig->sampleRateOut); + linearConfig.lpfOrder = pConfig->linear.lpfOrder; + + return linearConfig; +} + +static ma_result ma_resampling_backend_get_heap_size__linear(void* pUserData, const ma_resampler_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_linear_resampler_config linearConfig; + + (void)pUserData; + + linearConfig = ma_resampling_backend_get_config__linear(pConfig); + + return ma_linear_resampler_get_heap_size(&linearConfig, pHeapSizeInBytes); +} + +static ma_result ma_resampling_backend_init__linear(void* pUserData, const ma_resampler_config* pConfig, void* pHeap, ma_resampling_backend** ppBackend) +{ + ma_resampler* pResampler = (ma_resampler*)pUserData; + ma_result result; + ma_linear_resampler_config linearConfig; + + (void)pUserData; + + linearConfig = ma_resampling_backend_get_config__linear(pConfig); + + result = ma_linear_resampler_init_preallocated(&linearConfig, pHeap, &pResampler->state.linear); + if (result != MA_SUCCESS) { + return result; + } + + *ppBackend = &pResampler->state.linear; + + return MA_SUCCESS; +} + +static void ma_resampling_backend_uninit__linear(void* pUserData, ma_resampling_backend* pBackend, const ma_allocation_callbacks* pAllocationCallbacks) +{ + (void)pUserData; + + ma_linear_resampler_uninit((ma_linear_resampler*)pBackend, pAllocationCallbacks); +} + +static ma_result ma_resampling_backend_process__linear(void* pUserData, ma_resampling_backend* pBackend, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + (void)pUserData; + + return ma_linear_resampler_process_pcm_frames((ma_linear_resampler*)pBackend, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); +} + +static ma_result ma_resampling_backend_set_rate__linear(void* pUserData, ma_resampling_backend* pBackend, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +{ + (void)pUserData; + + return ma_linear_resampler_set_rate((ma_linear_resampler*)pBackend, sampleRateIn, sampleRateOut); +} + +static ma_uint64 ma_resampling_backend_get_input_latency__linear(void* pUserData, const ma_resampling_backend* pBackend) +{ + (void)pUserData; + + return ma_linear_resampler_get_input_latency((const ma_linear_resampler*)pBackend); +} + +static ma_uint64 ma_resampling_backend_get_output_latency__linear(void* pUserData, const ma_resampling_backend* pBackend) +{ + (void)pUserData; + + return ma_linear_resampler_get_output_latency((const ma_linear_resampler*)pBackend); +} + +static ma_result ma_resampling_backend_get_required_input_frame_count__linear(void* pUserData, const ma_resampling_backend* pBackend, ma_uint64 outputFrameCount, ma_uint64* pInputFrameCount) +{ + (void)pUserData; + + return ma_linear_resampler_get_required_input_frame_count((const ma_linear_resampler*)pBackend, outputFrameCount, pInputFrameCount); +} + +static ma_result ma_resampling_backend_get_expected_output_frame_count__linear(void* pUserData, const ma_resampling_backend* pBackend, ma_uint64 inputFrameCount, ma_uint64* pOutputFrameCount) +{ + (void)pUserData; + + return ma_linear_resampler_get_expected_output_frame_count((const ma_linear_resampler*)pBackend, inputFrameCount, pOutputFrameCount); +} + +static ma_result ma_resampling_backend_reset__linear(void* pUserData, ma_resampling_backend* pBackend) +{ + 
(void)pUserData; + + return ma_linear_resampler_reset((ma_linear_resampler*)pBackend); +} + +static ma_resampling_backend_vtable g_ma_linear_resampler_vtable = +{ + ma_resampling_backend_get_heap_size__linear, + ma_resampling_backend_init__linear, + ma_resampling_backend_uninit__linear, + ma_resampling_backend_process__linear, + ma_resampling_backend_set_rate__linear, + ma_resampling_backend_get_input_latency__linear, + ma_resampling_backend_get_output_latency__linear, + ma_resampling_backend_get_required_input_frame_count__linear, + ma_resampling_backend_get_expected_output_frame_count__linear, + ma_resampling_backend_reset__linear +}; + + + +MA_API ma_resampler_config ma_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_resample_algorithm algorithm) +{ + ma_resampler_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRateIn = sampleRateIn; + config.sampleRateOut = sampleRateOut; + config.algorithm = algorithm; + + /* Linear. */ + config.linear.lpfOrder = ma_min(MA_DEFAULT_RESAMPLER_LPF_ORDER, MA_MAX_FILTER_ORDER); + + return config; +} + +static ma_result ma_resampler_get_vtable(const ma_resampler_config* pConfig, ma_resampler* pResampler, ma_resampling_backend_vtable** ppVTable, void** ppUserData) +{ + MA_ASSERT(pConfig != NULL); + MA_ASSERT(ppVTable != NULL); + MA_ASSERT(ppUserData != NULL); + + /* Safety. */ + *ppVTable = NULL; + *ppUserData = NULL; + + switch (pConfig->algorithm) + { + case ma_resample_algorithm_linear: + { + *ppVTable = &g_ma_linear_resampler_vtable; + *ppUserData = pResampler; + } break; + + case ma_resample_algorithm_custom: + { + *ppVTable = pConfig->pBackendVTable; + *ppUserData = pConfig->pBackendUserData; + } break; + + default: return MA_INVALID_ARGS; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_resampler_get_heap_size(const ma_resampler_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_resampling_backend_vtable* pVTable; + void* pVTableUserData; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_resampler_get_vtable(pConfig, NULL, &pVTable, &pVTableUserData); + if (result != MA_SUCCESS) { + return result; + } + + if (pVTable == NULL || pVTable->onGetHeapSize == NULL) { + return MA_NOT_IMPLEMENTED; + } + + result = pVTable->onGetHeapSize(pVTableUserData, pConfig, pHeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_resampler_init_preallocated(const ma_resampler_config* pConfig, void* pHeap, ma_resampler* pResampler) +{ + ma_result result; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pResampler); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + pResampler->_pHeap = pHeap; + pResampler->format = pConfig->format; + pResampler->channels = pConfig->channels; + pResampler->sampleRateIn = pConfig->sampleRateIn; + pResampler->sampleRateOut = pConfig->sampleRateOut; + + result = ma_resampler_get_vtable(pConfig, pResampler, &pResampler->pBackendVTable, &pResampler->pBackendUserData); + if (result != MA_SUCCESS) { + return result; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onInit == NULL) { + return MA_NOT_IMPLEMENTED; /* onInit not implemented. 
*/ + } + + result = pResampler->pBackendVTable->onInit(pResampler->pBackendUserData, pConfig, pHeap, &pResampler->pBackend); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_resampler_init(const ma_resampler_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_resampler* pResampler) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_resampler_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_resampler_init_preallocated(pConfig, pHeap, pResampler); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pResampler->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_resampler_uninit(ma_resampler* pResampler, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pResampler == NULL) { + return; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onUninit == NULL) { + return; + } + + pResampler->pBackendVTable->onUninit(pResampler->pBackendUserData, pResampler->pBackend, pAllocationCallbacks); + + if (pResampler->_ownsHeap) { + ma_free(pResampler->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_resampler_process_pcm_frames(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (pFrameCountOut == NULL && pFrameCountIn == NULL) { + return MA_INVALID_ARGS; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onProcess == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pResampler->pBackendVTable->onProcess(pResampler->pBackendUserData, pResampler->pBackend, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); +} + +MA_API ma_result ma_resampler_set_rate(ma_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +{ + ma_result result; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (sampleRateIn == 0 || sampleRateOut == 0) { + return MA_INVALID_ARGS; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onSetRate == NULL) { + return MA_NOT_IMPLEMENTED; + } + + result = pResampler->pBackendVTable->onSetRate(pResampler->pBackendUserData, pResampler->pBackend, sampleRateIn, sampleRateOut); + if (result != MA_SUCCESS) { + return result; + } + + pResampler->sampleRateIn = sampleRateIn; + pResampler->sampleRateOut = sampleRateOut; + + return MA_SUCCESS; +} + +MA_API ma_result ma_resampler_set_rate_ratio(ma_resampler* pResampler, float ratio) +{ + ma_uint32 n; + ma_uint32 d; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (ratio <= 0) { + return MA_INVALID_ARGS; + } + + d = 1000; + n = (ma_uint32)(ratio * d); + + if (n == 0) { + return MA_INVALID_ARGS; /* Ratio too small. 
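For illustration only: the ratio is converted to an integer fraction over a fixed denominator of 1000, so ratio = 1.5f becomes n = 1500 and the call below is equivalent to ma_resampler_set_rate(pResampler, 1500, 1000), which the linear backend then reduces by the gcf to 3/2; any ratio below 0.001 truncates to n = 0 and is rejected as shown. A minimal sketch of that conversion; the wrapper name is not part of the commit.

static void ma_ratio_to_fraction_worked_example(void)
{
    float     ratio = 1.5f;
    ma_uint32 d     = 1000;
    ma_uint32 n     = (ma_uint32)(ratio * d);   /* = 1500, i.e. a 3:2 rate change after gcf reduction. */
    (void)n;
}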
*/ + } + + MA_ASSERT(n != 0); + + return ma_resampler_set_rate(pResampler, n, d); +} + +MA_API ma_uint64 ma_resampler_get_input_latency(const ma_resampler* pResampler) +{ + if (pResampler == NULL) { + return 0; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onGetInputLatency == NULL) { + return 0; + } + + return pResampler->pBackendVTable->onGetInputLatency(pResampler->pBackendUserData, pResampler->pBackend); +} + +MA_API ma_uint64 ma_resampler_get_output_latency(const ma_resampler* pResampler) +{ + if (pResampler == NULL) { + return 0; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onGetOutputLatency == NULL) { + return 0; + } + + return pResampler->pBackendVTable->onGetOutputLatency(pResampler->pBackendUserData, pResampler->pBackend); +} + +MA_API ma_result ma_resampler_get_required_input_frame_count(const ma_resampler* pResampler, ma_uint64 outputFrameCount, ma_uint64* pInputFrameCount) +{ + if (pInputFrameCount == NULL) { + return MA_INVALID_ARGS; + } + + *pInputFrameCount = 0; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onGetRequiredInputFrameCount == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pResampler->pBackendVTable->onGetRequiredInputFrameCount(pResampler->pBackendUserData, pResampler->pBackend, outputFrameCount, pInputFrameCount); +} + +MA_API ma_result ma_resampler_get_expected_output_frame_count(const ma_resampler* pResampler, ma_uint64 inputFrameCount, ma_uint64* pOutputFrameCount) +{ + if (pOutputFrameCount == NULL) { + return MA_INVALID_ARGS; + } + + *pOutputFrameCount = 0; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onGetExpectedOutputFrameCount == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pResampler->pBackendVTable->onGetExpectedOutputFrameCount(pResampler->pBackendUserData, pResampler->pBackend, inputFrameCount, pOutputFrameCount); +} + +MA_API ma_result ma_resampler_reset(ma_resampler* pResampler) +{ + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onReset == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pResampler->pBackendVTable->onReset(pResampler->pBackendUserData, pResampler->pBackend); +} + +/************************************************************************************************************************************************************** + +Channel Conversion + +**************************************************************************************************************************************************************/ +#ifndef MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT +#define MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT 12 +#endif + +#define MA_PLANE_LEFT 0 +#define MA_PLANE_RIGHT 1 +#define MA_PLANE_FRONT 2 +#define MA_PLANE_BACK 3 +#define MA_PLANE_BOTTOM 4 +#define MA_PLANE_TOP 5 + +static float g_maChannelPlaneRatios[MA_CHANNEL_POSITION_COUNT][6] = { + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_NONE */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_MONO */ + { 0.5f, 0.0f, 0.5f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_LEFT */ + { 0.0f, 0.5f, 0.5f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_RIGHT */ + { 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_CENTER */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_LFE */ + { 0.5f, 0.0f, 0.0f, 0.5f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_LEFT */ + { 
0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_RIGHT */ + { 0.25f, 0.0f, 0.75f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_LEFT_CENTER */ + { 0.0f, 0.25f, 0.75f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_RIGHT_CENTER */ + { 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_CENTER */ + { 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_SIDE_LEFT */ + { 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_SIDE_RIGHT */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f}, /* MA_CHANNEL_TOP_CENTER */ + { 0.33f, 0.0f, 0.33f, 0.0f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_FRONT_LEFT */ + { 0.0f, 0.0f, 0.5f, 0.0f, 0.0f, 0.5f}, /* MA_CHANNEL_TOP_FRONT_CENTER */ + { 0.0f, 0.33f, 0.33f, 0.0f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_FRONT_RIGHT */ + { 0.33f, 0.0f, 0.0f, 0.33f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_BACK_LEFT */ + { 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 0.5f}, /* MA_CHANNEL_TOP_BACK_CENTER */ + { 0.0f, 0.33f, 0.0f, 0.33f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_BACK_RIGHT */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_0 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_1 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_2 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_3 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_4 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_5 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_6 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_7 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_8 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_9 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_10 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_11 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_12 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_13 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_14 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_15 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_16 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_17 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_18 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_19 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_20 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_21 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_22 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_23 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_24 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_25 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_26 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_27 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_28 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_29 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_30 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_31 */ +}; + +static float ma_calculate_channel_position_rectangular_weight(ma_channel channelPositionA, ma_channel channelPositionB) +{ + /* + Imagine the following simplified example: You have a single input speaker which is the front/left speaker which you want to convert to + the following output configuration: + + - front/left + - side/left + - back/left + + The front/left output is easy - it the same speaker position so it receives the full contribution of the front/left input. 
The amount
+    of contribution to apply to the side/left and back/left speakers, however, is a bit more complicated.
+
+    Imagine the front/left speaker as emitting audio from two planes - the front plane and the left plane. You can think of the front/left
+    speaker emitting half of its total volume from the front, and the other half from the left. Since part of its volume is being emitted
+    from the left side, and the side/left and back/left channels also emit audio from the left plane, one would expect that they would
+    receive some amount of contribution from the front/left speaker. The amount of contribution depends on how many planes are shared between
+    the two speakers. Note that in the examples below I've added a top/front/left speaker as an example just to show how the math works
+    across 3 spatial dimensions.
+
+    The first thing to do is figure out how each speaker's volume is spread over each plane:
+    - front/left:     2 planes (front and left)      = 1/2 = half its total volume on each plane
+    - side/left:      1 plane  (left only)           = 1/1 = entire volume from left plane
+    - back/left:      2 planes (back and left)       = 1/2 = half its total volume on each plane
+    - top/front/left: 3 planes (top, front and left) = 1/3 = one third of its total volume on each plane
+
+    The amount of volume each channel contributes to each of its planes is what controls how much it is willing to give to, and take from,
+    other channels on the same plane. The volume one channel is willing to give is multiplied by the volume the other channel is willing to
+    take to produce the final contribution.
+    */
+
+    /* Contribution = Sum(Volume to Give * Volume to Take) */
+    float contribution =
+        g_maChannelPlaneRatios[channelPositionA][0] * g_maChannelPlaneRatios[channelPositionB][0] +
+        g_maChannelPlaneRatios[channelPositionA][1] * g_maChannelPlaneRatios[channelPositionB][1] +
+        g_maChannelPlaneRatios[channelPositionA][2] * g_maChannelPlaneRatios[channelPositionB][2] +
+        g_maChannelPlaneRatios[channelPositionA][3] * g_maChannelPlaneRatios[channelPositionB][3] +
+        g_maChannelPlaneRatios[channelPositionA][4] * g_maChannelPlaneRatios[channelPositionB][4] +
+        g_maChannelPlaneRatios[channelPositionA][5] * g_maChannelPlaneRatios[channelPositionB][5];
+
+    return contribution;
+}
+
+MA_API ma_channel_converter_config ma_channel_converter_config_init(ma_format format, ma_uint32 channelsIn, const ma_channel* pChannelMapIn, ma_uint32 channelsOut, const ma_channel* pChannelMapOut, ma_channel_mix_mode mixingMode)
+{
+    ma_channel_converter_config config;
+
+    MA_ZERO_OBJECT(&config);
+    config.format         = format;
+    config.channelsIn     = channelsIn;
+    config.channelsOut    = channelsOut;
+    config.pChannelMapIn  = pChannelMapIn;
+    config.pChannelMapOut = pChannelMapOut;
+    config.mixingMode     = mixingMode;
+
+    return config;
+}
+
+static ma_int32 ma_channel_converter_float_to_fixed(float x)
+{
+    return (ma_int32)(x * (1<<MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT));
+}
+
+static ma_bool32 ma_is_spatial_channel_position(ma_channel channelPosition);
+
+static ma_uint32 ma_channel_map_get_spatial_channel_count(const ma_channel* pChannelMap, ma_uint32 channels)
+{
+    ma_uint32 spatialChannelCount = 0;
+    ma_uint32 iChannel;
+
+    MA_ASSERT(pChannelMap != NULL);
+    MA_ASSERT(channels > 0);
+
+    for (iChannel = 0; iChannel < channels; ++iChannel) {
+        if (ma_is_spatial_channel_position(ma_channel_map_get_channel(pChannelMap, channels, iChannel))) {
+            spatialChannelCount++;
+        }
+    }
+
+    return spatialChannelCount;
+}
+
+static ma_bool32 ma_is_spatial_channel_position(ma_channel channelPosition)
+{
+    int i;
+
+    if (channelPosition == MA_CHANNEL_NONE || channelPosition == MA_CHANNEL_MONO || channelPosition == MA_CHANNEL_LFE) {
+        return MA_FALSE;
+    }
+
+    if (channelPosition >= MA_CHANNEL_AUX_0 && channelPosition <= MA_CHANNEL_AUX_31) {
+        return MA_FALSE;
+    }
+
+    for (i = 0; i < 6; ++i) {   /* Each side of a cube. 
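Two illustrative contributions computed from the table above: MA_CHANNEL_FRONT_LEFT spreads its volume as {left 0.5, front 0.5} and MA_CHANNEL_SIDE_LEFT as {left 1.0}, so their shared left plane gives a weight of 0.5 * 1.0 = 0.5; against MA_CHANNEL_BACK_LEFT ({left 0.5, back 0.5}) only the left plane is shared, giving 0.5 * 0.5 = 0.25. A sketch of the same calculation; the wrapper name is illustrative.

static void ma_channel_weight_worked_example(void)
{
    float flToSideLeft = ma_calculate_channel_position_rectangular_weight(MA_CHANNEL_FRONT_LEFT, MA_CHANNEL_SIDE_LEFT);   /* 0.5f * 1.0f = 0.5f  */
    float flToBackLeft = ma_calculate_channel_position_rectangular_weight(MA_CHANNEL_FRONT_LEFT, MA_CHANNEL_BACK_LEFT);   /* 0.5f * 0.5f = 0.25f */

    (void)flToSideLeft;
    (void)flToBackLeft;
}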
*/ + if (g_maChannelPlaneRatios[channelPosition][i] != 0) { + return MA_TRUE; + } + } + + return MA_FALSE; +} + + +static ma_bool32 ma_channel_map_is_passthrough(const ma_channel* pChannelMapIn, ma_uint32 channelsIn, const ma_channel* pChannelMapOut, ma_uint32 channelsOut) +{ + if (channelsOut == channelsIn) { + return ma_channel_map_is_equal(pChannelMapOut, pChannelMapIn, channelsOut); + } else { + return MA_FALSE; /* Channel counts differ, so cannot be a passthrough. */ + } +} + +static ma_channel_conversion_path ma_channel_map_get_conversion_path(const ma_channel* pChannelMapIn, ma_uint32 channelsIn, const ma_channel* pChannelMapOut, ma_uint32 channelsOut, ma_channel_mix_mode mode) +{ + if (ma_channel_map_is_passthrough(pChannelMapIn, channelsIn, pChannelMapOut, channelsOut)) { + return ma_channel_conversion_path_passthrough; + } + + if (channelsOut == 1 && (pChannelMapOut == NULL || pChannelMapOut[0] == MA_CHANNEL_MONO)) { + return ma_channel_conversion_path_mono_out; + } + + if (channelsIn == 1 && (pChannelMapIn == NULL || pChannelMapIn[0] == MA_CHANNEL_MONO)) { + return ma_channel_conversion_path_mono_in; + } + + if (mode == ma_channel_mix_mode_custom_weights) { + return ma_channel_conversion_path_weights; + } + + /* + We can use a simple shuffle if both channel maps have the same channel count and all channel + positions are present in both. + */ + if (channelsIn == channelsOut) { + ma_uint32 iChannelIn; + ma_bool32 areAllChannelPositionsPresent = MA_TRUE; + for (iChannelIn = 0; iChannelIn < channelsIn; ++iChannelIn) { + ma_bool32 isInputChannelPositionInOutput = MA_FALSE; + if (ma_channel_map_contains_channel_position(channelsOut, pChannelMapOut, ma_channel_map_get_channel(pChannelMapIn, channelsIn, iChannelIn))) { + isInputChannelPositionInOutput = MA_TRUE; + break; + } + + if (!isInputChannelPositionInOutput) { + areAllChannelPositionsPresent = MA_FALSE; + break; + } + } + + if (areAllChannelPositionsPresent) { + return ma_channel_conversion_path_shuffle; + } + } + + /* Getting here means we'll need to use weights. */ + return ma_channel_conversion_path_weights; +} + + +static ma_result ma_channel_map_build_shuffle_table(const ma_channel* pChannelMapIn, ma_uint32 channelCountIn, const ma_channel* pChannelMapOut, ma_uint32 channelCountOut, ma_uint8* pShuffleTable) +{ + ma_uint32 iChannelIn; + ma_uint32 iChannelOut; + + if (pShuffleTable == NULL || channelCountIn == 0 || channelCountOut == 0) { + return MA_INVALID_ARGS; + } + + /* + When building the shuffle table we just do a 1:1 mapping based on the first occurance of a channel. If the + input channel has more than one occurance of a channel position, the second one will be ignored. + */ + for (iChannelOut = 0; iChannelOut < channelCountOut; iChannelOut += 1) { + ma_channel channelOut; + + /* Default to MA_CHANNEL_INDEX_NULL so that if a mapping is not found it'll be set appropriately. */ + pShuffleTable[iChannelOut] = MA_CHANNEL_INDEX_NULL; + + channelOut = ma_channel_map_get_channel(pChannelMapOut, channelCountOut, iChannelOut); + for (iChannelIn = 0; iChannelIn < channelCountIn; iChannelIn += 1) { + ma_channel channelIn; + + channelIn = ma_channel_map_get_channel(pChannelMapIn, channelCountIn, iChannelIn); + if (channelOut == channelIn) { + pShuffleTable[iChannelOut] = (ma_uint8)iChannelIn; + break; + } + + /* + Getting here means the channels don't exactly match, but we are going to support some + relaxed matching for practicality. 
If, for example, there are two stereo channel maps, + but one uses front left/right and the other uses side left/right, it makes logical + sense to just map these. The way we'll do it is we'll check if there is a logical + corresponding mapping, and if so, apply it, but we will *not* break from the loop, + thereby giving the loop a chance to find an exact match later which will take priority. + */ + switch (channelOut) + { + /* Left channels. */ + case MA_CHANNEL_FRONT_LEFT: + case MA_CHANNEL_SIDE_LEFT: + { + switch (channelIn) { + case MA_CHANNEL_FRONT_LEFT: + case MA_CHANNEL_SIDE_LEFT: + { + pShuffleTable[iChannelOut] = (ma_uint8)iChannelIn; + } break; + } + } break; + + /* Right channels. */ + case MA_CHANNEL_FRONT_RIGHT: + case MA_CHANNEL_SIDE_RIGHT: + { + switch (channelIn) { + case MA_CHANNEL_FRONT_RIGHT: + case MA_CHANNEL_SIDE_RIGHT: + { + pShuffleTable[iChannelOut] = (ma_uint8)iChannelIn; + } break; + } + } break; + + default: break; + } + } + } + + return MA_SUCCESS; +} + + +static void ma_channel_map_apply_shuffle_table_u8(ma_uint8* pFramesOut, ma_uint32 channelsOut, const ma_uint8* pFramesIn, ma_uint32 channelsIn, ma_uint64 frameCount, const ma_uint8* pShuffleTable) +{ + ma_uint64 iFrame; + ma_uint32 iChannelOut; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + ma_uint8 iChannelIn = pShuffleTable[iChannelOut]; + if (iChannelIn < channelsIn) { /* For safety, and to deal with MA_CHANNEL_INDEX_NULL. */ + pFramesOut[iChannelOut] = pFramesIn[iChannelIn]; + } else { + pFramesOut[iChannelOut] = 0; + } + } + + pFramesOut += channelsOut; + pFramesIn += channelsIn; + } +} + +static void ma_channel_map_apply_shuffle_table_s16(ma_int16* pFramesOut, ma_uint32 channelsOut, const ma_int16* pFramesIn, ma_uint32 channelsIn, ma_uint64 frameCount, const ma_uint8* pShuffleTable) +{ + ma_uint64 iFrame; + ma_uint32 iChannelOut; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + ma_uint8 iChannelIn = pShuffleTable[iChannelOut]; + if (iChannelIn < channelsIn) { /* For safety, and to deal with MA_CHANNEL_INDEX_NULL. */ + pFramesOut[iChannelOut] = pFramesIn[iChannelIn]; + } else { + pFramesOut[iChannelOut] = 0; + } + } + + pFramesOut += channelsOut; + pFramesIn += channelsIn; + } +} + +static void ma_channel_map_apply_shuffle_table_s24(ma_uint8* pFramesOut, ma_uint32 channelsOut, const ma_uint8* pFramesIn, ma_uint32 channelsIn, ma_uint64 frameCount, const ma_uint8* pShuffleTable) +{ + ma_uint64 iFrame; + ma_uint32 iChannelOut; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + ma_uint8 iChannelIn = pShuffleTable[iChannelOut]; + if (iChannelIn < channelsIn) { /* For safety, and to deal with MA_CHANNEL_INDEX_NULL. 
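To make the table concrete, an illustrative case: with an input map of {FRONT_RIGHT, FRONT_LEFT} and an output map of {FRONT_LEFT, FRONT_RIGHT}, the exact-match pass produces the table {1, 0}, so each output channel pulls from the opposite input channel; the relaxed matching above additionally lets, say, a SIDE_LEFT input feed a FRONT_LEFT output when no exact match exists. A sketch follows; the wrapper name is illustrative.

static void ma_shuffle_table_worked_example(void)
{
    ma_channel channelMapIn [2] = { MA_CHANNEL_FRONT_RIGHT, MA_CHANNEL_FRONT_LEFT  };
    ma_channel channelMapOut[2] = { MA_CHANNEL_FRONT_LEFT,  MA_CHANNEL_FRONT_RIGHT };
    ma_uint8   shuffleTable[2];

    ma_channel_map_build_shuffle_table(channelMapIn, 2, channelMapOut, 2, shuffleTable);

    /* shuffleTable[0] == 1 and shuffleTable[1] == 0. */
}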
*/
+                pFramesOut[iChannelOut*3 + 0] = pFramesIn[iChannelIn*3 + 0];
+                pFramesOut[iChannelOut*3 + 1] = pFramesIn[iChannelIn*3 + 1];
+                pFramesOut[iChannelOut*3 + 2] = pFramesIn[iChannelIn*3 + 2];
+            } else {
+                pFramesOut[iChannelOut*3 + 0] = 0;
+                pFramesOut[iChannelOut*3 + 1] = 0;
+                pFramesOut[iChannelOut*3 + 2] = 0;
+            }
+        }
+
+        pFramesOut += channelsOut*3;
+        pFramesIn  += channelsIn*3;
+    }
+}
+
+static void ma_channel_map_apply_shuffle_table_s32(ma_int32* pFramesOut, ma_uint32 channelsOut, const ma_int32* pFramesIn, ma_uint32 channelsIn, ma_uint64 frameCount, const ma_uint8* pShuffleTable)
+{
+    ma_uint64 iFrame;
+    ma_uint32 iChannelOut;
+
+    for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+        for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) {
+            ma_uint8 iChannelIn = pShuffleTable[iChannelOut];
+            if (iChannelIn < channelsIn) {  /* For safety, and to deal with MA_CHANNEL_INDEX_NULL. */
+                pFramesOut[iChannelOut] = pFramesIn[iChannelIn];
+            } else {
+                pFramesOut[iChannelOut] = 0;
+            }
+        }
+
+        pFramesOut += channelsOut;
+        pFramesIn  += channelsIn;
+    }
+}
+
+static void ma_channel_map_apply_shuffle_table_f32(float* pFramesOut, ma_uint32 channelsOut, const float* pFramesIn, ma_uint32 channelsIn, ma_uint64 frameCount, const ma_uint8* pShuffleTable)
+{
+    ma_uint64 iFrame;
+    ma_uint32 iChannelOut;
+
+    for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+        for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) {
+            ma_uint8 iChannelIn = pShuffleTable[iChannelOut];
+            if (iChannelIn < channelsIn) {  /* For safety, and to deal with MA_CHANNEL_INDEX_NULL. */
+                pFramesOut[iChannelOut] = pFramesIn[iChannelIn];
+            } else {
+                pFramesOut[iChannelOut] = 0;
+            }
+        }
+
+        pFramesOut += channelsOut;
+        pFramesIn  += channelsIn;
+    }
+}
+
+static ma_result ma_channel_map_apply_shuffle_table(void* pFramesOut, ma_uint32 channelsOut, const void* pFramesIn, ma_uint32 channelsIn, ma_uint64 frameCount, const ma_uint8* pShuffleTable, ma_format format)
+{
+    if (pFramesOut == NULL || pFramesIn == NULL || channelsOut == 0 || pShuffleTable == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    switch (format)
+    {
+        case ma_format_u8:
+        {
+            ma_channel_map_apply_shuffle_table_u8((ma_uint8*)pFramesOut, channelsOut, (const ma_uint8*)pFramesIn, channelsIn, frameCount, pShuffleTable);
+        } break;
+
+        case ma_format_s16:
+        {
+            ma_channel_map_apply_shuffle_table_s16((ma_int16*)pFramesOut, channelsOut, (const ma_int16*)pFramesIn, channelsIn, frameCount, pShuffleTable);
+        } break;
+
+        case ma_format_s24:
+        {
+            ma_channel_map_apply_shuffle_table_s24((ma_uint8*)pFramesOut, channelsOut, (const ma_uint8*)pFramesIn, channelsIn, frameCount, pShuffleTable);
+        } break;
+
+        case ma_format_s32:
+        {
+            ma_channel_map_apply_shuffle_table_s32((ma_int32*)pFramesOut, channelsOut, (const ma_int32*)pFramesIn, channelsIn, frameCount, pShuffleTable);
+        } break;
+
+        case ma_format_f32:
+        {
+            ma_channel_map_apply_shuffle_table_f32((float*)pFramesOut, channelsOut, (const float*)pFramesIn, channelsIn, frameCount, pShuffleTable);
+        } break;
+
+        default: return MA_INVALID_ARGS;    /* Unknown format. 
*/ + } + + return MA_SUCCESS; +} + +static ma_result ma_channel_map_apply_mono_out_f32(float* pFramesOut, const float* pFramesIn, const ma_channel* pChannelMapIn, ma_uint32 channelsIn, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint32 iChannelIn; + ma_uint32 accumulationCount; + + if (pFramesOut == NULL || pFramesIn == NULL || channelsIn == 0) { + return MA_INVALID_ARGS; + } + + /* In this case the output stream needs to be the average of all channels, ignoring NONE. */ + + /* A quick pre-processing step to get the accumulation counter since we're ignoring NONE channels. */ + accumulationCount = 0; + for (iChannelIn = 0; iChannelIn < channelsIn; iChannelIn += 1) { + if (ma_channel_map_get_channel(pChannelMapIn, channelsIn, iChannelIn) != MA_CHANNEL_NONE) { + accumulationCount += 1; + } + } + + if (accumulationCount > 0) { /* <-- Prevent a division by zero. */ + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float accumulation = 0; + + for (iChannelIn = 0; iChannelIn < channelsIn; iChannelIn += 1) { + ma_channel channelIn = ma_channel_map_get_channel(pChannelMapIn, channelsIn, iChannelIn); + if (channelIn != MA_CHANNEL_NONE) { + accumulation += pFramesIn[iChannelIn]; + } + } + + pFramesOut[0] = accumulation / accumulationCount; + pFramesOut += 1; + pFramesIn += channelsIn; + } + } else { + ma_silence_pcm_frames(pFramesOut, frameCount, ma_format_f32, 1); + } + + return MA_SUCCESS; +} + +static ma_result ma_channel_map_apply_mono_in_f32(float* MA_RESTRICT pFramesOut, const ma_channel* pChannelMapOut, ma_uint32 channelsOut, const float* MA_RESTRICT pFramesIn, ma_uint64 frameCount, ma_mono_expansion_mode monoExpansionMode) +{ + ma_uint64 iFrame; + ma_uint32 iChannelOut; + + if (pFramesOut == NULL || channelsOut == 0 || pFramesIn == NULL) { + return MA_INVALID_ARGS; + } + + /* Note that the MA_CHANNEL_NONE channel must be ignored in all cases. */ + switch (monoExpansionMode) + { + case ma_mono_expansion_mode_average: + { + float weight; + ma_uint32 validChannelCount = 0; + + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + ma_channel channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut); + if (channelOut != MA_CHANNEL_NONE) { + validChannelCount += 1; + } + } + + weight = 1.0f / validChannelCount; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + ma_channel channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut); + if (channelOut != MA_CHANNEL_NONE) { + pFramesOut[iChannelOut] = pFramesIn[0] * weight; + } + } + + pFramesOut += channelsOut; + pFramesIn += 1; + } + } break; + + case ma_mono_expansion_mode_stereo_only: + { + if (channelsOut >= 2) { + ma_uint32 iChannelLeft = (ma_uint32)-1; + ma_uint32 iChannelRight = (ma_uint32)-1; + + /* + We first need to find our stereo channels. We prefer front-left and front-right, but + if they're not available, we'll also try side-left and side-right. If neither are + available we'll fall through to the default case below. 
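+
+                    Note that the side pair is scanned first and the front pair second, so when a map contains both
+                    pairs the front assignments overwrite the side ones; that second scan is what enforces the
+                    front-left/front-right preference. With a 7.1 style output map, for example, iChannelLeft and
+                    iChannelRight end up pointing at FL and FR rather than SL and SR.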
+ */ + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + ma_channel channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut); + if (channelOut == MA_CHANNEL_SIDE_LEFT) { + iChannelLeft = iChannelOut; + } + if (channelOut == MA_CHANNEL_SIDE_RIGHT) { + iChannelRight = iChannelOut; + } + } + + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + ma_channel channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut); + if (channelOut == MA_CHANNEL_FRONT_LEFT) { + iChannelLeft = iChannelOut; + } + if (channelOut == MA_CHANNEL_FRONT_RIGHT) { + iChannelRight = iChannelOut; + } + } + + + if (iChannelLeft != (ma_uint32)-1 && iChannelRight != (ma_uint32)-1) { + /* We found our stereo channels so we can duplicate the signal across those channels. */ + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + ma_channel channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut); + if (channelOut != MA_CHANNEL_NONE) { + if (iChannelOut == iChannelLeft || iChannelOut == iChannelRight) { + pFramesOut[iChannelOut] = pFramesIn[0]; + } else { + pFramesOut[iChannelOut] = 0.0f; + } + } + } + + pFramesOut += channelsOut; + pFramesIn += 1; + } + + break; /* Get out of the switch. */ + } else { + /* Fallthrough. Does not have left and right channels. */ + goto default_handler; + } + } else { + /* Fallthrough. Does not have stereo channels. */ + goto default_handler; + } + }; /* Fallthrough. See comments above. */ + + case ma_mono_expansion_mode_duplicate: + default: + { + default_handler: + { + if (channelsOut <= MA_MAX_CHANNELS) { + ma_bool32 hasEmptyChannel = MA_FALSE; + ma_channel channelPositions[MA_MAX_CHANNELS]; + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + channelPositions[iChannelOut] = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut); + if (channelPositions[iChannelOut] == MA_CHANNEL_NONE) { + hasEmptyChannel = MA_TRUE; + } + } + + if (hasEmptyChannel == MA_FALSE) { + /* + Faster path when there's no MA_CHANNEL_NONE channel positions. This should hopefully + help the compiler with auto-vectorization.m + */ + if (channelsOut == 2) { +#if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + /* We want to do two frames in each iteration. */ + ma_uint64 unrolledFrameCount = frameCount >> 1; + + for (iFrame = 0; iFrame < unrolledFrameCount; iFrame += 1) { + __m128 in0 = _mm_set1_ps(pFramesIn[iFrame*2 + 0]); + __m128 in1 = _mm_set1_ps(pFramesIn[iFrame*2 + 1]); + _mm_storeu_ps(&pFramesOut[iFrame*4 + 0], _mm_shuffle_ps(in1, in0, _MM_SHUFFLE(0, 0, 0, 0))); + } + + /* Tail. */ + iFrame = unrolledFrameCount << 1; + goto generic_on_fastpath; + } else +#endif + { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < 2; iChannelOut += 1) { + pFramesOut[iFrame*2 + iChannelOut] = pFramesIn[iFrame]; + } + } + } + } else if (channelsOut == 6) { +#if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + /* We want to do two frames in each iteration so we can have a multiple of 4 samples. 
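+                               Two frames of six channels are 12 consecutive floats, so each unrolled iteration is
+                               written with three 4-wide stores: the first covers frame 0's first four channels, the
+                               middle one straddles the last two channels of frame 0 and the first two of frame 1,
+                               and the last covers the remainder of frame 1 (every channel of a frame carries the
+                               same mono sample here).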
*/ + ma_uint64 unrolledFrameCount = frameCount >> 1; + + for (iFrame = 0; iFrame < unrolledFrameCount; iFrame += 1) { + __m128 in0 = _mm_set1_ps(pFramesIn[iFrame*2 + 0]); + __m128 in1 = _mm_set1_ps(pFramesIn[iFrame*2 + 1]); + + _mm_storeu_ps(&pFramesOut[iFrame*12 + 0], in0); + _mm_storeu_ps(&pFramesOut[iFrame*12 + 4], _mm_shuffle_ps(in1, in0, _MM_SHUFFLE(0, 0, 0, 0))); + _mm_storeu_ps(&pFramesOut[iFrame*12 + 8], in1); + } + + /* Tail. */ + iFrame = unrolledFrameCount << 1; + goto generic_on_fastpath; + } else +#endif + { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < 6; iChannelOut += 1) { + pFramesOut[iFrame*6 + iChannelOut] = pFramesIn[iFrame]; + } + } + } + } else if (channelsOut == 8) { +#if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + __m128 in = _mm_set1_ps(pFramesIn[iFrame]); + _mm_storeu_ps(&pFramesOut[iFrame*8 + 0], in); + _mm_storeu_ps(&pFramesOut[iFrame*8 + 4], in); + } + } else +#endif + { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < 8; iChannelOut += 1) { + pFramesOut[iFrame*8 + iChannelOut] = pFramesIn[iFrame]; + } + } + } + } else { + iFrame = 0; + +#if defined(MA_SUPPORT_SSE2) /* For silencing a warning with non-x86 builds. */ + generic_on_fastpath: +#endif + { + for (; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + pFramesOut[iFrame*channelsOut + iChannelOut] = pFramesIn[iFrame]; + } + } + } + } + } else { + /* Slow path. Need to handle MA_CHANNEL_NONE. */ + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + if (channelPositions[iChannelOut] != MA_CHANNEL_NONE) { + pFramesOut[iFrame*channelsOut + iChannelOut] = pFramesIn[iFrame]; + } + } + } + } + } else { + /* Slow path. Too many channels to store on the stack. */ + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + ma_channel channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut); + if (channelOut != MA_CHANNEL_NONE) { + pFramesOut[iFrame*channelsOut + iChannelOut] = pFramesIn[iFrame]; + } + } + } + } + } + } break; + } + + return MA_SUCCESS; +} + +static void ma_channel_map_apply_f32(float* pFramesOut, const ma_channel* pChannelMapOut, ma_uint32 channelsOut, const float* pFramesIn, const ma_channel* pChannelMapIn, ma_uint32 channelsIn, ma_uint64 frameCount, ma_channel_mix_mode mode, ma_mono_expansion_mode monoExpansionMode) +{ + ma_channel_conversion_path conversionPath = ma_channel_map_get_conversion_path(pChannelMapIn, channelsIn, pChannelMapOut, channelsOut, mode); + + /* Optimized Path: Passthrough */ + if (conversionPath == ma_channel_conversion_path_passthrough) { + ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, ma_format_f32, channelsOut); + return; + } + + /* Special Path: Mono Output. */ + if (conversionPath == ma_channel_conversion_path_mono_out) { + ma_channel_map_apply_mono_out_f32(pFramesOut, pFramesIn, pChannelMapIn, channelsIn, frameCount); + return; + } + + /* Special Path: Mono Input. */ + if (conversionPath == ma_channel_conversion_path_mono_in) { + ma_channel_map_apply_mono_in_f32(pFramesOut, pChannelMapOut, channelsOut, pFramesIn, frameCount, monoExpansionMode); + return; + } + + /* Getting here means we aren't running on an optimized conversion path. 
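+       From here one of two generic strategies is used. In simple mode a shuffle table is built on the stack and
+       applied, which only routes or silences channels. Any other mode computes a rectangular weight for every
+       (output, input) channel pair and accumulates, roughly (an illustrative sketch only):
+
+           for each output channel o:
+               out[o] = sum over input channels i of in[i] * weight[o][i]
+
+       with weight[o][i] coming from ma_calculate_channel_position_rectangular_weight().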
*/ + if (channelsOut <= MA_MAX_CHANNELS) { + ma_result result; + + if (mode == ma_channel_mix_mode_simple) { + ma_channel shuffleTable[MA_MAX_CHANNELS]; + + result = ma_channel_map_build_shuffle_table(pChannelMapIn, channelsIn, pChannelMapOut, channelsOut, shuffleTable); + if (result != MA_SUCCESS) { + return; + } + + result = ma_channel_map_apply_shuffle_table(pFramesOut, channelsOut, pFramesIn, channelsIn, frameCount, shuffleTable, ma_format_f32); + if (result != MA_SUCCESS) { + return; + } + } else { + ma_uint32 iFrame; + ma_uint32 iChannelOut; + ma_uint32 iChannelIn; + float weights[32][32]; /* Do not use MA_MAX_CHANNELS here! */ + + /* + If we have a small enough number of channels, pre-compute the weights. Otherwise we'll just need to + fall back to a slower path because otherwise we'll run out of stack space. + */ + if (channelsIn <= ma_countof(weights) && channelsOut <= ma_countof(weights)) { + /* Pre-compute weights. */ + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + ma_channel channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut); + for (iChannelIn = 0; iChannelIn < channelsIn; iChannelIn += 1) { + ma_channel channelIn = ma_channel_map_get_channel(pChannelMapIn, channelsIn, iChannelIn); + weights[iChannelOut][iChannelIn] = ma_calculate_channel_position_rectangular_weight(channelOut, channelIn); + } + } + + iFrame = 0; + + /* Experiment: Try an optimized unroll for some specific cases to see how it improves performance. RESULT: Good gains. */ + if (channelsOut == 8) { + /* Experiment 2: Expand the inner loop to see what kind of different it makes. RESULT: Small, but worthwhile gain. */ + if (channelsIn == 2) { + for (; iFrame < frameCount; iFrame += 1) { + float accumulation[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; + + accumulation[0] += pFramesIn[iFrame*2 + 0] * weights[0][0]; + accumulation[1] += pFramesIn[iFrame*2 + 0] * weights[1][0]; + accumulation[2] += pFramesIn[iFrame*2 + 0] * weights[2][0]; + accumulation[3] += pFramesIn[iFrame*2 + 0] * weights[3][0]; + accumulation[4] += pFramesIn[iFrame*2 + 0] * weights[4][0]; + accumulation[5] += pFramesIn[iFrame*2 + 0] * weights[5][0]; + accumulation[6] += pFramesIn[iFrame*2 + 0] * weights[6][0]; + accumulation[7] += pFramesIn[iFrame*2 + 0] * weights[7][0]; + + accumulation[0] += pFramesIn[iFrame*2 + 1] * weights[0][1]; + accumulation[1] += pFramesIn[iFrame*2 + 1] * weights[1][1]; + accumulation[2] += pFramesIn[iFrame*2 + 1] * weights[2][1]; + accumulation[3] += pFramesIn[iFrame*2 + 1] * weights[3][1]; + accumulation[4] += pFramesIn[iFrame*2 + 1] * weights[4][1]; + accumulation[5] += pFramesIn[iFrame*2 + 1] * weights[5][1]; + accumulation[6] += pFramesIn[iFrame*2 + 1] * weights[6][1]; + accumulation[7] += pFramesIn[iFrame*2 + 1] * weights[7][1]; + + pFramesOut[iFrame*8 + 0] = accumulation[0]; + pFramesOut[iFrame*8 + 1] = accumulation[1]; + pFramesOut[iFrame*8 + 2] = accumulation[2]; + pFramesOut[iFrame*8 + 3] = accumulation[3]; + pFramesOut[iFrame*8 + 4] = accumulation[4]; + pFramesOut[iFrame*8 + 5] = accumulation[5]; + pFramesOut[iFrame*8 + 6] = accumulation[6]; + pFramesOut[iFrame*8 + 7] = accumulation[7]; + } + } else { + /* When outputting to 8 channels, we can do everything in groups of two 4x SIMD operations. 
*/ + for (; iFrame < frameCount; iFrame += 1) { + float accumulation[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; + + for (iChannelIn = 0; iChannelIn < channelsIn; iChannelIn += 1) { + accumulation[0] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[0][iChannelIn]; + accumulation[1] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[1][iChannelIn]; + accumulation[2] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[2][iChannelIn]; + accumulation[3] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[3][iChannelIn]; + accumulation[4] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[4][iChannelIn]; + accumulation[5] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[5][iChannelIn]; + accumulation[6] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[6][iChannelIn]; + accumulation[7] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[7][iChannelIn]; + } + + pFramesOut[iFrame*8 + 0] = accumulation[0]; + pFramesOut[iFrame*8 + 1] = accumulation[1]; + pFramesOut[iFrame*8 + 2] = accumulation[2]; + pFramesOut[iFrame*8 + 3] = accumulation[3]; + pFramesOut[iFrame*8 + 4] = accumulation[4]; + pFramesOut[iFrame*8 + 5] = accumulation[5]; + pFramesOut[iFrame*8 + 6] = accumulation[6]; + pFramesOut[iFrame*8 + 7] = accumulation[7]; + } + } + } else if (channelsOut == 6) { + /* + When outputting to 6 channels we unfortunately don't have a nice multiple of 4 to do 4x SIMD operations. Instead we'll + expand our weights and do two frames at a time. + */ + for (; iFrame < frameCount; iFrame += 1) { + float accumulation[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + for (iChannelIn = 0; iChannelIn < channelsIn; iChannelIn += 1) { + accumulation[0] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[0][iChannelIn]; + accumulation[1] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[1][iChannelIn]; + accumulation[2] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[2][iChannelIn]; + accumulation[3] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[3][iChannelIn]; + accumulation[4] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[4][iChannelIn]; + accumulation[5] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[5][iChannelIn]; + } + + pFramesOut[iFrame*6 + 0] = accumulation[0]; + pFramesOut[iFrame*6 + 1] = accumulation[1]; + pFramesOut[iFrame*6 + 2] = accumulation[2]; + pFramesOut[iFrame*6 + 3] = accumulation[3]; + pFramesOut[iFrame*6 + 4] = accumulation[4]; + pFramesOut[iFrame*6 + 5] = accumulation[5]; + } + } + + /* Leftover frames. */ + for (; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + float accumulation = 0; + + for (iChannelIn = 0; iChannelIn < channelsIn; iChannelIn += 1) { + accumulation += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[iChannelOut][iChannelIn]; + } + + pFramesOut[iFrame*channelsOut + iChannelOut] = accumulation; + } + } + } else { + /* Cannot pre-compute weights because not enough room in stack-allocated buffer. 
*/
+                for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+                    for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) {
+                        float accumulation = 0;
+                        ma_channel channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut);
+
+                        for (iChannelIn = 0; iChannelIn < channelsIn; iChannelIn += 1) {
+                            ma_channel channelIn = ma_channel_map_get_channel(pChannelMapIn, channelsIn, iChannelIn);
+                            accumulation += pFramesIn[iFrame*channelsIn + iChannelIn] * ma_calculate_channel_position_rectangular_weight(channelOut, channelIn);
+                        }
+
+                        pFramesOut[iFrame*channelsOut + iChannelOut] = accumulation;
+                    }
+                }
+            }
+        }
+    } else {
+        /* Fall back to silence. If you hit this, what are you doing with so many channels?! */
+        ma_silence_pcm_frames(pFramesOut, frameCount, ma_format_f32, channelsOut);
+    }
+}
+
+
+typedef struct
+{
+    size_t sizeInBytes;
+    size_t channelMapInOffset;
+    size_t channelMapOutOffset;
+    size_t shuffleTableOffset;
+    size_t weightsOffset;
+} ma_channel_converter_heap_layout;
+
+static ma_channel_conversion_path ma_channel_converter_config_get_conversion_path(const ma_channel_converter_config* pConfig)
+{
+    return ma_channel_map_get_conversion_path(pConfig->pChannelMapIn, pConfig->channelsIn, pConfig->pChannelMapOut, pConfig->channelsOut, pConfig->mixingMode);
+}
+
+static ma_result ma_channel_converter_get_heap_layout(const ma_channel_converter_config* pConfig, ma_channel_converter_heap_layout* pHeapLayout)
+{
+    ma_channel_conversion_path conversionPath;
+
+    MA_ASSERT(pHeapLayout != NULL);
+
+    if (pConfig == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pConfig->channelsIn == 0 || pConfig->channelsOut == 0) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (!ma_channel_map_is_valid(pConfig->pChannelMapIn, pConfig->channelsIn)) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (!ma_channel_map_is_valid(pConfig->pChannelMapOut, pConfig->channelsOut)) {
+        return MA_INVALID_ARGS;
+    }
+
+    pHeapLayout->sizeInBytes = 0;
+
+    /* Input channel map. Only need to allocate this if we have an input channel map (otherwise default channel map is assumed). */
+    pHeapLayout->channelMapInOffset = pHeapLayout->sizeInBytes;
+    if (pConfig->pChannelMapIn != NULL) {
+        pHeapLayout->sizeInBytes += sizeof(ma_channel) * pConfig->channelsIn;
+    }
+
+    /* Output channel map. Only need to allocate this if we have an output channel map (otherwise default channel map is assumed). */
+    pHeapLayout->channelMapOutOffset = pHeapLayout->sizeInBytes;
+    if (pConfig->pChannelMapOut != NULL) {
+        pHeapLayout->sizeInBytes += sizeof(ma_channel) * pConfig->channelsOut;
+    }
+
+    /* Alignment for the next section. */
+    pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes);
+
+    /* Whether or not we use weights or a shuffle table depends on the channel maps themselves and the algorithm we've chosen. */
+    conversionPath = ma_channel_converter_config_get_conversion_path(pConfig);
+
+    /* Shuffle table */
+    pHeapLayout->shuffleTableOffset = pHeapLayout->sizeInBytes;
+    if (conversionPath == ma_channel_conversion_path_shuffle) {
+        pHeapLayout->sizeInBytes += sizeof(ma_uint8) * pConfig->channelsOut;
+    }
+
+    /* Weights */
+    pHeapLayout->weightsOffset = pHeapLayout->sizeInBytes;
+    if (conversionPath == ma_channel_conversion_path_weights) {
+        pHeapLayout->sizeInBytes += sizeof(float*) * pConfig->channelsIn;
+        pHeapLayout->sizeInBytes += sizeof(float ) * pConfig->channelsIn * pConfig->channelsOut;
+    }
+
+    /* Make sure allocation size is aligned. 
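+       As a rough worked example (assuming a 64-bit build where sizeof(float*) is 8, ma_channel is a single byte
+       and both channel maps are supplied): a 2-in, 6-out converter on the weights path needs 2 + 6 = 8 bytes of
+       channel maps, which rounds up to 64, plus 2*8 = 16 bytes of row pointers and 2*6*4 = 48 bytes of weights
+       for another 64, giving 128 bytes after this final alignment.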
*/ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +MA_API ma_result ma_channel_converter_get_heap_size(const ma_channel_converter_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_channel_converter_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_channel_converter_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_channel_converter_init_preallocated(const ma_channel_converter_config* pConfig, void* pHeap, ma_channel_converter* pConverter) +{ + ma_result result; + ma_channel_converter_heap_layout heapLayout; + + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pConverter); + + result = ma_channel_converter_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pConverter->_pHeap = pHeap; + MA_ZERO_MEMORY(pConverter->_pHeap, heapLayout.sizeInBytes); + + pConverter->format = pConfig->format; + pConverter->channelsIn = pConfig->channelsIn; + pConverter->channelsOut = pConfig->channelsOut; + pConverter->mixingMode = pConfig->mixingMode; + + if (pConfig->pChannelMapIn != NULL) { + pConverter->pChannelMapIn = (ma_channel*)ma_offset_ptr(pHeap, heapLayout.channelMapInOffset); + ma_channel_map_copy_or_default(pConverter->pChannelMapIn, pConfig->channelsIn, pConfig->pChannelMapIn, pConfig->channelsIn); + } else { + pConverter->pChannelMapIn = NULL; /* Use default channel map. */ + } + + if (pConfig->pChannelMapOut != NULL) { + pConverter->pChannelMapOut = (ma_channel*)ma_offset_ptr(pHeap, heapLayout.channelMapOutOffset); + ma_channel_map_copy_or_default(pConverter->pChannelMapOut, pConfig->channelsOut, pConfig->pChannelMapOut, pConfig->channelsOut); + } else { + pConverter->pChannelMapOut = NULL; /* Use default channel map. */ + } + + pConverter->conversionPath = ma_channel_converter_config_get_conversion_path(pConfig); + + if (pConverter->conversionPath == ma_channel_conversion_path_shuffle) { + pConverter->pShuffleTable = (ma_uint8*)ma_offset_ptr(pHeap, heapLayout.shuffleTableOffset); + ma_channel_map_build_shuffle_table(pConverter->pChannelMapIn, pConverter->channelsIn, pConverter->pChannelMapOut, pConverter->channelsOut, pConverter->pShuffleTable); + } + + if (pConverter->conversionPath == ma_channel_conversion_path_weights) { + ma_uint32 iChannelIn; + ma_uint32 iChannelOut; + + if (pConverter->format == ma_format_f32) { + pConverter->weights.f32 = (float** )ma_offset_ptr(pHeap, heapLayout.weightsOffset); + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; iChannelIn += 1) { + pConverter->weights.f32[iChannelIn] = (float*)ma_offset_ptr(pHeap, heapLayout.weightsOffset + ((sizeof(float*) * pConverter->channelsIn) + (sizeof(float) * pConverter->channelsOut * iChannelIn))); + } + } else { + pConverter->weights.s16 = (ma_int32**)ma_offset_ptr(pHeap, heapLayout.weightsOffset); + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; iChannelIn += 1) { + pConverter->weights.s16[iChannelIn] = (ma_int32*)ma_offset_ptr(pHeap, heapLayout.weightsOffset + ((sizeof(ma_int32*) * pConverter->channelsIn) + (sizeof(ma_int32) * pConverter->channelsOut * iChannelIn))); + } + } + + /* Silence our weights by default. 
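+           Every (in, out) pair starts at zero, the 1:1 loop below then writes a weight of 1 for any position that
+           appears in both maps, and the selected mixing mode only fills in the remaining pairs. For a stereo to
+           5.1 conversion in rectangular mode, for example, FL->FL and FR->FR end up at 1 while the centre and rear
+           columns are derived from the rectangular weights further down; the LFE column is handled separately by
+           the calculateLFEFromSpatialChannels option.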
*/ + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; iChannelIn += 1) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; iChannelOut += 1) { + if (pConverter->format == ma_format_f32) { + pConverter->weights.f32[iChannelIn][iChannelOut] = 0.0f; + } else { + pConverter->weights.s16[iChannelIn][iChannelOut] = 0; + } + } + } + + /* + We now need to fill out our weights table. This is determined by the mixing mode. + */ + + /* In all cases we need to make sure all channels that are present in both channel maps have a 1:1 mapping. */ + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = ma_channel_map_get_channel(pConverter->pChannelMapIn, pConverter->channelsIn, iChannelIn); + + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_channel channelPosOut = ma_channel_map_get_channel(pConverter->pChannelMapOut, pConverter->channelsOut, iChannelOut); + + if (channelPosIn == channelPosOut) { + float weight = 1; + + if (pConverter->format == ma_format_f32) { + pConverter->weights.f32[iChannelIn][iChannelOut] = weight; + } else { + pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fixed(weight); + } + } + } + } + + switch (pConverter->mixingMode) + { + case ma_channel_mix_mode_custom_weights: + { + if (pConfig->ppWeights == NULL) { + return MA_INVALID_ARGS; /* Config specified a custom weights mixing mode, but no custom weights have been specified. */ + } + + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; iChannelIn += 1) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; iChannelOut += 1) { + float weight = pConfig->ppWeights[iChannelIn][iChannelOut]; + + if (pConverter->format == ma_format_f32) { + pConverter->weights.f32[iChannelIn][iChannelOut] = weight; + } else { + pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fixed(weight); + } + } + } + } break; + + case ma_channel_mix_mode_simple: + { + /* + In simple mode, only set weights for channels that have exactly matching types, leave the rest at + zero. The 1:1 mappings have already been covered before this switch statement. + */ + } break; + + case ma_channel_mix_mode_rectangular: + default: + { + /* Unmapped input channels. */ + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = ma_channel_map_get_channel(pConverter->pChannelMapIn, pConverter->channelsIn, iChannelIn); + + if (ma_is_spatial_channel_position(channelPosIn)) { + if (!ma_channel_map_contains_channel_position(pConverter->channelsOut, pConverter->pChannelMapOut, channelPosIn)) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_channel channelPosOut = ma_channel_map_get_channel(pConverter->pChannelMapOut, pConverter->channelsOut, iChannelOut); + + if (ma_is_spatial_channel_position(channelPosOut)) { + float weight = 0; + if (pConverter->mixingMode == ma_channel_mix_mode_rectangular) { + weight = ma_calculate_channel_position_rectangular_weight(channelPosIn, channelPosOut); + } + + /* Only apply the weight if we haven't already got some contribution from the respective channels. 
*/ + if (pConverter->format == ma_format_f32) { + if (pConverter->weights.f32[iChannelIn][iChannelOut] == 0) { + pConverter->weights.f32[iChannelIn][iChannelOut] = weight; + } + } else { + if (pConverter->weights.s16[iChannelIn][iChannelOut] == 0) { + pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fixed(weight); + } + } + } + } + } + } + } + + /* Unmapped output channels. */ + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_channel channelPosOut = ma_channel_map_get_channel(pConverter->pChannelMapOut, pConverter->channelsOut, iChannelOut); + + if (ma_is_spatial_channel_position(channelPosOut)) { + if (!ma_channel_map_contains_channel_position(pConverter->channelsIn, pConverter->pChannelMapIn, channelPosOut)) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = ma_channel_map_get_channel(pConverter->pChannelMapIn, pConverter->channelsIn, iChannelIn); + + if (ma_is_spatial_channel_position(channelPosIn)) { + float weight = 0; + if (pConverter->mixingMode == ma_channel_mix_mode_rectangular) { + weight = ma_calculate_channel_position_rectangular_weight(channelPosIn, channelPosOut); + } + + /* Only apply the weight if we haven't already got some contribution from the respective channels. */ + if (pConverter->format == ma_format_f32) { + if (pConverter->weights.f32[iChannelIn][iChannelOut] == 0) { + pConverter->weights.f32[iChannelIn][iChannelOut] = weight; + } + } else { + if (pConverter->weights.s16[iChannelIn][iChannelOut] == 0) { + pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fixed(weight); + } + } + } + } + } + } + } + + /* If LFE is in the output channel map but was not present in the input channel map, configure its weight now */ + if (pConfig->calculateLFEFromSpatialChannels) { + if (!ma_channel_map_contains_channel_position(pConverter->channelsIn, pConverter->pChannelMapIn, MA_CHANNEL_LFE)) { + ma_uint32 spatialChannelCount = ma_channel_map_get_spatial_channel_count(pConverter->pChannelMapIn, pConverter->channelsIn); + ma_uint32 iChannelOutLFE; + + if (spatialChannelCount > 0 && ma_channel_map_find_channel_position(pConverter->channelsOut, pConverter->pChannelMapOut, MA_CHANNEL_LFE, &iChannelOutLFE)) { + const float weightForLFE = 1.0f / spatialChannelCount; + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + const ma_channel channelPosIn = ma_channel_map_get_channel(pConverter->pChannelMapIn, pConverter->channelsIn, iChannelIn); + if (ma_is_spatial_channel_position(channelPosIn)) { + if (pConverter->format == ma_format_f32) { + if (pConverter->weights.f32[iChannelIn][iChannelOutLFE] == 0) { + pConverter->weights.f32[iChannelIn][iChannelOutLFE] = weightForLFE; + } + } else { + if (pConverter->weights.s16[iChannelIn][iChannelOutLFE] == 0) { + pConverter->weights.s16[iChannelIn][iChannelOutLFE] = ma_channel_converter_float_to_fixed(weightForLFE); + } + } + } + } + } + } + } + } break; + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_channel_converter_init(const ma_channel_converter_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_channel_converter* pConverter) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_channel_converter_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return 
MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_channel_converter_init_preallocated(pConfig, pHeap, pConverter); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pConverter->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_channel_converter_uninit(ma_channel_converter* pConverter, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pConverter == NULL) { + return; + } + + if (pConverter->_ownsHeap) { + ma_free(pConverter->_pHeap, pAllocationCallbacks); + } +} + +static ma_result ma_channel_converter_process_pcm_frames__passthrough(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pFramesIn != NULL); + + ma_copy_memory_64(pFramesOut, pFramesIn, frameCount * ma_get_bytes_per_frame(pConverter->format, pConverter->channelsOut)); + return MA_SUCCESS; +} + +static ma_result ma_channel_converter_process_pcm_frames__shuffle(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pFramesIn != NULL); + MA_ASSERT(pConverter->channelsIn == pConverter->channelsOut); + + return ma_channel_map_apply_shuffle_table(pFramesOut, pConverter->channelsOut, pFramesIn, pConverter->channelsIn, frameCount, pConverter->pShuffleTable, pConverter->format); +} + +static ma_result ma_channel_converter_process_pcm_frames__mono_in(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pFramesIn != NULL); + MA_ASSERT(pConverter->channelsIn == 1); + + switch (pConverter->format) + { + case ma_format_u8: + { + /* */ ma_uint8* pFramesOutU8 = ( ma_uint8*)pFramesOut; + const ma_uint8* pFramesInU8 = (const ma_uint8*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < pConverter->channelsOut; iChannel += 1) { + pFramesOutU8[iFrame*pConverter->channelsOut + iChannel] = pFramesInU8[iFrame]; + } + } + } break; + + case ma_format_s16: + { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; + + if (pConverter->channelsOut == 2) { + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + pFramesOutS16[iFrame*2 + 0] = pFramesInS16[iFrame]; + pFramesOutS16[iFrame*2 + 1] = pFramesInS16[iFrame]; + } + } else { + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < pConverter->channelsOut; iChannel += 1) { + pFramesOutS16[iFrame*pConverter->channelsOut + iChannel] = pFramesInS16[iFrame]; + } + } + } + } break; + + case ma_format_s24: + { + /* */ ma_uint8* pFramesOutS24 = ( ma_uint8*)pFramesOut; + const ma_uint8* pFramesInS24 = (const ma_uint8*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < pConverter->channelsOut; iChannel += 1) { + ma_uint64 iSampleOut = iFrame*pConverter->channelsOut + iChannel; + ma_uint64 iSampleIn = iFrame; + pFramesOutS24[iSampleOut*3 + 0] = pFramesInS24[iSampleIn*3 + 0]; + pFramesOutS24[iSampleOut*3 + 1] = pFramesInS24[iSampleIn*3 + 1]; + pFramesOutS24[iSampleOut*3 + 2] = pFramesInS24[iSampleIn*3 + 2]; + } + } + } break; + + case ma_format_s32: + { + /* */ ma_int32* pFramesOutS32 = ( 
ma_int32*)pFramesOut; + const ma_int32* pFramesInS32 = (const ma_int32*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < pConverter->channelsOut; iChannel += 1) { + pFramesOutS32[iFrame*pConverter->channelsOut + iChannel] = pFramesInS32[iFrame]; + } + } + } break; + + case ma_format_f32: + { + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + if (pConverter->channelsOut == 2) { + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + pFramesOutF32[iFrame*2 + 0] = pFramesInF32[iFrame]; + pFramesOutF32[iFrame*2 + 1] = pFramesInF32[iFrame]; + } + } else { + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < pConverter->channelsOut; iChannel += 1) { + pFramesOutF32[iFrame*pConverter->channelsOut + iChannel] = pFramesInF32[iFrame]; + } + } + } + } break; + + default: return MA_INVALID_OPERATION; /* Unknown format. */ + } + + return MA_SUCCESS; +} + +static ma_result ma_channel_converter_process_pcm_frames__mono_out(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint32 iChannel; + + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pFramesIn != NULL); + MA_ASSERT(pConverter->channelsOut == 1); + + switch (pConverter->format) + { + case ma_format_u8: + { + /* */ ma_uint8* pFramesOutU8 = ( ma_uint8*)pFramesOut; + const ma_uint8* pFramesInU8 = (const ma_uint8*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_int32 t = 0; + for (iChannel = 0; iChannel < pConverter->channelsIn; iChannel += 1) { + t += ma_pcm_sample_u8_to_s16_no_scale(pFramesInU8[iFrame*pConverter->channelsIn + iChannel]); + } + + pFramesOutU8[iFrame] = ma_clip_u8(t / pConverter->channelsOut); + } + } break; + + case ma_format_s16: + { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_int32 t = 0; + for (iChannel = 0; iChannel < pConverter->channelsIn; iChannel += 1) { + t += pFramesInS16[iFrame*pConverter->channelsIn + iChannel]; + } + + pFramesOutS16[iFrame] = (ma_int16)(t / pConverter->channelsIn); + } + } break; + + case ma_format_s24: + { + /* */ ma_uint8* pFramesOutS24 = ( ma_uint8*)pFramesOut; + const ma_uint8* pFramesInS24 = (const ma_uint8*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_int64 t = 0; + for (iChannel = 0; iChannel < pConverter->channelsIn; iChannel += 1) { + t += ma_pcm_sample_s24_to_s32_no_scale(&pFramesInS24[(iFrame*pConverter->channelsIn + iChannel)*3]); + } + + ma_pcm_sample_s32_to_s24_no_scale(t / pConverter->channelsIn, &pFramesOutS24[iFrame*3]); + } + } break; + + case ma_format_s32: + { + /* */ ma_int32* pFramesOutS32 = ( ma_int32*)pFramesOut; + const ma_int32* pFramesInS32 = (const ma_int32*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_int64 t = 0; + for (iChannel = 0; iChannel < pConverter->channelsIn; iChannel += 1) { + t += pFramesInS32[iFrame*pConverter->channelsIn + iChannel]; + } + + pFramesOutS32[iFrame] = (ma_int32)(t / pConverter->channelsIn); + } + } break; + + case ma_format_f32: + { + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + float t = 0; + for (iChannel = 0; iChannel < pConverter->channelsIn; iChannel 
+= 1) { + t += pFramesInF32[iFrame*pConverter->channelsIn + iChannel]; + } + + pFramesOutF32[iFrame] = t / pConverter->channelsIn; + } + } break; + + default: return MA_INVALID_OPERATION; /* Unknown format. */ + } + + return MA_SUCCESS; +} + +static ma_result ma_channel_converter_process_pcm_frames__weights(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_uint32 iFrame; + ma_uint32 iChannelIn; + ma_uint32 iChannelOut; + + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pFramesIn != NULL); + + /* This is the more complicated case. Each of the output channels is accumulated with 0 or more input channels. */ + + /* Clear. */ + ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->format, pConverter->channelsOut)); + + /* Accumulate. */ + switch (pConverter->format) + { + case ma_format_u8: + { + /* */ ma_uint8* pFramesOutU8 = ( ma_uint8*)pFramesOut; + const ma_uint8* pFramesInU8 = (const ma_uint8*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_int16 u8_O = ma_pcm_sample_u8_to_s16_no_scale(pFramesOutU8[iFrame*pConverter->channelsOut + iChannelOut]); + ma_int16 u8_I = ma_pcm_sample_u8_to_s16_no_scale(pFramesInU8 [iFrame*pConverter->channelsIn + iChannelIn ]); + ma_int32 s = (ma_int32)ma_clamp(u8_O + ((u8_I * pConverter->weights.s16[iChannelIn][iChannelOut]) >> MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT), -128, 127); + pFramesOutU8[iFrame*pConverter->channelsOut + iChannelOut] = ma_clip_u8((ma_int16)s); + } + } + } + } break; + + case ma_format_s16: + { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_int32 s = pFramesOutS16[iFrame*pConverter->channelsOut + iChannelOut]; + s += (pFramesInS16[iFrame*pConverter->channelsIn + iChannelIn] * pConverter->weights.s16[iChannelIn][iChannelOut]) >> MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT; + + pFramesOutS16[iFrame*pConverter->channelsOut + iChannelOut] = (ma_int16)ma_clamp(s, -32768, 32767); + } + } + } + } break; + + case ma_format_s24: + { + /* */ ma_uint8* pFramesOutS24 = ( ma_uint8*)pFramesOut; + const ma_uint8* pFramesInS24 = (const ma_uint8*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_int64 s24_O = ma_pcm_sample_s24_to_s32_no_scale(&pFramesOutS24[(iFrame*pConverter->channelsOut + iChannelOut)*3]); + ma_int64 s24_I = ma_pcm_sample_s24_to_s32_no_scale(&pFramesInS24 [(iFrame*pConverter->channelsIn + iChannelIn )*3]); + ma_int64 s24 = (ma_int32)ma_clamp(s24_O + ((s24_I * pConverter->weights.s16[iChannelIn][iChannelOut]) >> MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT), -8388608, 8388607); + ma_pcm_sample_s32_to_s24_no_scale(s24, &pFramesOutS24[(iFrame*pConverter->channelsOut + iChannelOut)*3]); + } + } + } + } break; + + case ma_format_s32: + { + /* */ ma_int32* pFramesOutS32 = ( ma_int32*)pFramesOut; + const ma_int32* pFramesInS32 = (const ma_int32*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { 
+ for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_int64 s = pFramesOutS32[iFrame*pConverter->channelsOut + iChannelOut]; + s += ((ma_int64)pFramesInS32[iFrame*pConverter->channelsIn + iChannelIn] * pConverter->weights.s16[iChannelIn][iChannelOut]) >> MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT; + + pFramesOutS32[iFrame*pConverter->channelsOut + iChannelOut] = ma_clip_s32(s); + } + } + } + } break; + + case ma_format_f32: + { + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + pFramesOutF32[iFrame*pConverter->channelsOut + iChannelOut] += pFramesInF32[iFrame*pConverter->channelsIn + iChannelIn] * pConverter->weights.f32[iChannelIn][iChannelOut]; + } + } + } + } break; + + default: return MA_INVALID_OPERATION; /* Unknown format. */ + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_channel_converter_process_pcm_frames(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + if (pFramesOut == NULL) { + return MA_INVALID_ARGS; + } + + if (pFramesIn == NULL) { + ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->format, pConverter->channelsOut)); + return MA_SUCCESS; + } + + switch (pConverter->conversionPath) + { + case ma_channel_conversion_path_passthrough: return ma_channel_converter_process_pcm_frames__passthrough(pConverter, pFramesOut, pFramesIn, frameCount); + case ma_channel_conversion_path_mono_out: return ma_channel_converter_process_pcm_frames__mono_out(pConverter, pFramesOut, pFramesIn, frameCount); + case ma_channel_conversion_path_mono_in: return ma_channel_converter_process_pcm_frames__mono_in(pConverter, pFramesOut, pFramesIn, frameCount); + case ma_channel_conversion_path_shuffle: return ma_channel_converter_process_pcm_frames__shuffle(pConverter, pFramesOut, pFramesIn, frameCount); + case ma_channel_conversion_path_weights: + default: + { + return ma_channel_converter_process_pcm_frames__weights(pConverter, pFramesOut, pFramesIn, frameCount); + } + } +} + +MA_API ma_result ma_channel_converter_get_input_channel_map(const ma_channel_converter* pConverter, ma_channel* pChannelMap, size_t channelMapCap) +{ + if (pConverter == NULL || pChannelMap == NULL) { + return MA_INVALID_ARGS; + } + + ma_channel_map_copy_or_default(pChannelMap, channelMapCap, pConverter->pChannelMapIn, pConverter->channelsIn); + + return MA_SUCCESS; +} + +MA_API ma_result ma_channel_converter_get_output_channel_map(const ma_channel_converter* pConverter, ma_channel* pChannelMap, size_t channelMapCap) +{ + if (pConverter == NULL || pChannelMap == NULL) { + return MA_INVALID_ARGS; + } + + ma_channel_map_copy_or_default(pChannelMap, channelMapCap, pConverter->pChannelMapOut, pConverter->channelsOut); + + return MA_SUCCESS; +} + + +/************************************************************************************************************************************************************** + +Data Conversion + +**************************************************************************************************************************************************************/ +MA_API ma_data_converter_config 
ma_data_converter_config_init_default(void) +{ + ma_data_converter_config config; + MA_ZERO_OBJECT(&config); + + config.ditherMode = ma_dither_mode_none; + config.resampling.algorithm = ma_resample_algorithm_linear; + config.allowDynamicSampleRate = MA_FALSE; /* Disable dynamic sample rates by default because dynamic rate adjustments should be quite rare and it allows an optimization for cases when the in and out sample rates are the same. */ + + /* Linear resampling defaults. */ + config.resampling.linear.lpfOrder = 1; + + return config; +} + +MA_API ma_data_converter_config ma_data_converter_config_init(ma_format formatIn, ma_format formatOut, ma_uint32 channelsIn, ma_uint32 channelsOut, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +{ + ma_data_converter_config config = ma_data_converter_config_init_default(); + config.formatIn = formatIn; + config.formatOut = formatOut; + config.channelsIn = channelsIn; + config.channelsOut = channelsOut; + config.sampleRateIn = sampleRateIn; + config.sampleRateOut = sampleRateOut; + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t channelConverterOffset; + size_t resamplerOffset; +} ma_data_converter_heap_layout; + +static ma_bool32 ma_data_converter_config_is_resampler_required(const ma_data_converter_config* pConfig) +{ + MA_ASSERT(pConfig != NULL); + + return pConfig->allowDynamicSampleRate || pConfig->sampleRateIn != pConfig->sampleRateOut; +} + +static ma_format ma_data_converter_config_get_mid_format(const ma_data_converter_config* pConfig) +{ + MA_ASSERT(pConfig != NULL); + + /* + We want to avoid as much data conversion as possible. The channel converter and linear + resampler both support s16 and f32 natively. We need to decide on the format to use for this + stage. We call this the mid format because it's used in the middle stage of the conversion + pipeline. If the output format is either s16 or f32 we use that one. If that is not the case it + will do the same thing for the input format. If it's neither we just use f32. If we are using a + custom resampling backend, we can only guarantee that f32 will be supported so we'll be forced + to use that if resampling is required. + */ + if (ma_data_converter_config_is_resampler_required(pConfig) && pConfig->resampling.algorithm != ma_resample_algorithm_linear) { + return ma_format_f32; /* <-- Force f32 since that is the only one we can guarantee will be supported by the resampler. 
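+           With the stock linear resampler the selection below works out as, for example: f32 in / s16 out picks
+           s16, s16 in / s24 out picks s16, and s24 in / s32 out falls back to f32. So a converter set up with
+           ma_data_converter_config_init(ma_format_f32, ma_format_s16, 2, 2, 44100, 48000) resamples in s16 and
+           only needs a pre-format conversion.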
*/ + } else { + /* */ if (pConfig->formatOut == ma_format_s16 || pConfig->formatOut == ma_format_f32) { + return pConfig->formatOut; + } else if (pConfig->formatIn == ma_format_s16 || pConfig->formatIn == ma_format_f32) { + return pConfig->formatIn; + } else { + return ma_format_f32; + } + } +} + +static ma_channel_converter_config ma_channel_converter_config_init_from_data_converter_config(const ma_data_converter_config* pConfig) +{ + ma_channel_converter_config channelConverterConfig; + + MA_ASSERT(pConfig != NULL); + + channelConverterConfig = ma_channel_converter_config_init(ma_data_converter_config_get_mid_format(pConfig), pConfig->channelsIn, pConfig->pChannelMapIn, pConfig->channelsOut, pConfig->pChannelMapOut, pConfig->channelMixMode); + channelConverterConfig.ppWeights = pConfig->ppChannelWeights; + channelConverterConfig.calculateLFEFromSpatialChannels = pConfig->calculateLFEFromSpatialChannels; + + return channelConverterConfig; +} + +static ma_resampler_config ma_resampler_config_init_from_data_converter_config(const ma_data_converter_config* pConfig) +{ + ma_resampler_config resamplerConfig; + ma_uint32 resamplerChannels; + + MA_ASSERT(pConfig != NULL); + + /* The resampler is the most expensive part of the conversion process, so we need to do it at the stage where the channel count is at it's lowest. */ + if (pConfig->channelsIn < pConfig->channelsOut) { + resamplerChannels = pConfig->channelsIn; + } else { + resamplerChannels = pConfig->channelsOut; + } + + resamplerConfig = ma_resampler_config_init(ma_data_converter_config_get_mid_format(pConfig), resamplerChannels, pConfig->sampleRateIn, pConfig->sampleRateOut, pConfig->resampling.algorithm); + resamplerConfig.linear = pConfig->resampling.linear; + resamplerConfig.pBackendVTable = pConfig->resampling.pBackendVTable; + resamplerConfig.pBackendUserData = pConfig->resampling.pBackendUserData; + + return resamplerConfig; +} + +static ma_result ma_data_converter_get_heap_layout(const ma_data_converter_config* pConfig, ma_data_converter_heap_layout* pHeapLayout) +{ + ma_result result; + + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channelsIn == 0 || pConfig->channelsOut == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* Channel converter. */ + pHeapLayout->channelConverterOffset = pHeapLayout->sizeInBytes; + { + size_t heapSizeInBytes; + ma_channel_converter_config channelConverterConfig = ma_channel_converter_config_init_from_data_converter_config(pConfig); + + result = ma_channel_converter_get_heap_size(&channelConverterConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes += heapSizeInBytes; + } + + /* Resampler. */ + pHeapLayout->resamplerOffset = pHeapLayout->sizeInBytes; + if (ma_data_converter_config_is_resampler_required(pConfig)) { + size_t heapSizeInBytes; + ma_resampler_config resamplerConfig = ma_resampler_config_init_from_data_converter_config(pConfig); + + result = ma_resampler_get_heap_size(&resamplerConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes += heapSizeInBytes; + } + + /* Make sure allocation size is aligned. 
*/ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_converter_get_heap_size(const ma_data_converter_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_data_converter_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_data_converter_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_converter_init_preallocated(const ma_data_converter_config* pConfig, void* pHeap, ma_data_converter* pConverter) +{ + ma_result result; + ma_data_converter_heap_layout heapLayout; + ma_format midFormat; + ma_bool32 isResamplingRequired; + + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pConverter); + + result = ma_data_converter_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pConverter->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pConverter->formatIn = pConfig->formatIn; + pConverter->formatOut = pConfig->formatOut; + pConverter->channelsIn = pConfig->channelsIn; + pConverter->channelsOut = pConfig->channelsOut; + pConverter->sampleRateIn = pConfig->sampleRateIn; + pConverter->sampleRateOut = pConfig->sampleRateOut; + pConverter->ditherMode = pConfig->ditherMode; + + /* + Determine if resampling is required. We need to do this so we can determine an appropriate + mid format to use. If resampling is required, the mid format must be ma_format_f32 since + that is the only one that is guaranteed to supported by custom resampling backends. + */ + isResamplingRequired = ma_data_converter_config_is_resampler_required(pConfig); + midFormat = ma_data_converter_config_get_mid_format(pConfig); + + + /* Channel converter. We always initialize this, but we check if it configures itself as a passthrough to determine whether or not it's needed. */ + { + ma_channel_converter_config channelConverterConfig = ma_channel_converter_config_init_from_data_converter_config(pConfig); + + result = ma_channel_converter_init_preallocated(&channelConverterConfig, ma_offset_ptr(pHeap, heapLayout.channelConverterOffset), &pConverter->channelConverter); + if (result != MA_SUCCESS) { + return result; + } + + /* If the channel converter is not a passthrough we need to enable it. Otherwise we can skip it. */ + if (pConverter->channelConverter.conversionPath != ma_channel_conversion_path_passthrough) { + pConverter->hasChannelConverter = MA_TRUE; + } + } + + + /* Resampler. */ + if (isResamplingRequired) { + ma_resampler_config resamplerConfig = ma_resampler_config_init_from_data_converter_config(pConfig); + + result = ma_resampler_init_preallocated(&resamplerConfig, ma_offset_ptr(pHeap, heapLayout.resamplerOffset), &pConverter->resampler); + if (result != MA_SUCCESS) { + return result; + } + + pConverter->hasResampler = MA_TRUE; + } + + + /* We can simplify pre- and post-format conversion if we have neither channel conversion nor resampling. */ + if (pConverter->hasChannelConverter == MA_FALSE && pConverter->hasResampler == MA_FALSE) { + /* We have neither channel conversion nor resampling so we'll only need one of pre- or post-format conversion, or none if the input and output formats are the same. */ + if (pConverter->formatIn == pConverter->formatOut) { + /* The formats are the same so we can just pass through. 
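+           As an example of the other branch: f32 in and s16 out with no channel conversion or resampling sets only
+           hasPostFormatConversion, so processing reduces to a single ma_convert_pcm_frames_format() call per chunk;
+           when a channel converter or resampler is active the flags are instead driven by the mid format chosen
+           above.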
*/ + pConverter->hasPreFormatConversion = MA_FALSE; + pConverter->hasPostFormatConversion = MA_FALSE; + } else { + /* The formats are different so we need to do either pre- or post-format conversion. It doesn't matter which. */ + pConverter->hasPreFormatConversion = MA_FALSE; + pConverter->hasPostFormatConversion = MA_TRUE; + } + } else { + /* We have a channel converter and/or resampler so we'll need channel conversion based on the mid format. */ + if (pConverter->formatIn != midFormat) { + pConverter->hasPreFormatConversion = MA_TRUE; + } + if (pConverter->formatOut != midFormat) { + pConverter->hasPostFormatConversion = MA_TRUE; + } + } + + /* We can enable passthrough optimizations if applicable. Note that we'll only be able to do this if the sample rate is static. */ + if (pConverter->hasPreFormatConversion == MA_FALSE && + pConverter->hasPostFormatConversion == MA_FALSE && + pConverter->hasChannelConverter == MA_FALSE && + pConverter->hasResampler == MA_FALSE) { + pConverter->isPassthrough = MA_TRUE; + } + + + /* We now need to determine our execution path. */ + if (pConverter->isPassthrough) { + pConverter->executionPath = ma_data_converter_execution_path_passthrough; + } else { + if (pConverter->channelsIn < pConverter->channelsOut) { + /* Do resampling first, if necessary. */ + MA_ASSERT(pConverter->hasChannelConverter == MA_TRUE); + + if (pConverter->hasResampler) { + pConverter->executionPath = ma_data_converter_execution_path_resample_first; + } else { + pConverter->executionPath = ma_data_converter_execution_path_channels_only; + } + } else { + /* Do channel conversion first, if necessary. */ + if (pConverter->hasChannelConverter) { + if (pConverter->hasResampler) { + pConverter->executionPath = ma_data_converter_execution_path_channels_first; + } else { + pConverter->executionPath = ma_data_converter_execution_path_channels_only; + } + } else { + /* Channel routing not required. 
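+                   As an illustration with the default config and channel maps: 2ch/48kHz in to 6ch/48kHz out gives
+                   channels_only, 2ch/44.1kHz in to 6ch/48kHz out gives resample_first (resampling runs while the
+                   stream is still stereo), 6ch/44.1kHz in to 2ch/48kHz out gives channels_first (downmix before
+                   resampling), and 2ch/44.1kHz to 2ch/48kHz gives resample_only.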
*/ + if (pConverter->hasResampler) { + pConverter->executionPath = ma_data_converter_execution_path_resample_only; + } else { + pConverter->executionPath = ma_data_converter_execution_path_format_only; + } + } + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_converter_init(const ma_data_converter_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_converter* pConverter) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_data_converter_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_data_converter_init_preallocated(pConfig, pHeap, pConverter); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pConverter->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_data_converter_uninit(ma_data_converter* pConverter, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pConverter == NULL) { + return; + } + + if (pConverter->hasResampler) { + ma_resampler_uninit(&pConverter->resampler, pAllocationCallbacks); + } + + ma_channel_converter_uninit(&pConverter->channelConverter, pAllocationCallbacks); + + if (pConverter->_ownsHeap) { + ma_free(pConverter->_pHeap, pAllocationCallbacks); + } +} + +static ma_result ma_data_converter_process_pcm_frames__passthrough(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 frameCount; + + MA_ASSERT(pConverter != NULL); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + frameCount = ma_min(frameCountIn, frameCountOut); + + if (pFramesOut != NULL) { + if (pFramesIn != NULL) { + ma_copy_memory_64(pFramesOut, pFramesIn, frameCount * ma_get_bytes_per_frame(pConverter->formatOut, pConverter->channelsOut)); + } else { + ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->formatOut, pConverter->channelsOut)); + } + } + + if (pFrameCountIn != NULL) { + *pFrameCountIn = frameCount; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = frameCount; + } + + return MA_SUCCESS; +} + +static ma_result ma_data_converter_process_pcm_frames__format_only(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 frameCount; + + MA_ASSERT(pConverter != NULL); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + frameCount = ma_min(frameCountIn, frameCountOut); + + if (pFramesOut != NULL) { + if (pFramesIn != NULL) { + ma_convert_pcm_frames_format(pFramesOut, pConverter->formatOut, pFramesIn, pConverter->formatIn, frameCount, pConverter->channelsIn, pConverter->ditherMode); + } else { + ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->formatOut, pConverter->channelsOut)); + } + } + + if (pFrameCountIn != NULL) { + *pFrameCountIn = frameCount; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = frameCount; + } + + return 
MA_SUCCESS; +} + + +static ma_result ma_data_converter_process_pcm_frames__resample_with_format_conversion(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + ma_result result = MA_SUCCESS; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + + MA_ASSERT(pConverter != NULL); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + framesProcessedIn = 0; + framesProcessedOut = 0; + + while (framesProcessedOut < frameCountOut) { + ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + const ma_uint32 tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->resampler.format, pConverter->resampler.channels); + const void* pFramesInThisIteration; + /* */ void* pFramesOutThisIteration; + ma_uint64 frameCountInThisIteration; + ma_uint64 frameCountOutThisIteration; + + if (pFramesIn != NULL) { + pFramesInThisIteration = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pConverter->formatIn, pConverter->channelsIn)); + } else { + pFramesInThisIteration = NULL; + } + + if (pFramesOut != NULL) { + pFramesOutThisIteration = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pConverter->formatOut, pConverter->channelsOut)); + } else { + pFramesOutThisIteration = NULL; + } + + /* Do a pre format conversion if necessary. */ + if (pConverter->hasPreFormatConversion) { + ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + const ma_uint32 tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->resampler.format, pConverter->resampler.channels); + + frameCountInThisIteration = (frameCountIn - framesProcessedIn); + if (frameCountInThisIteration > tempBufferInCap) { + frameCountInThisIteration = tempBufferInCap; + } + + if (pConverter->hasPostFormatConversion) { + if (frameCountInThisIteration > tempBufferOutCap) { + frameCountInThisIteration = tempBufferOutCap; + } + } + + if (pFramesInThisIteration != NULL) { + ma_convert_pcm_frames_format(pTempBufferIn, pConverter->resampler.format, pFramesInThisIteration, pConverter->formatIn, frameCountInThisIteration, pConverter->channelsIn, pConverter->ditherMode); + } else { + MA_ZERO_MEMORY(pTempBufferIn, sizeof(pTempBufferIn)); + } + + frameCountOutThisIteration = (frameCountOut - framesProcessedOut); + + if (pConverter->hasPostFormatConversion) { + /* Both input and output conversion required. Output to the temp buffer. */ + if (frameCountOutThisIteration > tempBufferOutCap) { + frameCountOutThisIteration = tempBufferOutCap; + } + + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pTempBufferIn, &frameCountInThisIteration, pTempBufferOut, &frameCountOutThisIteration); + } else { + /* Only pre-format required. Output straight to the output buffer. */ + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pTempBufferIn, &frameCountInThisIteration, pFramesOutThisIteration, &frameCountOutThisIteration); + } + + if (result != MA_SUCCESS) { + break; + } + } else { + /* No pre-format required. Just read straight from the input buffer. 
*/ + MA_ASSERT(pConverter->hasPostFormatConversion == MA_TRUE); + + frameCountInThisIteration = (frameCountIn - framesProcessedIn); + frameCountOutThisIteration = (frameCountOut - framesProcessedOut); + if (frameCountOutThisIteration > tempBufferOutCap) { + frameCountOutThisIteration = tempBufferOutCap; + } + + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pFramesInThisIteration, &frameCountInThisIteration, pTempBufferOut, &frameCountOutThisIteration); + if (result != MA_SUCCESS) { + break; + } + } + + /* If we are doing a post format conversion we need to do that now. */ + if (pConverter->hasPostFormatConversion) { + if (pFramesOutThisIteration != NULL) { + ma_convert_pcm_frames_format(pFramesOutThisIteration, pConverter->formatOut, pTempBufferOut, pConverter->resampler.format, frameCountOutThisIteration, pConverter->resampler.channels, pConverter->ditherMode); + } + } + + framesProcessedIn += frameCountInThisIteration; + framesProcessedOut += frameCountOutThisIteration; + + MA_ASSERT(framesProcessedIn <= frameCountIn); + MA_ASSERT(framesProcessedOut <= frameCountOut); + + if (frameCountOutThisIteration == 0) { + break; /* Consumed all of our input data. */ + } + } + + if (pFrameCountIn != NULL) { + *pFrameCountIn = framesProcessedIn; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = framesProcessedOut; + } + + return result; +} + +static ma_result ma_data_converter_process_pcm_frames__resample_only(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + MA_ASSERT(pConverter != NULL); + + if (pConverter->hasPreFormatConversion == MA_FALSE && pConverter->hasPostFormatConversion == MA_FALSE) { + /* Neither pre- nor post-format required. This is simple case where only resampling is required. */ + return ma_resampler_process_pcm_frames(&pConverter->resampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + /* Format conversion required. */ + return ma_data_converter_process_pcm_frames__resample_with_format_conversion(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } +} + +static ma_result ma_data_converter_process_pcm_frames__channels_only(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + ma_result result; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 frameCount; + + MA_ASSERT(pConverter != NULL); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + frameCount = ma_min(frameCountIn, frameCountOut); + + if (pConverter->hasPreFormatConversion == MA_FALSE && pConverter->hasPostFormatConversion == MA_FALSE) { + /* No format conversion required. */ + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pFramesOut, pFramesIn, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } else { + /* Format conversion required. 
*/ + ma_uint64 framesProcessed = 0; + + while (framesProcessed < frameCount) { + ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + const ma_uint32 tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsOut); + const void* pFramesInThisIteration; + /* */ void* pFramesOutThisIteration; + ma_uint64 frameCountThisIteration; + + if (pFramesIn != NULL) { + pFramesInThisIteration = ma_offset_ptr(pFramesIn, framesProcessed * ma_get_bytes_per_frame(pConverter->formatIn, pConverter->channelsIn)); + } else { + pFramesInThisIteration = NULL; + } + + if (pFramesOut != NULL) { + pFramesOutThisIteration = ma_offset_ptr(pFramesOut, framesProcessed * ma_get_bytes_per_frame(pConverter->formatOut, pConverter->channelsOut)); + } else { + pFramesOutThisIteration = NULL; + } + + /* Do a pre format conversion if necessary. */ + if (pConverter->hasPreFormatConversion) { + ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + const ma_uint32 tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsIn); + + frameCountThisIteration = (frameCount - framesProcessed); + if (frameCountThisIteration > tempBufferInCap) { + frameCountThisIteration = tempBufferInCap; + } + + if (pConverter->hasPostFormatConversion) { + if (frameCountThisIteration > tempBufferOutCap) { + frameCountThisIteration = tempBufferOutCap; + } + } + + if (pFramesInThisIteration != NULL) { + ma_convert_pcm_frames_format(pTempBufferIn, pConverter->channelConverter.format, pFramesInThisIteration, pConverter->formatIn, frameCountThisIteration, pConverter->channelsIn, pConverter->ditherMode); + } else { + MA_ZERO_MEMORY(pTempBufferIn, sizeof(pTempBufferIn)); + } + + if (pConverter->hasPostFormatConversion) { + /* Both input and output conversion required. Output to the temp buffer. */ + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pTempBufferOut, pTempBufferIn, frameCountThisIteration); + } else { + /* Only pre-format required. Output straight to the output buffer. */ + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pFramesOutThisIteration, pTempBufferIn, frameCountThisIteration); + } + + if (result != MA_SUCCESS) { + break; + } + } else { + /* No pre-format required. Just read straight from the input buffer. */ + MA_ASSERT(pConverter->hasPostFormatConversion == MA_TRUE); + + frameCountThisIteration = (frameCount - framesProcessed); + if (frameCountThisIteration > tempBufferOutCap) { + frameCountThisIteration = tempBufferOutCap; + } + + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pTempBufferOut, pFramesInThisIteration, frameCountThisIteration); + if (result != MA_SUCCESS) { + break; + } + } + + /* If we are doing a post format conversion we need to do that now. 
*/ + if (pConverter->hasPostFormatConversion) { + if (pFramesOutThisIteration != NULL) { + ma_convert_pcm_frames_format(pFramesOutThisIteration, pConverter->formatOut, pTempBufferOut, pConverter->channelConverter.format, frameCountThisIteration, pConverter->channelConverter.channelsOut, pConverter->ditherMode); + } + } + + framesProcessed += frameCountThisIteration; + } + } + + if (pFrameCountIn != NULL) { + *pFrameCountIn = frameCount; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = frameCount; + } + + return MA_SUCCESS; +} + +static ma_result ma_data_converter_process_pcm_frames__resample_first(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + ma_result result; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format. */ + ma_uint64 tempBufferInCap; + ma_uint8 pTempBufferMid[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format, channel converter input format. */ + ma_uint64 tempBufferMidCap; + ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In channel converter output format. */ + ma_uint64 tempBufferOutCap; + + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pConverter->resampler.format == pConverter->channelConverter.format); + MA_ASSERT(pConverter->resampler.channels == pConverter->channelConverter.channelsIn); + MA_ASSERT(pConverter->resampler.channels < pConverter->channelConverter.channelsOut); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + framesProcessedIn = 0; + framesProcessedOut = 0; + + tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->resampler.format, pConverter->resampler.channels); + tempBufferMidCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->resampler.format, pConverter->resampler.channels); + tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsOut); + + while (framesProcessedOut < frameCountOut) { + ma_uint64 frameCountInThisIteration; + ma_uint64 frameCountOutThisIteration; + const void* pRunningFramesIn = NULL; + void* pRunningFramesOut = NULL; + const void* pResampleBufferIn; + void* pChannelsBufferOut; + + if (pFramesIn != NULL) { + pRunningFramesIn = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pConverter->formatIn, pConverter->channelsIn)); + } + if (pFramesOut != NULL) { + pRunningFramesOut = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pConverter->formatOut, pConverter->channelsOut)); + } + + /* Run input data through the resampler and output it to the temporary buffer. */ + frameCountInThisIteration = (frameCountIn - framesProcessedIn); + + if (pConverter->hasPreFormatConversion) { + if (frameCountInThisIteration > tempBufferInCap) { + frameCountInThisIteration = tempBufferInCap; + } + } + + frameCountOutThisIteration = (frameCountOut - framesProcessedOut); + if (frameCountOutThisIteration > tempBufferMidCap) { + frameCountOutThisIteration = tempBufferMidCap; + } + + /* We can't read more frames than can fit in the output buffer. 
*/ + if (pConverter->hasPostFormatConversion) { + if (frameCountOutThisIteration > tempBufferOutCap) { + frameCountOutThisIteration = tempBufferOutCap; + } + } + + /* We need to ensure we don't try to process too many input frames that we run out of room in the output buffer. If this happens we'll end up glitching. */ + + /* + We need to try to predict how many input frames will be required for the resampler. If the + resampler can tell us, we'll use that. Otherwise we'll need to make a best guess. The further + off we are from this, the more wasted format conversions we'll end up doing. + */ +#if 1 + { + ma_uint64 requiredInputFrameCount; + + result = ma_resampler_get_required_input_frame_count(&pConverter->resampler, frameCountOutThisIteration, &requiredInputFrameCount); + if (result != MA_SUCCESS) { + /* Fall back to a best guess. */ + requiredInputFrameCount = (frameCountOutThisIteration * pConverter->resampler.sampleRateIn) / pConverter->resampler.sampleRateOut; + } + + if (frameCountInThisIteration > requiredInputFrameCount) { + frameCountInThisIteration = requiredInputFrameCount; + } + } +#endif + + if (pConverter->hasPreFormatConversion) { + if (pFramesIn != NULL) { + ma_convert_pcm_frames_format(pTempBufferIn, pConverter->resampler.format, pRunningFramesIn, pConverter->formatIn, frameCountInThisIteration, pConverter->channelsIn, pConverter->ditherMode); + pResampleBufferIn = pTempBufferIn; + } else { + pResampleBufferIn = NULL; + } + } else { + pResampleBufferIn = pRunningFramesIn; + } + + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pResampleBufferIn, &frameCountInThisIteration, pTempBufferMid, &frameCountOutThisIteration); + if (result != MA_SUCCESS) { + return result; + } + + + /* + The input data has been resampled so now we need to run it through the channel converter. The input data is always contained in pTempBufferMid. We only need to do + this part if we have an output buffer. + */ + if (pFramesOut != NULL) { + if (pConverter->hasPostFormatConversion) { + pChannelsBufferOut = pTempBufferOut; + } else { + pChannelsBufferOut = pRunningFramesOut; + } + + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pChannelsBufferOut, pTempBufferMid, frameCountOutThisIteration); + if (result != MA_SUCCESS) { + return result; + } + + /* Finally we do post format conversion. */ + if (pConverter->hasPostFormatConversion) { + ma_convert_pcm_frames_format(pRunningFramesOut, pConverter->formatOut, pChannelsBufferOut, pConverter->channelConverter.format, frameCountOutThisIteration, pConverter->channelConverter.channelsOut, pConverter->ditherMode); + } + } + + + framesProcessedIn += frameCountInThisIteration; + framesProcessedOut += frameCountOutThisIteration; + + MA_ASSERT(framesProcessedIn <= frameCountIn); + MA_ASSERT(framesProcessedOut <= frameCountOut); + + if (frameCountOutThisIteration == 0) { + break; /* Consumed all of our input data. */ + } + } + + if (pFrameCountIn != NULL) { + *pFrameCountIn = framesProcessedIn; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = framesProcessedOut; + } + + return MA_SUCCESS; +} + +static ma_result ma_data_converter_process_pcm_frames__channels_first(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + ma_result result; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format. 
*/ + ma_uint64 tempBufferInCap; + ma_uint8 pTempBufferMid[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format, channel converter input format. */ + ma_uint64 tempBufferMidCap; + ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In channel converter output format. */ + ma_uint64 tempBufferOutCap; + + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pConverter->resampler.format == pConverter->channelConverter.format); + MA_ASSERT(pConverter->resampler.channels == pConverter->channelConverter.channelsOut); + MA_ASSERT(pConverter->resampler.channels <= pConverter->channelConverter.channelsIn); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + framesProcessedIn = 0; + framesProcessedOut = 0; + + tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsIn); + tempBufferMidCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsOut); + tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->resampler.format, pConverter->resampler.channels); + + while (framesProcessedOut < frameCountOut) { + ma_uint64 frameCountInThisIteration; + ma_uint64 frameCountOutThisIteration; + const void* pRunningFramesIn = NULL; + void* pRunningFramesOut = NULL; + const void* pChannelsBufferIn; + void* pResampleBufferOut; + + if (pFramesIn != NULL) { + pRunningFramesIn = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pConverter->formatIn, pConverter->channelsIn)); + } + if (pFramesOut != NULL) { + pRunningFramesOut = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pConverter->formatOut, pConverter->channelsOut)); + } + + /* + Before doing any processing we need to determine how many frames we should try processing + this iteration, for both input and output. The resampler requires us to perform format and + channel conversion before passing any data into it. If we get our input count wrong, we'll + end up peforming redundant pre-processing. This isn't the end of the world, but it does + result in some inefficiencies proportionate to how far our estimates are off. + + If the resampler has a means to calculate exactly how much we'll need, we'll use that. + Otherwise we'll make a best guess. In order to do this, we'll need to calculate the output + frame count first. + */ + frameCountOutThisIteration = (frameCountOut - framesProcessedOut); + if (frameCountOutThisIteration > tempBufferMidCap) { + frameCountOutThisIteration = tempBufferMidCap; + } + + if (pConverter->hasPostFormatConversion) { + if (frameCountOutThisIteration > tempBufferOutCap) { + frameCountOutThisIteration = tempBufferOutCap; + } + } + + /* Now that we have the output frame count we can determine the input frame count. */ + frameCountInThisIteration = (frameCountIn - framesProcessedIn); + if (pConverter->hasPreFormatConversion) { + if (frameCountInThisIteration > tempBufferInCap) { + frameCountInThisIteration = tempBufferInCap; + } + } + + if (frameCountInThisIteration > tempBufferMidCap) { + frameCountInThisIteration = tempBufferMidCap; + } + +#if 1 + { + ma_uint64 requiredInputFrameCount; + + result = ma_resampler_get_required_input_frame_count(&pConverter->resampler, frameCountOutThisIteration, &requiredInputFrameCount); + if (result != MA_SUCCESS) { + /* Fall back to a best guess. 
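+                    The guess below simply scales the requested output count by the sample rate ratio.
+                    As a worked example (illustrative numbers only): converting 44100 -> 48000 and asking
+                    for 512 output frames gives (512 * 44100) / 48000 = 470 input frames after integer
+                    truncation (the exact value is 470.4), so the estimate errs slightly low and any
+                    shortfall is made up on a later iteration of the surrounding loop.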
*/ + requiredInputFrameCount = (frameCountOutThisIteration * pConverter->resampler.sampleRateIn) / pConverter->resampler.sampleRateOut; + } + + if (frameCountInThisIteration > requiredInputFrameCount) { + frameCountInThisIteration = requiredInputFrameCount; + } + } +#endif + + + /* Pre format conversion. */ + if (pConverter->hasPreFormatConversion) { + if (pRunningFramesIn != NULL) { + ma_convert_pcm_frames_format(pTempBufferIn, pConverter->channelConverter.format, pRunningFramesIn, pConverter->formatIn, frameCountInThisIteration, pConverter->channelsIn, pConverter->ditherMode); + pChannelsBufferIn = pTempBufferIn; + } else { + pChannelsBufferIn = NULL; + } + } else { + pChannelsBufferIn = pRunningFramesIn; + } + + + /* Channel conversion. */ + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pTempBufferMid, pChannelsBufferIn, frameCountInThisIteration); + if (result != MA_SUCCESS) { + return result; + } + + + /* Resampling. */ + if (pConverter->hasPostFormatConversion) { + pResampleBufferOut = pTempBufferOut; + } else { + pResampleBufferOut = pRunningFramesOut; + } + + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pTempBufferMid, &frameCountInThisIteration, pResampleBufferOut, &frameCountOutThisIteration); + if (result != MA_SUCCESS) { + return result; + } + + + /* Post format conversion. */ + if (pConverter->hasPostFormatConversion) { + if (pRunningFramesOut != NULL) { + ma_convert_pcm_frames_format(pRunningFramesOut, pConverter->formatOut, pResampleBufferOut, pConverter->resampler.format, frameCountOutThisIteration, pConverter->channelsOut, pConverter->ditherMode); + } + } + + + framesProcessedIn += frameCountInThisIteration; + framesProcessedOut += frameCountOutThisIteration; + + MA_ASSERT(framesProcessedIn <= frameCountIn); + MA_ASSERT(framesProcessedOut <= frameCountOut); + + if (frameCountOutThisIteration == 0) { + break; /* Consumed all of our input data. 
*/ + } + } + + if (pFrameCountIn != NULL) { + *pFrameCountIn = framesProcessedIn; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = framesProcessedOut; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_converter_process_pcm_frames(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + switch (pConverter->executionPath) + { + case ma_data_converter_execution_path_passthrough: return ma_data_converter_process_pcm_frames__passthrough(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + case ma_data_converter_execution_path_format_only: return ma_data_converter_process_pcm_frames__format_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + case ma_data_converter_execution_path_channels_only: return ma_data_converter_process_pcm_frames__channels_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + case ma_data_converter_execution_path_resample_only: return ma_data_converter_process_pcm_frames__resample_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + case ma_data_converter_execution_path_resample_first: return ma_data_converter_process_pcm_frames__resample_first(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + case ma_data_converter_execution_path_channels_first: return ma_data_converter_process_pcm_frames__channels_first(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + default: return MA_INVALID_OPERATION; /* Should never hit this. */ + } +} + +MA_API ma_result ma_data_converter_set_rate(ma_data_converter* pConverter, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +{ + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + if (pConverter->hasResampler == MA_FALSE) { + return MA_INVALID_OPERATION; /* Dynamic resampling not enabled. */ + } + + return ma_resampler_set_rate(&pConverter->resampler, sampleRateIn, sampleRateOut); +} + +MA_API ma_result ma_data_converter_set_rate_ratio(ma_data_converter* pConverter, float ratioInOut) +{ + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + if (pConverter->hasResampler == MA_FALSE) { + return MA_INVALID_OPERATION; /* Dynamic resampling not enabled. */ + } + + return ma_resampler_set_rate_ratio(&pConverter->resampler, ratioInOut); +} + +MA_API ma_uint64 ma_data_converter_get_input_latency(const ma_data_converter* pConverter) +{ + if (pConverter == NULL) { + return 0; + } + + if (pConverter->hasResampler) { + return ma_resampler_get_input_latency(&pConverter->resampler); + } + + return 0; /* No latency without a resampler. */ +} + +MA_API ma_uint64 ma_data_converter_get_output_latency(const ma_data_converter* pConverter) +{ + if (pConverter == NULL) { + return 0; + } + + if (pConverter->hasResampler) { + return ma_resampler_get_output_latency(&pConverter->resampler); + } + + return 0; /* No latency without a resampler. 
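+        When a resampler is present, the latency reported here is typically combined with the
+        frame-count queries defined below. An illustrative sketch (hypothetical caller code; conv is
+        an initialized ma_data_converter):
+
+            ma_uint64 required;
+            ma_result result = ma_data_converter_get_required_input_frame_count(&conv, 1024, &required);
+
+        On MA_SUCCESS, required holds the number of input frames the converter expects to need in
+        order to produce 1024 output frames; without a resampler the mapping is simply 1:1.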
*/ +} + +MA_API ma_result ma_data_converter_get_required_input_frame_count(const ma_data_converter* pConverter, ma_uint64 outputFrameCount, ma_uint64* pInputFrameCount) +{ + if (pInputFrameCount == NULL) { + return MA_INVALID_ARGS; + } + + *pInputFrameCount = 0; + + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + if (pConverter->hasResampler) { + return ma_resampler_get_required_input_frame_count(&pConverter->resampler, outputFrameCount, pInputFrameCount); + } else { + *pInputFrameCount = outputFrameCount; /* 1:1 */ + return MA_SUCCESS; + } +} + +MA_API ma_result ma_data_converter_get_expected_output_frame_count(const ma_data_converter* pConverter, ma_uint64 inputFrameCount, ma_uint64* pOutputFrameCount) +{ + if (pOutputFrameCount == NULL) { + return MA_INVALID_ARGS; + } + + *pOutputFrameCount = 0; + + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + if (pConverter->hasResampler) { + return ma_resampler_get_expected_output_frame_count(&pConverter->resampler, inputFrameCount, pOutputFrameCount); + } else { + *pOutputFrameCount = inputFrameCount; /* 1:1 */ + return MA_SUCCESS; + } +} + +MA_API ma_result ma_data_converter_get_input_channel_map(const ma_data_converter* pConverter, ma_channel* pChannelMap, size_t channelMapCap) +{ + if (pConverter == NULL || pChannelMap == NULL) { + return MA_INVALID_ARGS; + } + + if (pConverter->hasChannelConverter) { + ma_channel_converter_get_output_channel_map(&pConverter->channelConverter, pChannelMap, channelMapCap); + } else { + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pConverter->channelsOut); + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_converter_get_output_channel_map(const ma_data_converter* pConverter, ma_channel* pChannelMap, size_t channelMapCap) +{ + if (pConverter == NULL || pChannelMap == NULL) { + return MA_INVALID_ARGS; + } + + if (pConverter->hasChannelConverter) { + ma_channel_converter_get_input_channel_map(&pConverter->channelConverter, pChannelMap, channelMapCap); + } else { + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pConverter->channelsIn); + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_converter_reset(ma_data_converter* pConverter) +{ + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + /* There's nothing to do if we're not resampling. 
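+    As the check just below shows, this is a no-op when no resampler is present; when there is one,
+    the resampler's internal state is cleared. A typical (illustrative) use is after the caller
+    seeks its source to a new position, so that frames buffered from the old position do not bleed
+    into the new one:
+
+        ma_data_converter_reset(&conv);
+
+    where conv is an initialized ma_data_converter (hypothetical variable name).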
*/ + if (pConverter->hasResampler == MA_FALSE) { + return MA_SUCCESS; + } + + return ma_resampler_reset(&pConverter->resampler); +} + + + +/************************************************************************************************************************************************************** + +Channel Maps + +**************************************************************************************************************************************************************/ +static ma_channel ma_channel_map_init_standard_channel(ma_standard_channel_map standardChannelMap, ma_uint32 channelCount, ma_uint32 channelIndex); + +MA_API ma_channel ma_channel_map_get_channel(const ma_channel* pChannelMap, ma_uint32 channelCount, ma_uint32 channelIndex) +{ + if (pChannelMap == NULL) { + return ma_channel_map_init_standard_channel(ma_standard_channel_map_default, channelCount, channelIndex); + } else { + if (channelIndex >= channelCount) { + return MA_CHANNEL_NONE; + } + + return pChannelMap[channelIndex]; + } +} + +MA_API void ma_channel_map_init_blank(ma_channel* pChannelMap, ma_uint32 channels) +{ + if (pChannelMap == NULL) { + return; + } + + MA_ZERO_MEMORY(pChannelMap, sizeof(*pChannelMap) * channels); +} + + +static ma_channel ma_channel_map_init_standard_channel_microsoft(ma_uint32 channelCount, ma_uint32 channelIndex) +{ + if (channelCount == 0 || channelIndex >= channelCount) { + return MA_CHANNEL_NONE; + } + + /* This is the Microsoft channel map. Based off the speaker configurations mentioned here: https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/ksmedia/ns-ksmedia-ksaudio_channel_config */ + switch (channelCount) + { + case 0: return MA_CHANNEL_NONE; + + case 1: + { + return MA_CHANNEL_MONO; + } break; + + case 2: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + } + } break; + + case 3: /* No defined, but best guess. */ + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + } + } break; + + case 4: + { + switch (channelIndex) { +#ifndef MA_USE_QUAD_MICROSOFT_CHANNEL_MAP + /* Surround. Using the Surround profile has the advantage of the 3rd channel (MA_CHANNEL_FRONT_CENTER) mapping nicely with higher channel counts. */ + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_BACK_CENTER; +#else + /* Quad. */ + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; +#endif + } + } break; + + case 5: /* Not defined, but best guess. */ + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_BACK_LEFT; + case 4: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 6: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_LFE; + case 4: return MA_CHANNEL_SIDE_LEFT; + case 5: return MA_CHANNEL_SIDE_RIGHT; + } + } break; + + case 7: /* Not defined, but best guess. 
*/ + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_LFE; + case 4: return MA_CHANNEL_BACK_CENTER; + case 5: return MA_CHANNEL_SIDE_LEFT; + case 6: return MA_CHANNEL_SIDE_RIGHT; + } + } break; + + case 8: + default: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_LFE; + case 4: return MA_CHANNEL_BACK_LEFT; + case 5: return MA_CHANNEL_BACK_RIGHT; + case 6: return MA_CHANNEL_SIDE_LEFT; + case 7: return MA_CHANNEL_SIDE_RIGHT; + } + } break; + } + + if (channelCount > 8) { + if (channelIndex < 32) { /* We have 32 AUX channels. */ + return (ma_channel)(MA_CHANNEL_AUX_0 + (channelIndex - 8)); + } + } + + /* Getting here means we don't know how to map the channel position so just return MA_CHANNEL_NONE. */ + return MA_CHANNEL_NONE; +} + +static ma_channel ma_channel_map_init_standard_channel_alsa(ma_uint32 channelCount, ma_uint32 channelIndex) +{ + switch (channelCount) + { + case 0: return MA_CHANNEL_NONE; + + case 1: + { + return MA_CHANNEL_MONO; + } break; + + case 2: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + } + } break; + + case 3: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + } + } break; + + case 4: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 5: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + case 4: return MA_CHANNEL_FRONT_CENTER; + } + } break; + + case 6: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + case 4: return MA_CHANNEL_FRONT_CENTER; + case 5: return MA_CHANNEL_LFE; + } + } break; + + case 7: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + case 4: return MA_CHANNEL_FRONT_CENTER; + case 5: return MA_CHANNEL_LFE; + case 6: return MA_CHANNEL_BACK_CENTER; + } + } break; + + case 8: + default: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + case 4: return MA_CHANNEL_FRONT_CENTER; + case 5: return MA_CHANNEL_LFE; + case 6: return MA_CHANNEL_SIDE_LEFT; + case 7: return MA_CHANNEL_SIDE_RIGHT; + } + } break; + } + + if (channelCount > 8) { + if (channelIndex < 32) { /* We have 32 AUX channels. */ + return (ma_channel)(MA_CHANNEL_AUX_0 + (channelIndex - 8)); + } + } + + /* Getting here means we don't know how to map the channel position so just return MA_CHANNEL_NONE. 
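+    Unknown positions fall through to the MA_CHANNEL_NONE return below. In normal use these
+    per-backend tables are consumed through the public helpers in this section; an illustrative
+    sketch (hypothetical caller code; map is a local array):
+
+        ma_channel map[6];
+        ma_channel_map_init_standard(ma_standard_channel_map_microsoft, map, 6, 6);
+        ma_channel_map_get_channel(map, 6, 3);
+
+    With the 6-channel Microsoft layout above this fills map with FRONT_LEFT, FRONT_RIGHT,
+    FRONT_CENTER, LFE, SIDE_LEFT, SIDE_RIGHT, and the ma_channel_map_get_channel call returns
+    MA_CHANNEL_LFE.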
*/ + return MA_CHANNEL_NONE; +} + +static ma_channel ma_channel_map_init_standard_channel_rfc3551(ma_uint32 channelCount, ma_uint32 channelIndex) +{ + switch (channelCount) + { + case 0: return MA_CHANNEL_NONE; + + case 1: + { + return MA_CHANNEL_MONO; + } break; + + case 2: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + } + } break; + + case 3: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + } + } break; + + case 4: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 3: return MA_CHANNEL_BACK_CENTER; + } + } break; + + case 5: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_BACK_LEFT; + case 4: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 6: + default: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_SIDE_LEFT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_FRONT_RIGHT; + case 4: return MA_CHANNEL_SIDE_RIGHT; + case 5: return MA_CHANNEL_BACK_CENTER; + } + } break; + } + + if (channelCount > 6) { + if (channelIndex < 32) { /* We have 32 AUX channels. */ + return (ma_channel)(MA_CHANNEL_AUX_0 + (channelIndex - 6)); + } + } + + /* Getting here means we don't know how to map the channel position so just return MA_CHANNEL_NONE. */ + return MA_CHANNEL_NONE; +} + +static ma_channel ma_channel_map_init_standard_channel_flac(ma_uint32 channelCount, ma_uint32 channelIndex) +{ + switch (channelCount) + { + case 0: return MA_CHANNEL_NONE; + + case 1: + { + return MA_CHANNEL_MONO; + } break; + + case 2: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + } + } break; + + case 3: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + } + } break; + + case 4: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 5: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_BACK_LEFT; + case 4: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 6: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_LFE; + case 4: return MA_CHANNEL_BACK_LEFT; + case 5: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 7: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_LFE; + case 4: return MA_CHANNEL_BACK_CENTER; + case 5: return MA_CHANNEL_SIDE_LEFT; + case 6: return MA_CHANNEL_SIDE_RIGHT; + } + } break; + + case 8: + default: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_LFE; + case 4: return 
MA_CHANNEL_BACK_LEFT; + case 5: return MA_CHANNEL_BACK_RIGHT; + case 6: return MA_CHANNEL_SIDE_LEFT; + case 7: return MA_CHANNEL_SIDE_RIGHT; + } + } break; + } + + if (channelCount > 8) { + if (channelIndex < 32) { /* We have 32 AUX channels. */ + return (ma_channel)(MA_CHANNEL_AUX_0 + (channelIndex - 8)); + } + } + + /* Getting here means we don't know how to map the channel position so just return MA_CHANNEL_NONE. */ + return MA_CHANNEL_NONE; +} + +static ma_channel ma_channel_map_init_standard_channel_vorbis(ma_uint32 channelCount, ma_uint32 channelIndex) +{ + switch (channelCount) + { + case 0: return MA_CHANNEL_NONE; + + case 1: + { + return MA_CHANNEL_MONO; + } break; + + case 2: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + } + } break; + + case 3: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_CENTER; + case 2: return MA_CHANNEL_FRONT_RIGHT; + } + } break; + + case 4: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 5: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_CENTER; + case 2: return MA_CHANNEL_FRONT_RIGHT; + case 3: return MA_CHANNEL_BACK_LEFT; + case 4: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 6: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_CENTER; + case 2: return MA_CHANNEL_FRONT_RIGHT; + case 3: return MA_CHANNEL_BACK_LEFT; + case 4: return MA_CHANNEL_BACK_RIGHT; + case 5: return MA_CHANNEL_LFE; + } + } break; + + case 7: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_CENTER; + case 2: return MA_CHANNEL_FRONT_RIGHT; + case 3: return MA_CHANNEL_SIDE_LEFT; + case 4: return MA_CHANNEL_SIDE_RIGHT; + case 5: return MA_CHANNEL_BACK_CENTER; + case 6: return MA_CHANNEL_LFE; + } + } break; + + case 8: + default: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_CENTER; + case 2: return MA_CHANNEL_FRONT_RIGHT; + case 3: return MA_CHANNEL_SIDE_LEFT; + case 4: return MA_CHANNEL_SIDE_RIGHT; + case 5: return MA_CHANNEL_BACK_LEFT; + case 6: return MA_CHANNEL_BACK_RIGHT; + case 7: return MA_CHANNEL_LFE; + } + } break; + } + + if (channelCount > 8) { + if (channelIndex < 32) { /* We have 32 AUX channels. */ + return (ma_channel)(MA_CHANNEL_AUX_0 + (channelIndex - 8)); + } + } + + /* Getting here means we don't know how to map the channel position so just return MA_CHANNEL_NONE. 
*/ + return MA_CHANNEL_NONE; +} + +static ma_channel ma_channel_map_init_standard_channel_sound4(ma_uint32 channelCount, ma_uint32 channelIndex) +{ + switch (channelCount) + { + case 0: return MA_CHANNEL_NONE; + + case 1: + { + return MA_CHANNEL_MONO; + } break; + + case 2: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + } + } break; + + case 3: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + } + } break; + + case 4: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 5: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_BACK_LEFT; + case 4: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 6: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_CENTER; + case 2: return MA_CHANNEL_FRONT_RIGHT; + case 3: return MA_CHANNEL_BACK_LEFT; + case 4: return MA_CHANNEL_BACK_RIGHT; + case 5: return MA_CHANNEL_LFE; + } + } break; + + case 7: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_CENTER; + case 2: return MA_CHANNEL_FRONT_RIGHT; + case 3: return MA_CHANNEL_SIDE_LEFT; + case 4: return MA_CHANNEL_SIDE_RIGHT; + case 5: return MA_CHANNEL_BACK_CENTER; + case 6: return MA_CHANNEL_LFE; + } + } break; + + case 8: + default: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_CENTER; + case 2: return MA_CHANNEL_FRONT_RIGHT; + case 3: return MA_CHANNEL_SIDE_LEFT; + case 4: return MA_CHANNEL_SIDE_RIGHT; + case 5: return MA_CHANNEL_BACK_LEFT; + case 6: return MA_CHANNEL_BACK_RIGHT; + case 7: return MA_CHANNEL_LFE; + } + } break; + } + + if (channelCount > 8) { + if (channelIndex < 32) { /* We have 32 AUX channels. */ + return (ma_channel)(MA_CHANNEL_AUX_0 + (channelIndex - 8)); + } + } + + /* Getting here means we don't know how to map the channel position so just return MA_CHANNEL_NONE. */ + return MA_CHANNEL_NONE; +} + +static ma_channel ma_channel_map_init_standard_channel_sndio(ma_uint32 channelCount, ma_uint32 channelIndex) +{ + switch (channelCount) + { + case 0: return MA_CHANNEL_NONE; + + case 1: + { + return MA_CHANNEL_MONO; + } break; + + case 2: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + } + } break; + + case 3: /* No defined, but best guess. */ + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + } + } break; + + case 4: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 5: /* Not defined, but best guess. 
*/ + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + case 4: return MA_CHANNEL_FRONT_CENTER; + } + } break; + + case 6: + default: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + case 4: return MA_CHANNEL_FRONT_CENTER; + case 5: return MA_CHANNEL_LFE; + } + } break; + } + + if (channelCount > 6) { + if (channelIndex < 32) { /* We have 32 AUX channels. */ + return (ma_channel)(MA_CHANNEL_AUX_0 + (channelIndex - 6)); + } + } + + /* Getting here means we don't know how to map the channel position so just return MA_CHANNEL_NONE. */ + return MA_CHANNEL_NONE; +} + + +static ma_channel ma_channel_map_init_standard_channel(ma_standard_channel_map standardChannelMap, ma_uint32 channelCount, ma_uint32 channelIndex) +{ + if (channelCount == 0 || channelIndex >= channelCount) { + return MA_CHANNEL_NONE; + } + + switch (standardChannelMap) + { + case ma_standard_channel_map_alsa: + { + return ma_channel_map_init_standard_channel_alsa(channelCount, channelIndex); + } break; + + case ma_standard_channel_map_rfc3551: + { + return ma_channel_map_init_standard_channel_rfc3551(channelCount, channelIndex); + } break; + + case ma_standard_channel_map_flac: + { + return ma_channel_map_init_standard_channel_flac(channelCount, channelIndex); + } break; + + case ma_standard_channel_map_vorbis: + { + return ma_channel_map_init_standard_channel_vorbis(channelCount, channelIndex); + } break; + + case ma_standard_channel_map_sound4: + { + return ma_channel_map_init_standard_channel_sound4(channelCount, channelIndex); + } break; + + case ma_standard_channel_map_sndio: + { + return ma_channel_map_init_standard_channel_sndio(channelCount, channelIndex); + } break; + + case ma_standard_channel_map_microsoft: /* Also default. */ + /*case ma_standard_channel_map_default;*/ + default: + { + return ma_channel_map_init_standard_channel_microsoft(channelCount, channelIndex); + } break; + } +} + +MA_API void ma_channel_map_init_standard(ma_standard_channel_map standardChannelMap, ma_channel* pChannelMap, size_t channelMapCap, ma_uint32 channels) +{ + ma_uint32 iChannel; + + if (pChannelMap == NULL || channelMapCap == 0 || channels == 0) { + return; + } + + for (iChannel = 0; iChannel < channels; iChannel += 1) { + if (channelMapCap == 0) { + break; /* Ran out of room. */ + } + + pChannelMap[0] = ma_channel_map_init_standard_channel(standardChannelMap, channels, iChannel); + pChannelMap += 1; + channelMapCap -= 1; + } +} + +MA_API void ma_channel_map_copy(ma_channel* pOut, const ma_channel* pIn, ma_uint32 channels) +{ + if (pOut != NULL && pIn != NULL && channels > 0) { + MA_COPY_MEMORY(pOut, pIn, sizeof(*pOut) * channels); + } +} + +MA_API void ma_channel_map_copy_or_default(ma_channel* pOut, size_t channelMapCapOut, const ma_channel* pIn, ma_uint32 channels) +{ + if (pOut == NULL || channels == 0) { + return; + } + + if (pIn != NULL) { + ma_channel_map_copy(pOut, pIn, channels); + } else { + ma_channel_map_init_standard(ma_standard_channel_map_default, pOut, channelMapCapOut, channels); + } +} + +MA_API ma_bool32 ma_channel_map_is_valid(const ma_channel* pChannelMap, ma_uint32 channels) +{ + /* A channel count of 0 is invalid. 
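+    Beyond that, the only rule enforced here is that MA_CHANNEL_MONO must not appear in a map with
+    more than one channel. As a concrete illustration (hypothetical values): a 2-channel map of
+    { MA_CHANNEL_MONO, MA_CHANNEL_FRONT_RIGHT } is rejected, while a NULL map is interpreted as the
+    standard map for the given channel count and is accepted.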
*/ + if (channels == 0) { + return MA_FALSE; + } + + /* It does not make sense to have a mono channel when there is more than 1 channel. */ + if (channels > 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + if (ma_channel_map_get_channel(pChannelMap, channels, iChannel) == MA_CHANNEL_MONO) { + return MA_FALSE; + } + } + } + + return MA_TRUE; +} + +MA_API ma_bool32 ma_channel_map_is_equal(const ma_channel* pChannelMapA, const ma_channel* pChannelMapB, ma_uint32 channels) +{ + ma_uint32 iChannel; + + if (pChannelMapA == pChannelMapB) { + return MA_TRUE; + } + + for (iChannel = 0; iChannel < channels; ++iChannel) { + if (ma_channel_map_get_channel(pChannelMapA, channels, iChannel) != ma_channel_map_get_channel(pChannelMapB, channels, iChannel)) { + return MA_FALSE; + } + } + + return MA_TRUE; +} + +MA_API ma_bool32 ma_channel_map_is_blank(const ma_channel* pChannelMap, ma_uint32 channels) +{ + ma_uint32 iChannel; + + /* A null channel map is equivalent to the default channel map. */ + if (pChannelMap == NULL) { + return MA_FALSE; + } + + for (iChannel = 0; iChannel < channels; ++iChannel) { + if (pChannelMap[iChannel] != MA_CHANNEL_NONE) { + return MA_FALSE; + } + } + + return MA_TRUE; +} + +MA_API ma_bool32 ma_channel_map_contains_channel_position(ma_uint32 channels, const ma_channel* pChannelMap, ma_channel channelPosition) +{ + return ma_channel_map_find_channel_position(channels, pChannelMap, channelPosition, NULL); +} + +MA_API ma_bool32 ma_channel_map_find_channel_position(ma_uint32 channels, const ma_channel* pChannelMap, ma_channel channelPosition, ma_uint32* pChannelIndex) +{ + ma_uint32 iChannel; + + if (pChannelIndex != NULL) { + *pChannelIndex = (ma_uint32)-1; + } + + for (iChannel = 0; iChannel < channels; ++iChannel) { + if (ma_channel_map_get_channel(pChannelMap, channels, iChannel) == channelPosition) { + if (pChannelIndex != NULL) { + *pChannelIndex = iChannel; + } + + return MA_TRUE; + } + } + + /* Getting here means the channel position was not found. */ + return MA_FALSE; +} + +MA_API size_t ma_channel_map_to_string(const ma_channel* pChannelMap, ma_uint32 channels, char* pBufferOut, size_t bufferCap) +{ + size_t len; + ma_uint32 iChannel; + + len = 0; + + for (iChannel = 0; iChannel < channels; iChannel += 1) { + const char* pChannelStr = ma_channel_position_to_string(ma_channel_map_get_channel(pChannelMap, channels, iChannel)); + size_t channelStrLen = strlen(pChannelStr); + + /* Append the string if necessary. */ + if (pBufferOut != NULL && bufferCap > len + channelStrLen) { + MA_COPY_MEMORY(pBufferOut + len, pChannelStr, channelStrLen); + } + len += channelStrLen; + + /* Append a space if it's not the last item. */ + if (iChannel+1 < channels) { + if (pBufferOut != NULL && bufferCap > len + 1) { + pBufferOut[len] = ' '; + } + len += 1; + } + } + + /* Null terminate. Don't increment the length here. 
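+    Because the returned length excludes the terminator written below, callers can use the usual
+    measure-then-format pattern. An illustrative sketch (hypothetical variables; map is a 6-channel
+    map such as the one built in the earlier example):
+
+        char buf[256];
+        size_t len = ma_channel_map_to_string(map, 6, buf, sizeof(buf));
+
+    Passing a NULL buffer (or a bufferCap of 0) performs a measuring pass only. For the standard
+    6-channel Microsoft map the output is a space-separated string beginning with
+    "CHANNEL_FRONT_LEFT CHANNEL_FRONT_RIGHT ...".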
*/ + if (pBufferOut != NULL && bufferCap > len + 1) { + pBufferOut[len] = '\0'; + } + + return len; +} + +MA_API const char* ma_channel_position_to_string(ma_channel channel) +{ + switch (channel) + { + case MA_CHANNEL_NONE : return "CHANNEL_NONE"; + case MA_CHANNEL_MONO : return "CHANNEL_MONO"; + case MA_CHANNEL_FRONT_LEFT : return "CHANNEL_FRONT_LEFT"; + case MA_CHANNEL_FRONT_RIGHT : return "CHANNEL_FRONT_RIGHT"; + case MA_CHANNEL_FRONT_CENTER : return "CHANNEL_FRONT_CENTER"; + case MA_CHANNEL_LFE : return "CHANNEL_LFE"; + case MA_CHANNEL_BACK_LEFT : return "CHANNEL_BACK_LEFT"; + case MA_CHANNEL_BACK_RIGHT : return "CHANNEL_BACK_RIGHT"; + case MA_CHANNEL_FRONT_LEFT_CENTER : return "CHANNEL_FRONT_LEFT_CENTER "; + case MA_CHANNEL_FRONT_RIGHT_CENTER: return "CHANNEL_FRONT_RIGHT_CENTER"; + case MA_CHANNEL_BACK_CENTER : return "CHANNEL_BACK_CENTER"; + case MA_CHANNEL_SIDE_LEFT : return "CHANNEL_SIDE_LEFT"; + case MA_CHANNEL_SIDE_RIGHT : return "CHANNEL_SIDE_RIGHT"; + case MA_CHANNEL_TOP_CENTER : return "CHANNEL_TOP_CENTER"; + case MA_CHANNEL_TOP_FRONT_LEFT : return "CHANNEL_TOP_FRONT_LEFT"; + case MA_CHANNEL_TOP_FRONT_CENTER : return "CHANNEL_TOP_FRONT_CENTER"; + case MA_CHANNEL_TOP_FRONT_RIGHT : return "CHANNEL_TOP_FRONT_RIGHT"; + case MA_CHANNEL_TOP_BACK_LEFT : return "CHANNEL_TOP_BACK_LEFT"; + case MA_CHANNEL_TOP_BACK_CENTER : return "CHANNEL_TOP_BACK_CENTER"; + case MA_CHANNEL_TOP_BACK_RIGHT : return "CHANNEL_TOP_BACK_RIGHT"; + case MA_CHANNEL_AUX_0 : return "CHANNEL_AUX_0"; + case MA_CHANNEL_AUX_1 : return "CHANNEL_AUX_1"; + case MA_CHANNEL_AUX_2 : return "CHANNEL_AUX_2"; + case MA_CHANNEL_AUX_3 : return "CHANNEL_AUX_3"; + case MA_CHANNEL_AUX_4 : return "CHANNEL_AUX_4"; + case MA_CHANNEL_AUX_5 : return "CHANNEL_AUX_5"; + case MA_CHANNEL_AUX_6 : return "CHANNEL_AUX_6"; + case MA_CHANNEL_AUX_7 : return "CHANNEL_AUX_7"; + case MA_CHANNEL_AUX_8 : return "CHANNEL_AUX_8"; + case MA_CHANNEL_AUX_9 : return "CHANNEL_AUX_9"; + case MA_CHANNEL_AUX_10 : return "CHANNEL_AUX_10"; + case MA_CHANNEL_AUX_11 : return "CHANNEL_AUX_11"; + case MA_CHANNEL_AUX_12 : return "CHANNEL_AUX_12"; + case MA_CHANNEL_AUX_13 : return "CHANNEL_AUX_13"; + case MA_CHANNEL_AUX_14 : return "CHANNEL_AUX_14"; + case MA_CHANNEL_AUX_15 : return "CHANNEL_AUX_15"; + case MA_CHANNEL_AUX_16 : return "CHANNEL_AUX_16"; + case MA_CHANNEL_AUX_17 : return "CHANNEL_AUX_17"; + case MA_CHANNEL_AUX_18 : return "CHANNEL_AUX_18"; + case MA_CHANNEL_AUX_19 : return "CHANNEL_AUX_19"; + case MA_CHANNEL_AUX_20 : return "CHANNEL_AUX_20"; + case MA_CHANNEL_AUX_21 : return "CHANNEL_AUX_21"; + case MA_CHANNEL_AUX_22 : return "CHANNEL_AUX_22"; + case MA_CHANNEL_AUX_23 : return "CHANNEL_AUX_23"; + case MA_CHANNEL_AUX_24 : return "CHANNEL_AUX_24"; + case MA_CHANNEL_AUX_25 : return "CHANNEL_AUX_25"; + case MA_CHANNEL_AUX_26 : return "CHANNEL_AUX_26"; + case MA_CHANNEL_AUX_27 : return "CHANNEL_AUX_27"; + case MA_CHANNEL_AUX_28 : return "CHANNEL_AUX_28"; + case MA_CHANNEL_AUX_29 : return "CHANNEL_AUX_29"; + case MA_CHANNEL_AUX_30 : return "CHANNEL_AUX_30"; + case MA_CHANNEL_AUX_31 : return "CHANNEL_AUX_31"; + default: break; + } + + return "UNKNOWN"; +} + + + +/************************************************************************************************************************************************************** + +Conversion Helpers + +**************************************************************************************************************************************************************/ +MA_API ma_uint64 ma_convert_frames(void* pOut, ma_uint64 
frameCountOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, const void* pIn, ma_uint64 frameCountIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn) +{ + ma_data_converter_config config; + + config = ma_data_converter_config_init(formatIn, formatOut, channelsIn, channelsOut, sampleRateIn, sampleRateOut); + config.resampling.linear.lpfOrder = ma_min(MA_DEFAULT_RESAMPLER_LPF_ORDER, MA_MAX_FILTER_ORDER); + + return ma_convert_frames_ex(pOut, frameCountOut, pIn, frameCountIn, &config); +} + +MA_API ma_uint64 ma_convert_frames_ex(void* pOut, ma_uint64 frameCountOut, const void* pIn, ma_uint64 frameCountIn, const ma_data_converter_config* pConfig) +{ + ma_result result; + ma_data_converter converter; + + if (frameCountIn == 0 || pConfig == NULL) { + return 0; + } + + result = ma_data_converter_init(pConfig, NULL, &converter); + if (result != MA_SUCCESS) { + return 0; /* Failed to initialize the data converter. */ + } + + if (pOut == NULL) { + result = ma_data_converter_get_expected_output_frame_count(&converter, frameCountIn, &frameCountOut); + if (result != MA_SUCCESS) { + if (result == MA_NOT_IMPLEMENTED) { + /* No way to calculate the number of frames, so we'll need to brute force it and loop. */ + frameCountOut = 0; + + while (frameCountIn > 0) { + ma_uint64 framesProcessedIn = frameCountIn; + ma_uint64 framesProcessedOut = 0xFFFFFFFF; + + result = ma_data_converter_process_pcm_frames(&converter, pIn, &framesProcessedIn, NULL, &framesProcessedOut); + if (result != MA_SUCCESS) { + break; + } + + frameCountIn -= framesProcessedIn; + } + } + } + } else { + result = ma_data_converter_process_pcm_frames(&converter, pIn, &frameCountIn, pOut, &frameCountOut); + if (result != MA_SUCCESS) { + frameCountOut = 0; + } + } + + ma_data_converter_uninit(&converter, NULL); + return frameCountOut; +} + + +/************************************************************************************************************************************************************** + +Ring Buffer + +**************************************************************************************************************************************************************/ +static MA_INLINE ma_uint32 ma_rb__extract_offset_in_bytes(ma_uint32 encodedOffset) +{ + return encodedOffset & 0x7FFFFFFF; +} + +static MA_INLINE ma_uint32 ma_rb__extract_offset_loop_flag(ma_uint32 encodedOffset) +{ + return encodedOffset & 0x80000000; +} + +static MA_INLINE void* ma_rb__get_read_ptr(ma_rb* pRB) +{ + MA_ASSERT(pRB != NULL); + return ma_offset_ptr(pRB->pBuffer, ma_rb__extract_offset_in_bytes(ma_atomic_load_32(&pRB->encodedReadOffset))); +} + +static MA_INLINE void* ma_rb__get_write_ptr(ma_rb* pRB) +{ + MA_ASSERT(pRB != NULL); + return ma_offset_ptr(pRB->pBuffer, ma_rb__extract_offset_in_bytes(ma_atomic_load_32(&pRB->encodedWriteOffset))); +} + +static MA_INLINE ma_uint32 ma_rb__construct_offset(ma_uint32 offsetInBytes, ma_uint32 offsetLoopFlag) +{ + return offsetLoopFlag | offsetInBytes; +} + +static MA_INLINE void ma_rb__deconstruct_offset(ma_uint32 encodedOffset, ma_uint32* pOffsetInBytes, ma_uint32* pOffsetLoopFlag) +{ + MA_ASSERT(pOffsetInBytes != NULL); + MA_ASSERT(pOffsetLoopFlag != NULL); + + *pOffsetInBytes = ma_rb__extract_offset_in_bytes(encodedOffset); + *pOffsetLoopFlag = ma_rb__extract_offset_loop_flag(encodedOffset); +} + + +MA_API ma_result ma_rb_init_ex(size_t subbufferSizeInBytes, size_t subbufferCount, size_t subbufferStrideInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* 
pAllocationCallbacks, ma_rb* pRB) +{ + ma_result result; + const ma_uint32 maxSubBufferSize = 0x7FFFFFFF - (MA_SIMD_ALIGNMENT-1); + + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + if (subbufferSizeInBytes == 0 || subbufferCount == 0) { + return MA_INVALID_ARGS; + } + + if (subbufferSizeInBytes > maxSubBufferSize) { + return MA_INVALID_ARGS; /* Maximum buffer size is ~2GB. The most significant bit is a flag for use internally. */ + } + + + MA_ZERO_OBJECT(pRB); + + result = ma_allocation_callbacks_init_copy(&pRB->allocationCallbacks, pAllocationCallbacks); + if (result != MA_SUCCESS) { + return result; + } + + pRB->subbufferSizeInBytes = (ma_uint32)subbufferSizeInBytes; + pRB->subbufferCount = (ma_uint32)subbufferCount; + + if (pOptionalPreallocatedBuffer != NULL) { + pRB->subbufferStrideInBytes = (ma_uint32)subbufferStrideInBytes; + pRB->pBuffer = pOptionalPreallocatedBuffer; + } else { + size_t bufferSizeInBytes; + + /* + Here is where we allocate our own buffer. We always want to align this to MA_SIMD_ALIGNMENT for future SIMD optimization opportunity. To do this + we need to make sure the stride is a multiple of MA_SIMD_ALIGNMENT. + */ + pRB->subbufferStrideInBytes = (pRB->subbufferSizeInBytes + (MA_SIMD_ALIGNMENT-1)) & ~MA_SIMD_ALIGNMENT; + + bufferSizeInBytes = (size_t)pRB->subbufferCount*pRB->subbufferStrideInBytes; + pRB->pBuffer = ma_aligned_malloc(bufferSizeInBytes, MA_SIMD_ALIGNMENT, &pRB->allocationCallbacks); + if (pRB->pBuffer == NULL) { + return MA_OUT_OF_MEMORY; + } + + MA_ZERO_MEMORY(pRB->pBuffer, bufferSizeInBytes); + pRB->ownsBuffer = MA_TRUE; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_rb_init(size_t bufferSizeInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB) +{ + return ma_rb_init_ex(bufferSizeInBytes, 1, 0, pOptionalPreallocatedBuffer, pAllocationCallbacks, pRB); +} + +MA_API void ma_rb_uninit(ma_rb* pRB) +{ + if (pRB == NULL) { + return; + } + + if (pRB->ownsBuffer) { + ma_aligned_free(pRB->pBuffer, &pRB->allocationCallbacks); + } +} + +MA_API void ma_rb_reset(ma_rb* pRB) +{ + if (pRB == NULL) { + return; + } + + ma_atomic_exchange_32(&pRB->encodedReadOffset, 0); + ma_atomic_exchange_32(&pRB->encodedWriteOffset, 0); +} + +MA_API ma_result ma_rb_acquire_read(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut) +{ + ma_uint32 writeOffset; + ma_uint32 writeOffsetInBytes; + ma_uint32 writeOffsetLoopFlag; + ma_uint32 readOffset; + ma_uint32 readOffsetInBytes; + ma_uint32 readOffsetLoopFlag; + size_t bytesAvailable; + size_t bytesRequested; + + if (pRB == NULL || pSizeInBytes == NULL || ppBufferOut == NULL) { + return MA_INVALID_ARGS; + } + + /* The returned buffer should never move ahead of the write pointer. */ + writeOffset = ma_atomic_load_32(&pRB->encodedWriteOffset); + ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag); + + readOffset = ma_atomic_load_32(&pRB->encodedReadOffset); + ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag); + + /* + The number of bytes available depends on whether or not the read and write pointers are on the same loop iteration. If so, we + can only read up to the write pointer. If not, we can only read up to the end of the buffer. 
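+
+    From the consumer's point of view this is driven with the acquire/commit pair. An illustrative
+    sketch (hypothetical consumer code; rb, dst and bytesWanted are assumed, and memcpy requires
+    <string.h>):
+
+        void*  p;
+        size_t sz = bytesWanted;
+        if (ma_rb_acquire_read(&rb, &sz, &p) == MA_SUCCESS) {
+            memcpy(dst, p, sz);
+            ma_rb_commit_read(&rb, sz);
+        }
+
+    On return from ma_rb_acquire_read, sz may have been clamped to the contiguous bytes available,
+    and ma_rb_commit_read returns MA_AT_END once the buffer becomes empty.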
+ */ + if (readOffsetLoopFlag == writeOffsetLoopFlag) { + bytesAvailable = writeOffsetInBytes - readOffsetInBytes; + } else { + bytesAvailable = pRB->subbufferSizeInBytes - readOffsetInBytes; + } + + bytesRequested = *pSizeInBytes; + if (bytesRequested > bytesAvailable) { + bytesRequested = bytesAvailable; + } + + *pSizeInBytes = bytesRequested; + (*ppBufferOut) = ma_rb__get_read_ptr(pRB); + + return MA_SUCCESS; +} + +MA_API ma_result ma_rb_commit_read(ma_rb* pRB, size_t sizeInBytes) +{ + ma_uint32 readOffset; + ma_uint32 readOffsetInBytes; + ma_uint32 readOffsetLoopFlag; + ma_uint32 newReadOffsetInBytes; + ma_uint32 newReadOffsetLoopFlag; + + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + readOffset = ma_atomic_load_32(&pRB->encodedReadOffset); + ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag); + + /* Check that sizeInBytes is correct. It should never go beyond the end of the buffer. */ + newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + sizeInBytes); + if (newReadOffsetInBytes > pRB->subbufferSizeInBytes) { + return MA_INVALID_ARGS; /* <-- sizeInBytes will cause the read offset to overflow. */ + } + + /* Move the read pointer back to the start if necessary. */ + newReadOffsetLoopFlag = readOffsetLoopFlag; + if (newReadOffsetInBytes == pRB->subbufferSizeInBytes) { + newReadOffsetInBytes = 0; + newReadOffsetLoopFlag ^= 0x80000000; + } + + ma_atomic_exchange_32(&pRB->encodedReadOffset, ma_rb__construct_offset(newReadOffsetLoopFlag, newReadOffsetInBytes)); + + if (ma_rb_pointer_distance(pRB) == 0) { + return MA_AT_END; + } else { + return MA_SUCCESS; + } +} + +MA_API ma_result ma_rb_acquire_write(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut) +{ + ma_uint32 readOffset; + ma_uint32 readOffsetInBytes; + ma_uint32 readOffsetLoopFlag; + ma_uint32 writeOffset; + ma_uint32 writeOffsetInBytes; + ma_uint32 writeOffsetLoopFlag; + size_t bytesAvailable; + size_t bytesRequested; + + if (pRB == NULL || pSizeInBytes == NULL || ppBufferOut == NULL) { + return MA_INVALID_ARGS; + } + + /* The returned buffer should never overtake the read buffer. */ + readOffset = ma_atomic_load_32(&pRB->encodedReadOffset); + ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag); + + writeOffset = ma_atomic_load_32(&pRB->encodedWriteOffset); + ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag); + + /* + In the case of writing, if the write pointer and the read pointer are on the same loop iteration we can only + write up to the end of the buffer. Otherwise we can only write up to the read pointer. The write pointer should + never overtake the read pointer. + */ + if (writeOffsetLoopFlag == readOffsetLoopFlag) { + bytesAvailable = pRB->subbufferSizeInBytes - writeOffsetInBytes; + } else { + bytesAvailable = readOffsetInBytes - writeOffsetInBytes; + } + + bytesRequested = *pSizeInBytes; + if (bytesRequested > bytesAvailable) { + bytesRequested = bytesAvailable; + } + + *pSizeInBytes = bytesRequested; + *ppBufferOut = ma_rb__get_write_ptr(pRB); + + /* Clear the buffer if desired. 
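For reference, a minimal consumer-side sketch of the acquire/commit pattern implemented above (illustrative only; the buffer size, request size and error handling are placeholders):

    ma_rb rb;
    if (ma_rb_init(4096, NULL, NULL, &rb) == MA_SUCCESS) {
        void* pSrc;
        size_t bytesToRead = 256;                         // request up to 256 bytes
        if (ma_rb_acquire_read(&rb, &bytesToRead, &pSrc) == MA_SUCCESS) {
            // ... consume bytesToRead bytes from pSrc ...
            ma_rb_commit_read(&rb, bytesToRead);          // advances the read pointer
        }
        ma_rb_uninit(&rb);
    }

The write side mirrors this with ma_rb_acquire_write and ma_rb_commit_write.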
*/ + if (pRB->clearOnWriteAcquire) { + MA_ZERO_MEMORY(*ppBufferOut, *pSizeInBytes); + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_rb_commit_write(ma_rb* pRB, size_t sizeInBytes) +{ + ma_uint32 writeOffset; + ma_uint32 writeOffsetInBytes; + ma_uint32 writeOffsetLoopFlag; + ma_uint32 newWriteOffsetInBytes; + ma_uint32 newWriteOffsetLoopFlag; + + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + writeOffset = ma_atomic_load_32(&pRB->encodedWriteOffset); + ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag); + + /* Check that sizeInBytes is correct. It should never go beyond the end of the buffer. */ + newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + sizeInBytes); + if (newWriteOffsetInBytes > pRB->subbufferSizeInBytes) { + return MA_INVALID_ARGS; /* <-- sizeInBytes will cause the read offset to overflow. */ + } + + /* Move the read pointer back to the start if necessary. */ + newWriteOffsetLoopFlag = writeOffsetLoopFlag; + if (newWriteOffsetInBytes == pRB->subbufferSizeInBytes) { + newWriteOffsetInBytes = 0; + newWriteOffsetLoopFlag ^= 0x80000000; + } + + ma_atomic_exchange_32(&pRB->encodedWriteOffset, ma_rb__construct_offset(newWriteOffsetLoopFlag, newWriteOffsetInBytes)); + + if (ma_rb_pointer_distance(pRB) == 0) { + return MA_AT_END; + } else { + return MA_SUCCESS; + } +} + +MA_API ma_result ma_rb_seek_read(ma_rb* pRB, size_t offsetInBytes) +{ + ma_uint32 readOffset; + ma_uint32 readOffsetInBytes; + ma_uint32 readOffsetLoopFlag; + ma_uint32 writeOffset; + ma_uint32 writeOffsetInBytes; + ma_uint32 writeOffsetLoopFlag; + ma_uint32 newReadOffsetInBytes; + ma_uint32 newReadOffsetLoopFlag; + + if (pRB == NULL || offsetInBytes > pRB->subbufferSizeInBytes) { + return MA_INVALID_ARGS; + } + + readOffset = ma_atomic_load_32(&pRB->encodedReadOffset); + ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag); + + writeOffset = ma_atomic_load_32(&pRB->encodedWriteOffset); + ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag); + + newReadOffsetLoopFlag = readOffsetLoopFlag; + + /* We cannot go past the write buffer. */ + if (readOffsetLoopFlag == writeOffsetLoopFlag) { + if ((readOffsetInBytes + offsetInBytes) > writeOffsetInBytes) { + newReadOffsetInBytes = writeOffsetInBytes; + } else { + newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + offsetInBytes); + } + } else { + /* May end up looping. */ + if ((readOffsetInBytes + offsetInBytes) >= pRB->subbufferSizeInBytes) { + newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + offsetInBytes) - pRB->subbufferSizeInBytes; + newReadOffsetLoopFlag ^= 0x80000000; /* <-- Looped. 
*/ + } else { + newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + offsetInBytes); + } + } + + ma_atomic_exchange_32(&pRB->encodedReadOffset, ma_rb__construct_offset(newReadOffsetInBytes, newReadOffsetLoopFlag)); + return MA_SUCCESS; +} + +MA_API ma_result ma_rb_seek_write(ma_rb* pRB, size_t offsetInBytes) +{ + ma_uint32 readOffset; + ma_uint32 readOffsetInBytes; + ma_uint32 readOffsetLoopFlag; + ma_uint32 writeOffset; + ma_uint32 writeOffsetInBytes; + ma_uint32 writeOffsetLoopFlag; + ma_uint32 newWriteOffsetInBytes; + ma_uint32 newWriteOffsetLoopFlag; + + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + readOffset = ma_atomic_load_32(&pRB->encodedReadOffset); + ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag); + + writeOffset = ma_atomic_load_32(&pRB->encodedWriteOffset); + ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag); + + newWriteOffsetLoopFlag = writeOffsetLoopFlag; + + /* We cannot go past the write buffer. */ + if (readOffsetLoopFlag == writeOffsetLoopFlag) { + /* May end up looping. */ + if ((writeOffsetInBytes + offsetInBytes) >= pRB->subbufferSizeInBytes) { + newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + offsetInBytes) - pRB->subbufferSizeInBytes; + newWriteOffsetLoopFlag ^= 0x80000000; /* <-- Looped. */ + } else { + newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + offsetInBytes); + } + } else { + if ((writeOffsetInBytes + offsetInBytes) > readOffsetInBytes) { + newWriteOffsetInBytes = readOffsetInBytes; + } else { + newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + offsetInBytes); + } + } + + ma_atomic_exchange_32(&pRB->encodedWriteOffset, ma_rb__construct_offset(newWriteOffsetInBytes, newWriteOffsetLoopFlag)); + return MA_SUCCESS; +} + +MA_API ma_int32 ma_rb_pointer_distance(ma_rb* pRB) +{ + ma_uint32 readOffset; + ma_uint32 readOffsetInBytes; + ma_uint32 readOffsetLoopFlag; + ma_uint32 writeOffset; + ma_uint32 writeOffsetInBytes; + ma_uint32 writeOffsetLoopFlag; + + if (pRB == NULL) { + return 0; + } + + readOffset = ma_atomic_load_32(&pRB->encodedReadOffset); + ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag); + + writeOffset = ma_atomic_load_32(&pRB->encodedWriteOffset); + ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag); + + if (readOffsetLoopFlag == writeOffsetLoopFlag) { + return writeOffsetInBytes - readOffsetInBytes; + } else { + return writeOffsetInBytes + (pRB->subbufferSizeInBytes - readOffsetInBytes); + } +} + +MA_API ma_uint32 ma_rb_available_read(ma_rb* pRB) +{ + ma_int32 dist; + + if (pRB == NULL) { + return 0; + } + + dist = ma_rb_pointer_distance(pRB); + if (dist < 0) { + return 0; + } + + return dist; +} + +MA_API ma_uint32 ma_rb_available_write(ma_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return (ma_uint32)(ma_rb_get_subbuffer_size(pRB) - ma_rb_pointer_distance(pRB)); +} + +MA_API size_t ma_rb_get_subbuffer_size(ma_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return pRB->subbufferSizeInBytes; +} + +MA_API size_t ma_rb_get_subbuffer_stride(ma_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + if (pRB->subbufferStrideInBytes == 0) { + return (size_t)pRB->subbufferSizeInBytes; + } + + return (size_t)pRB->subbufferStrideInBytes; +} + +MA_API size_t ma_rb_get_subbuffer_offset(ma_rb* pRB, size_t subbufferIndex) +{ + if (pRB == NULL) { + return 0; + } + + return subbufferIndex * ma_rb_get_subbuffer_stride(pRB); +} + +MA_API void* ma_rb_get_subbuffer_ptr(ma_rb* pRB, size_t 
subbufferIndex, void* pBuffer) +{ + if (pRB == NULL) { + return NULL; + } + + return ma_offset_ptr(pBuffer, ma_rb_get_subbuffer_offset(pRB, subbufferIndex)); +} + + + +static ma_result ma_pcm_rb_data_source__on_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + /* Since there's no notion of an end, we don't ever want to return MA_AT_END here. But it is possible to return 0. */ + ma_pcm_rb* pRB = (ma_pcm_rb*)pDataSource; + ma_result result; + ma_uint64 totalFramesRead; + + MA_ASSERT(pRB != NULL); + + /* We need to run this in a loop since the ring buffer itself may loop. */ + totalFramesRead = 0; + while (totalFramesRead < frameCount) { + void* pMappedBuffer; + ma_uint32 mappedFrameCount; + ma_uint64 framesToRead = frameCount - totalFramesRead; + if (framesToRead > 0xFFFFFFFF) { + framesToRead = 0xFFFFFFFF; + } + + mappedFrameCount = (ma_uint32)framesToRead; + result = ma_pcm_rb_acquire_read(pRB, &mappedFrameCount, &pMappedBuffer); + if (result != MA_SUCCESS) { + break; + } + + if (mappedFrameCount == 0) { + break; /* <-- End of ring buffer. */ + } + + ma_copy_pcm_frames(ma_offset_pcm_frames_ptr(pFramesOut, totalFramesRead, pRB->format, pRB->channels), pMappedBuffer, mappedFrameCount, pRB->format, pRB->channels); + + result = ma_pcm_rb_commit_read(pRB, mappedFrameCount); + if (result != MA_SUCCESS) { + break; + } + + totalFramesRead += mappedFrameCount; + } + + *pFramesRead = totalFramesRead; + return MA_SUCCESS; +} + +static ma_result ma_pcm_rb_data_source__on_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + ma_pcm_rb* pRB = (ma_pcm_rb*)pDataSource; + MA_ASSERT(pRB != NULL); + + if (pFormat != NULL) { + *pFormat = pRB->format; + } + + if (pChannels != NULL) { + *pChannels = pRB->channels; + } + + if (pSampleRate != NULL) { + *pSampleRate = pRB->sampleRate; + } + + /* Just assume the default channel map. */ + if (pChannelMap != NULL) { + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pRB->channels); + } + + return MA_SUCCESS; +} + +static ma_data_source_vtable ma_gRBDataSourceVTable = +{ + ma_pcm_rb_data_source__on_read, + NULL, /* onSeek */ + ma_pcm_rb_data_source__on_get_data_format, + NULL, /* onGetCursor */ + NULL, /* onGetLength */ + NULL, /* onSetLooping */ + 0 +}; + +static MA_INLINE ma_uint32 ma_pcm_rb_get_bpf(ma_pcm_rb* pRB) +{ + MA_ASSERT(pRB != NULL); + + return ma_get_bytes_per_frame(pRB->format, pRB->channels); +} + +MA_API ma_result ma_pcm_rb_init_ex(ma_format format, ma_uint32 channels, ma_uint32 subbufferSizeInFrames, ma_uint32 subbufferCount, ma_uint32 subbufferStrideInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB) +{ + ma_uint32 bpf; + ma_result result; + + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pRB); + + bpf = ma_get_bytes_per_frame(format, channels); + if (bpf == 0) { + return MA_INVALID_ARGS; + } + + result = ma_rb_init_ex(subbufferSizeInFrames*bpf, subbufferCount, subbufferStrideInFrames*bpf, pOptionalPreallocatedBuffer, pAllocationCallbacks, &pRB->rb); + if (result != MA_SUCCESS) { + return result; + } + + pRB->format = format; + pRB->channels = channels; + pRB->sampleRate = 0; /* The sample rate is not passed in as a parameter. */ + + /* The PCM ring buffer is a data source. We need to get that set up as well. 
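Since the PCM ring buffer doubles as a data source, frames pushed in through the acquire/commit API can be pulled back out through the generic data source interface. A rough sketch, assuming (as the callbacks above do) that a ma_pcm_rb can be passed wherever a ma_data_source is expected, with sizes and formats as placeholders:

    ma_pcm_rb pcmRB;
    if (ma_pcm_rb_init(ma_format_f32, 2, 1024, NULL, NULL, &pcmRB) == MA_SUCCESS) {
        ma_uint32 framesToWrite = 256;
        void* pDst;
        if (ma_pcm_rb_acquire_write(&pcmRB, &framesToWrite, &pDst) == MA_SUCCESS) {
            // ... fill pDst with framesToWrite frames of interleaved f32 stereo ...
            ma_pcm_rb_commit_write(&pcmRB, framesToWrite);
        }

        float frames[256 * 2];   // 256 frames, 2 channels
        ma_uint64 framesRead;
        ma_data_source_read_pcm_frames(&pcmRB, frames, 256, &framesRead);

        ma_pcm_rb_uninit(&pcmRB);
    }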
*/ + { + ma_data_source_config dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &ma_gRBDataSourceVTable; + + result = ma_data_source_init(&dataSourceConfig, &pRB->ds); + if (result != MA_SUCCESS) { + ma_rb_uninit(&pRB->rb); + return result; + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_pcm_rb_init(ma_format format, ma_uint32 channels, ma_uint32 bufferSizeInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB) +{ + return ma_pcm_rb_init_ex(format, channels, bufferSizeInFrames, 1, 0, pOptionalPreallocatedBuffer, pAllocationCallbacks, pRB); +} + +MA_API void ma_pcm_rb_uninit(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return; + } + + ma_data_source_uninit(&pRB->ds); + ma_rb_uninit(&pRB->rb); +} + +MA_API void ma_pcm_rb_reset(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return; + } + + ma_rb_reset(&pRB->rb); +} + +MA_API ma_result ma_pcm_rb_acquire_read(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut) +{ + size_t sizeInBytes; + ma_result result; + + if (pRB == NULL || pSizeInFrames == NULL) { + return MA_INVALID_ARGS; + } + + sizeInBytes = *pSizeInFrames * ma_pcm_rb_get_bpf(pRB); + + result = ma_rb_acquire_read(&pRB->rb, &sizeInBytes, ppBufferOut); + if (result != MA_SUCCESS) { + return result; + } + + *pSizeInFrames = (ma_uint32)(sizeInBytes / (size_t)ma_pcm_rb_get_bpf(pRB)); + return MA_SUCCESS; +} + +MA_API ma_result ma_pcm_rb_commit_read(ma_pcm_rb* pRB, ma_uint32 sizeInFrames) +{ + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + return ma_rb_commit_read(&pRB->rb, sizeInFrames * ma_pcm_rb_get_bpf(pRB)); +} + +MA_API ma_result ma_pcm_rb_acquire_write(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut) +{ + size_t sizeInBytes; + ma_result result; + + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + sizeInBytes = *pSizeInFrames * ma_pcm_rb_get_bpf(pRB); + + result = ma_rb_acquire_write(&pRB->rb, &sizeInBytes, ppBufferOut); + if (result != MA_SUCCESS) { + return result; + } + + *pSizeInFrames = (ma_uint32)(sizeInBytes / ma_pcm_rb_get_bpf(pRB)); + return MA_SUCCESS; +} + +MA_API ma_result ma_pcm_rb_commit_write(ma_pcm_rb* pRB, ma_uint32 sizeInFrames) +{ + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + return ma_rb_commit_write(&pRB->rb, sizeInFrames * ma_pcm_rb_get_bpf(pRB)); +} + +MA_API ma_result ma_pcm_rb_seek_read(ma_pcm_rb* pRB, ma_uint32 offsetInFrames) +{ + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + return ma_rb_seek_read(&pRB->rb, offsetInFrames * ma_pcm_rb_get_bpf(pRB)); +} + +MA_API ma_result ma_pcm_rb_seek_write(ma_pcm_rb* pRB, ma_uint32 offsetInFrames) +{ + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + return ma_rb_seek_write(&pRB->rb, offsetInFrames * ma_pcm_rb_get_bpf(pRB)); +} + +MA_API ma_int32 ma_pcm_rb_pointer_distance(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return ma_rb_pointer_distance(&pRB->rb) / ma_pcm_rb_get_bpf(pRB); +} + +MA_API ma_uint32 ma_pcm_rb_available_read(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return ma_rb_available_read(&pRB->rb) / ma_pcm_rb_get_bpf(pRB); +} + +MA_API ma_uint32 ma_pcm_rb_available_write(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return ma_rb_available_write(&pRB->rb) / ma_pcm_rb_get_bpf(pRB); +} + +MA_API ma_uint32 ma_pcm_rb_get_subbuffer_size(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return (ma_uint32)(ma_rb_get_subbuffer_size(&pRB->rb) / ma_pcm_rb_get_bpf(pRB)); +} + +MA_API ma_uint32 
ma_pcm_rb_get_subbuffer_stride(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return (ma_uint32)(ma_rb_get_subbuffer_stride(&pRB->rb) / ma_pcm_rb_get_bpf(pRB)); +} + +MA_API ma_uint32 ma_pcm_rb_get_subbuffer_offset(ma_pcm_rb* pRB, ma_uint32 subbufferIndex) +{ + if (pRB == NULL) { + return 0; + } + + return (ma_uint32)(ma_rb_get_subbuffer_offset(&pRB->rb, subbufferIndex) / ma_pcm_rb_get_bpf(pRB)); +} + +MA_API void* ma_pcm_rb_get_subbuffer_ptr(ma_pcm_rb* pRB, ma_uint32 subbufferIndex, void* pBuffer) +{ + if (pRB == NULL) { + return NULL; + } + + return ma_rb_get_subbuffer_ptr(&pRB->rb, subbufferIndex, pBuffer); +} + +MA_API ma_format ma_pcm_rb_get_format(const ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return ma_format_unknown; + } + + return pRB->format; +} + +MA_API ma_uint32 ma_pcm_rb_get_channels(const ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return pRB->channels; +} + +MA_API ma_uint32 ma_pcm_rb_get_sample_rate(const ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return pRB->sampleRate; +} + +MA_API void ma_pcm_rb_set_sample_rate(ma_pcm_rb* pRB, ma_uint32 sampleRate) +{ + if (pRB == NULL) { + return; + } + + pRB->sampleRate = sampleRate; +} + + + +MA_API ma_result ma_duplex_rb_init(ma_format captureFormat, ma_uint32 captureChannels, ma_uint32 sampleRate, ma_uint32 captureInternalSampleRate, ma_uint32 captureInternalPeriodSizeInFrames, const ma_allocation_callbacks* pAllocationCallbacks, ma_duplex_rb* pRB) +{ + ma_result result; + ma_uint32 sizeInFrames; + + sizeInFrames = (ma_uint32)ma_calculate_frame_count_after_resampling(sampleRate, captureInternalSampleRate, captureInternalPeriodSizeInFrames * 5); + if (sizeInFrames == 0) { + return MA_INVALID_ARGS; + } + + result = ma_pcm_rb_init(captureFormat, captureChannels, sizeInFrames, NULL, pAllocationCallbacks, &pRB->rb); + if (result != MA_SUCCESS) { + return result; + } + + /* Seek forward a bit so we have a bit of a buffer in case of desyncs. 
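As a rough worked example of the sizing above, and assuming ma_calculate_frame_count_after_resampling() scales the frame count by sampleRateOut / sampleRateIn: with sampleRate = 48000, captureInternalSampleRate = 44100 and captureInternalPeriodSizeInFrames = 441, the ring buffer is sized to roughly 441 * 5 * 48000 / 44100 = 2400 frames, and the write pointer is then seeked forward by 441 * 2 = 882 frames of headroom below.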
*/ + ma_pcm_rb_seek_write((ma_pcm_rb*)pRB, captureInternalPeriodSizeInFrames * 2); + + return MA_SUCCESS; +} + +MA_API ma_result ma_duplex_rb_uninit(ma_duplex_rb* pRB) +{ + ma_pcm_rb_uninit((ma_pcm_rb*)pRB); + return MA_SUCCESS; +} + + + +/************************************************************************************************************************************************************** + +Miscellaneous Helpers + +**************************************************************************************************************************************************************/ +MA_API const char* ma_result_description(ma_result result) +{ + switch (result) + { + case MA_SUCCESS: return "No error"; + case MA_ERROR: return "Unknown error"; + case MA_INVALID_ARGS: return "Invalid argument"; + case MA_INVALID_OPERATION: return "Invalid operation"; + case MA_OUT_OF_MEMORY: return "Out of memory"; + case MA_OUT_OF_RANGE: return "Out of range"; + case MA_ACCESS_DENIED: return "Permission denied"; + case MA_DOES_NOT_EXIST: return "Resource does not exist"; + case MA_ALREADY_EXISTS: return "Resource already exists"; + case MA_TOO_MANY_OPEN_FILES: return "Too many open files"; + case MA_INVALID_FILE: return "Invalid file"; + case MA_TOO_BIG: return "Too large"; + case MA_PATH_TOO_LONG: return "Path too long"; + case MA_NAME_TOO_LONG: return "Name too long"; + case MA_NOT_DIRECTORY: return "Not a directory"; + case MA_IS_DIRECTORY: return "Is a directory"; + case MA_DIRECTORY_NOT_EMPTY: return "Directory not empty"; + case MA_AT_END: return "At end"; + case MA_NO_SPACE: return "No space available"; + case MA_BUSY: return "Device or resource busy"; + case MA_IO_ERROR: return "Input/output error"; + case MA_INTERRUPT: return "Interrupted"; + case MA_UNAVAILABLE: return "Resource unavailable"; + case MA_ALREADY_IN_USE: return "Resource already in use"; + case MA_BAD_ADDRESS: return "Bad address"; + case MA_BAD_SEEK: return "Illegal seek"; + case MA_BAD_PIPE: return "Broken pipe"; + case MA_DEADLOCK: return "Deadlock"; + case MA_TOO_MANY_LINKS: return "Too many links"; + case MA_NOT_IMPLEMENTED: return "Not implemented"; + case MA_NO_MESSAGE: return "No message of desired type"; + case MA_BAD_MESSAGE: return "Invalid message"; + case MA_NO_DATA_AVAILABLE: return "No data available"; + case MA_INVALID_DATA: return "Invalid data"; + case MA_TIMEOUT: return "Timeout"; + case MA_NO_NETWORK: return "Network unavailable"; + case MA_NOT_UNIQUE: return "Not unique"; + case MA_NOT_SOCKET: return "Socket operation on non-socket"; + case MA_NO_ADDRESS: return "Destination address required"; + case MA_BAD_PROTOCOL: return "Protocol wrong type for socket"; + case MA_PROTOCOL_UNAVAILABLE: return "Protocol not available"; + case MA_PROTOCOL_NOT_SUPPORTED: return "Protocol not supported"; + case MA_PROTOCOL_FAMILY_NOT_SUPPORTED: return "Protocol family not supported"; + case MA_ADDRESS_FAMILY_NOT_SUPPORTED: return "Address family not supported"; + case MA_SOCKET_NOT_SUPPORTED: return "Socket type not supported"; + case MA_CONNECTION_RESET: return "Connection reset"; + case MA_ALREADY_CONNECTED: return "Already connected"; + case MA_NOT_CONNECTED: return "Not connected"; + case MA_CONNECTION_REFUSED: return "Connection refused"; + case MA_NO_HOST: return "No host"; + case MA_IN_PROGRESS: return "Operation in progress"; + case MA_CANCELLED: return "Operation cancelled"; + case MA_MEMORY_ALREADY_MAPPED: return "Memory already mapped"; + + case MA_FORMAT_NOT_SUPPORTED: return "Format not supported"; + case 
MA_DEVICE_TYPE_NOT_SUPPORTED: return "Device type not supported"; + case MA_SHARE_MODE_NOT_SUPPORTED: return "Share mode not supported"; + case MA_NO_BACKEND: return "No backend"; + case MA_NO_DEVICE: return "No device"; + case MA_API_NOT_FOUND: return "API not found"; + case MA_INVALID_DEVICE_CONFIG: return "Invalid device config"; + + case MA_DEVICE_NOT_INITIALIZED: return "Device not initialized"; + case MA_DEVICE_NOT_STARTED: return "Device not started"; + + case MA_FAILED_TO_INIT_BACKEND: return "Failed to initialize backend"; + case MA_FAILED_TO_OPEN_BACKEND_DEVICE: return "Failed to open backend device"; + case MA_FAILED_TO_START_BACKEND_DEVICE: return "Failed to start backend device"; + case MA_FAILED_TO_STOP_BACKEND_DEVICE: return "Failed to stop backend device"; + + default: return "Unknown error"; + } +} + +MA_API void* ma_malloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + if (pAllocationCallbacks->onMalloc != NULL) { + return pAllocationCallbacks->onMalloc(sz, pAllocationCallbacks->pUserData); + } else { + return NULL; /* Do not fall back to the default implementation. */ + } + } else { + return ma__malloc_default(sz, NULL); + } +} + +MA_API void* ma_calloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + void* p = ma_malloc(sz, pAllocationCallbacks); + if (p != NULL) { + MA_ZERO_MEMORY(p, sz); + } + + return p; +} + +MA_API void* ma_realloc(void* p, size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(p, sz, pAllocationCallbacks->pUserData); + } else { + return NULL; /* Do not fall back to the default implementation. */ + } + } else { + return ma__realloc_default(p, sz, NULL); + } +} + +MA_API void ma_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (p == NULL) { + return; + } + + if (pAllocationCallbacks != NULL) { + if (pAllocationCallbacks->onFree != NULL) { + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } else { + return; /* Do no fall back to the default implementation. 
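When custom allocators are used, a partially filled callback struct deliberately does not fall back to the defaults, so all three callbacks should be provided. A minimal sketch (hypothetical helper names; the signatures are inferred from how the callbacks are invoked in ma_malloc, ma_realloc and ma_free, and the sketch simply forwards to the C standard allocator from stdlib.h):

    void* my_malloc(size_t sz, void* pUserData)           { (void)pUserData; return malloc(sz); }
    void* my_realloc(void* p, size_t sz, void* pUserData) { (void)pUserData; return realloc(p, sz); }
    void  my_free(void* p, void* pUserData)               { (void)pUserData; free(p); }

    ma_allocation_callbacks cb;
    cb.pUserData = NULL;
    cb.onMalloc  = my_malloc;
    cb.onRealloc = my_realloc;
    cb.onFree    = my_free;
    // cb can now be passed wherever a const ma_allocation_callbacks* is accepted,
    // e.g. ma_rb_init(4096, NULL, &cb, &rb).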
*/ + } + } else { + ma__free_default(p, NULL); + } +} + +MA_API void* ma_aligned_malloc(size_t sz, size_t alignment, const ma_allocation_callbacks* pAllocationCallbacks) +{ + size_t extraBytes; + void* pUnaligned; + void* pAligned; + + if (alignment == 0) { + return 0; + } + + extraBytes = alignment-1 + sizeof(void*); + + pUnaligned = ma_malloc(sz + extraBytes, pAllocationCallbacks); + if (pUnaligned == NULL) { + return NULL; + } + + pAligned = (void*)(((ma_uintptr)pUnaligned + extraBytes) & ~((ma_uintptr)(alignment-1))); + ((void**)pAligned)[-1] = pUnaligned; + + return pAligned; +} + +MA_API void ma_aligned_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_free(((void**)p)[-1], pAllocationCallbacks); +} + +MA_API const char* ma_get_format_name(ma_format format) +{ + switch (format) + { + case ma_format_unknown: return "Unknown"; + case ma_format_u8: return "8-bit Unsigned Integer"; + case ma_format_s16: return "16-bit Signed Integer"; + case ma_format_s24: return "24-bit Signed Integer (Tightly Packed)"; + case ma_format_s32: return "32-bit Signed Integer"; + case ma_format_f32: return "32-bit IEEE Floating Point"; + default: return "Invalid"; + } +} + +MA_API void ma_blend_f32(float* pOut, float* pInA, float* pInB, float factor, ma_uint32 channels) +{ + ma_uint32 i; + for (i = 0; i < channels; ++i) { + pOut[i] = ma_mix_f32(pInA[i], pInB[i], factor); + } +} + + +MA_API ma_uint32 ma_get_bytes_per_sample(ma_format format) +{ + ma_uint32 sizes[] = { + 0, /* unknown */ + 1, /* u8 */ + 2, /* s16 */ + 3, /* s24 */ + 4, /* s32 */ + 4, /* f32 */ + }; + return sizes[format]; +} + + + +#define MA_DATA_SOURCE_DEFAULT_RANGE_BEG 0 +#define MA_DATA_SOURCE_DEFAULT_RANGE_END ~((ma_uint64)0) +#define MA_DATA_SOURCE_DEFAULT_LOOP_POINT_BEG 0 +#define MA_DATA_SOURCE_DEFAULT_LOOP_POINT_END ~((ma_uint64)0) + +MA_API ma_data_source_config ma_data_source_config_init(void) +{ + ma_data_source_config config; + + MA_ZERO_OBJECT(&config); + + return config; +} + + +MA_API ma_result ma_data_source_init(const ma_data_source_config* pConfig, ma_data_source* pDataSource) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pDataSourceBase); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + pDataSourceBase->vtable = pConfig->vtable; + pDataSourceBase->rangeBegInFrames = MA_DATA_SOURCE_DEFAULT_RANGE_BEG; + pDataSourceBase->rangeEndInFrames = MA_DATA_SOURCE_DEFAULT_RANGE_END; + pDataSourceBase->loopBegInFrames = MA_DATA_SOURCE_DEFAULT_LOOP_POINT_BEG; + pDataSourceBase->loopEndInFrames = MA_DATA_SOURCE_DEFAULT_LOOP_POINT_END; + pDataSourceBase->pCurrent = pDataSource; /* Always read from ourself by default. */ + pDataSourceBase->pNext = NULL; + pDataSourceBase->onGetNext = NULL; + + return MA_SUCCESS; +} + +MA_API void ma_data_source_uninit(ma_data_source* pDataSource) +{ + if (pDataSource == NULL) { + return; + } + + /* + This is placeholder in case we need this later. Data sources need to call this in their + uninitialization routine to ensure things work later on if something is added here. + */ +} + +static ma_result ma_data_source_resolve_current(ma_data_source* pDataSource, ma_data_source** ppCurrentDataSource) +{ + ma_data_source_base* pCurrentDataSource = (ma_data_source_base*)pDataSource; + + MA_ASSERT(pDataSource != NULL); + MA_ASSERT(ppCurrentDataSource != NULL); + + if (pCurrentDataSource->pCurrent == NULL) { + /* + The current data source is NULL. 
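For context, a concrete data source implementation embeds a ma_data_source_base, fills in a vtable and calls ma_data_source_init(). A hypothetical, stripped-down sketch that produces f32 stereo silence (names are placeholders; like the data sources in this file, it relies on the base object being the first struct member):

    typedef struct { ma_data_source_base ds; } my_silence_ds;

    static ma_result my_silence_on_read(ma_data_source* pDS, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead)
    {
        (void)pDS;
        ma_silence_pcm_frames(pFramesOut, frameCount, ma_format_f32, 2);
        *pFramesRead = frameCount;
        return MA_SUCCESS;
    }

    static ma_result my_silence_on_get_data_format(ma_data_source* pDS, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap)
    {
        (void)pDS; (void)pChannelMap; (void)channelMapCap;
        *pFormat = ma_format_f32; *pChannels = 2; *pSampleRate = 48000;
        return MA_SUCCESS;
    }

    static ma_data_source_vtable my_silence_vtable =
    {
        my_silence_on_read,
        NULL,                            // onSeek
        my_silence_on_get_data_format,
        NULL,                            // onGetCursor
        NULL,                            // onGetLength
        NULL,                            // onSetLooping
        0                                // flags
    };

    // my_silence_ds ds;
    // ma_data_source_config cfg = ma_data_source_config_init();
    // cfg.vtable = &my_silence_vtable;
    // ma_data_source_init(&cfg, &ds.ds);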
If we're using this in the context of a chain we need to return NULL + here so that we don't end up looping. Otherwise we just return the data source itself. + */ + if (pCurrentDataSource->pNext != NULL || pCurrentDataSource->onGetNext != NULL) { + pCurrentDataSource = NULL; + } else { + pCurrentDataSource = (ma_data_source_base*)pDataSource; /* Not being used in a chain. Make sure we just always read from the data source itself at all times. */ + } + } else { + pCurrentDataSource = (ma_data_source_base*)pCurrentDataSource->pCurrent; + } + + *ppCurrentDataSource = pCurrentDataSource; + + return MA_SUCCESS; +} + +static ma_result ma_data_source_read_pcm_frames_within_range(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + ma_result result; + ma_uint64 framesRead = 0; + ma_bool32 loop = ma_data_source_is_looping(pDataSource); + + if (pDataSourceBase == NULL) { + return MA_AT_END; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if ((pDataSourceBase->vtable->flags & MA_DATA_SOURCE_SELF_MANAGED_RANGE_AND_LOOP_POINT) != 0 || (pDataSourceBase->rangeEndInFrames == ~((ma_uint64)0) && (pDataSourceBase->loopEndInFrames == ~((ma_uint64)0) || loop == MA_FALSE))) { + /* Either the data source is self-managing the range, or no range is set - just read like normal. The data source itself will tell us when the end is reached. */ + result = pDataSourceBase->vtable->onRead(pDataSourceBase, pFramesOut, frameCount, &framesRead); + } else { + /* Need to clamp to within the range. */ + ma_uint64 relativeCursor; + ma_uint64 absoluteCursor; + + result = ma_data_source_get_cursor_in_pcm_frames(pDataSourceBase, &relativeCursor); + if (result != MA_SUCCESS) { + /* Failed to retrieve the cursor. Cannot read within a range or loop points. Just read like normal - this may happen for things like noise data sources where it doesn't really matter. */ + result = pDataSourceBase->vtable->onRead(pDataSourceBase, pFramesOut, frameCount, &framesRead); + } else { + ma_uint64 rangeBeg; + ma_uint64 rangeEnd; + + /* We have the cursor. We need to make sure we don't read beyond our range. */ + rangeBeg = pDataSourceBase->rangeBegInFrames; + rangeEnd = pDataSourceBase->rangeEndInFrames; + + absoluteCursor = rangeBeg + relativeCursor; + + /* If looping, make sure we're within range. */ + if (loop) { + if (pDataSourceBase->loopEndInFrames != ~((ma_uint64)0)) { + rangeEnd = ma_min(rangeEnd, pDataSourceBase->rangeBegInFrames + pDataSourceBase->loopEndInFrames); + } + } + + if (frameCount > (rangeEnd - absoluteCursor) && rangeEnd != ~((ma_uint64)0)) { + frameCount = (rangeEnd - absoluteCursor); + } + + /* + If the cursor is sitting on the end of the range the frame count will be set to 0 which can + result in MA_INVALID_ARGS. In this case, we don't want to try reading, but instead return + MA_AT_END so the higher level function can know about it. + */ + if (frameCount > 0) { + result = pDataSourceBase->vtable->onRead(pDataSourceBase, pFramesOut, frameCount, &framesRead); + } else { + result = MA_AT_END; /* The cursor is sitting on the end of the range which means we're at the end. */ + } + } + } + + if (pFramesRead != NULL) { + *pFramesRead = framesRead; + } + + /* We need to make sure MA_AT_END is returned if we hit the end of the range. 
*/ + if (result == MA_SUCCESS && framesRead == 0) { + result = MA_AT_END; + } + + return result; +} + +MA_API ma_result ma_data_source_read_pcm_frames(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_result result = MA_SUCCESS; + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + ma_data_source_base* pCurrentDataSource; + void* pRunningFramesOut = pFramesOut; + ma_uint64 totalFramesProcessed = 0; + ma_format format; + ma_uint32 channels; + ma_uint32 emptyLoopCounter = 0; /* Keeps track of how many times 0 frames have been read. For infinite loop detection of sounds with no audio data. */ + ma_bool32 loop; + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if (pDataSourceBase == NULL) { + return MA_INVALID_ARGS; + } + + loop = ma_data_source_is_looping(pDataSource); + + /* + We need to know the data format so we can advance the output buffer as we read frames. If this + fails, chaining will not work and we'll just read as much as we can from the current source. + */ + if (ma_data_source_get_data_format(pDataSource, &format, &channels, NULL, NULL, 0) != MA_SUCCESS) { + result = ma_data_source_resolve_current(pDataSource, (ma_data_source**)&pCurrentDataSource); + if (result != MA_SUCCESS) { + return result; + } + + return ma_data_source_read_pcm_frames_within_range(pCurrentDataSource, pFramesOut, frameCount, pFramesRead); + } + + /* + Looping is a bit of a special case. When the `loop` argument is true, chaining will not work and + only the current data source will be read from. + */ + + /* Keep reading until we've read as many frames as possible. */ + while (totalFramesProcessed < frameCount) { + ma_uint64 framesProcessed; + ma_uint64 framesRemaining = frameCount - totalFramesProcessed; + + /* We need to resolve the data source that we'll actually be reading from. */ + result = ma_data_source_resolve_current(pDataSource, (ma_data_source**)&pCurrentDataSource); + if (result != MA_SUCCESS) { + break; + } + + if (pCurrentDataSource == NULL) { + break; + } + + result = ma_data_source_read_pcm_frames_within_range(pCurrentDataSource, pRunningFramesOut, framesRemaining, &framesProcessed); + totalFramesProcessed += framesProcessed; + + /* + If we encounted an error from the read callback, make sure it's propagated to the caller. The caller may need to know whether or not MA_BUSY is returned which is + not necessarily considered an error. + */ + if (result != MA_SUCCESS && result != MA_AT_END) { + break; + } + + /* + We can determine if we've reached the end by checking if ma_data_source_read_pcm_frames_within_range() returned + MA_AT_END. To loop back to the start, all we need to do is seek back to the first frame. + */ + if (result == MA_AT_END) { + /* + The result needs to be reset back to MA_SUCCESS (from MA_AT_END) so that we don't + accidentally return MA_AT_END when data has been read in prior loop iterations. at the + end of this function, the result will be checked for MA_SUCCESS, and if the total + number of frames processed is 0, will be explicitly set to MA_AT_END. + */ + result = MA_SUCCESS; + + /* + We reached the end. If we're looping, we just loop back to the start of the current + data source. If we're not looping we need to check if we have another in the chain, and + if so, switch to it. + */ + if (loop) { + if (framesProcessed == 0) { + emptyLoopCounter += 1; + if (emptyLoopCounter > 1) { + break; /* Infinite loop detected. Get out. 
*/ + } + } else { + emptyLoopCounter = 0; + } + + result = ma_data_source_seek_to_pcm_frame(pCurrentDataSource, pCurrentDataSource->loopBegInFrames); + if (result != MA_SUCCESS) { + break; /* Failed to loop. Abort. */ + } + + /* Don't return MA_AT_END for looping sounds. */ + result = MA_SUCCESS; + } else { + if (pCurrentDataSource->pNext != NULL) { + pDataSourceBase->pCurrent = pCurrentDataSource->pNext; + } else if (pCurrentDataSource->onGetNext != NULL) { + pDataSourceBase->pCurrent = pCurrentDataSource->onGetNext(pCurrentDataSource); + if (pDataSourceBase->pCurrent == NULL) { + break; /* Our callback did not return a next data source. We're done. */ + } + } else { + /* Reached the end of the chain. We're done. */ + break; + } + + /* The next data source needs to be rewound to ensure data is read in looping scenarios. */ + result = ma_data_source_seek_to_pcm_frame(pDataSourceBase->pCurrent, 0); + if (result != MA_SUCCESS) { + break; + } + } + } + + if (pRunningFramesOut != NULL) { + pRunningFramesOut = ma_offset_ptr(pRunningFramesOut, framesProcessed * ma_get_bytes_per_frame(format, channels)); + } + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesProcessed; + } + + MA_ASSERT(!(result == MA_AT_END && totalFramesProcessed > 0)); /* We should never be returning MA_AT_END if we read some data. */ + + if (result == MA_SUCCESS && totalFramesProcessed == 0) { + result = MA_AT_END; + } + + return result; +} + +MA_API ma_result ma_data_source_seek_pcm_frames(ma_data_source* pDataSource, ma_uint64 frameCount, ma_uint64* pFramesSeeked) +{ + return ma_data_source_read_pcm_frames(pDataSource, NULL, frameCount, pFramesSeeked); +} + +MA_API ma_result ma_data_source_seek_to_pcm_frame(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + + if (pDataSourceBase == NULL) { + return MA_SUCCESS; + } + + if (pDataSourceBase->vtable->onSeek == NULL) { + return MA_NOT_IMPLEMENTED; + } + + if (frameIndex > pDataSourceBase->rangeEndInFrames) { + return MA_INVALID_OPERATION; /* Trying to seek to far forward. */ + } + + return pDataSourceBase->vtable->onSeek(pDataSource, pDataSourceBase->rangeBegInFrames + frameIndex); +} + +MA_API ma_result ma_data_source_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + ma_result result; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + + /* Initialize to defaults for safety just in case the data source does not implement this callback. */ + if (pFormat != NULL) { + *pFormat = ma_format_unknown; + } + if (pChannels != NULL) { + *pChannels = 0; + } + if (pSampleRate != NULL) { + *pSampleRate = 0; + } + if (pChannelMap != NULL) { + MA_ZERO_MEMORY(pChannelMap, sizeof(*pChannelMap) * channelMapCap); + } + + if (pDataSourceBase == NULL) { + return MA_INVALID_ARGS; + } + + if (pDataSourceBase->vtable->onGetDataFormat == NULL) { + return MA_NOT_IMPLEMENTED; + } + + result = pDataSourceBase->vtable->onGetDataFormat(pDataSource, &format, &channels, &sampleRate, pChannelMap, channelMapCap); + if (result != MA_SUCCESS) { + return result; + } + + if (pFormat != NULL) { + *pFormat = format; + } + if (pChannels != NULL) { + *pChannels = channels; + } + if (pSampleRate != NULL) { + *pSampleRate = sampleRate; + } + + /* Channel map was passed in directly to the callback. 
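The chaining behaviour above is driven from the caller with ma_data_source_set_next(). A short sketch (pFirst and pSecond stand for any two already-initialized data sources that share the same format and channel count; looping must be disabled for chaining to take effect):

    ma_data_source_set_next(pFirst, pSecond);

    float buffer[1024 * 2];   // assuming stereo f32 output
    ma_uint64 framesRead;
    ma_result result = ma_data_source_read_pcm_frames(pFirst, buffer, 1024, &framesRead);
    // Once pFirst is exhausted, reads continue transparently from pSecond;
    // MA_AT_END is only returned after the whole chain has been exhausted.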
This is safe due to the channelMapCap parameter. */ + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_source_get_cursor_in_pcm_frames(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + ma_result result; + ma_uint64 cursor; + + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; + + if (pDataSourceBase == NULL) { + return MA_SUCCESS; + } + + if (pDataSourceBase->vtable->onGetCursor == NULL) { + return MA_NOT_IMPLEMENTED; + } + + result = pDataSourceBase->vtable->onGetCursor(pDataSourceBase, &cursor); + if (result != MA_SUCCESS) { + return result; + } + + /* The cursor needs to be made relative to the start of the range. */ + if (cursor < pDataSourceBase->rangeBegInFrames) { /* Safety check so we don't return some huge number. */ + *pCursor = 0; + } else { + *pCursor = cursor - pDataSourceBase->rangeBegInFrames; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_source_get_length_in_pcm_frames(ma_data_source* pDataSource, ma_uint64* pLength) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; + + if (pDataSourceBase == NULL) { + return MA_INVALID_ARGS; + } + + /* + If we have a range defined we'll use that to determine the length. This is one of rare times + where we'll actually trust the caller. If they've set the range, I think it's mostly safe to + assume they've set it based on some higher level knowledge of the structure of the sound bank. + */ + if (pDataSourceBase->rangeEndInFrames != ~((ma_uint64)0)) { + *pLength = pDataSourceBase->rangeEndInFrames - pDataSourceBase->rangeBegInFrames; + return MA_SUCCESS; + } + + /* + Getting here means a range is not defined so we'll need to get the data source itself to tell + us the length. + */ + if (pDataSourceBase->vtable->onGetLength == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pDataSourceBase->vtable->onGetLength(pDataSource, pLength); +} + +MA_API ma_result ma_data_source_get_cursor_in_seconds(ma_data_source* pDataSource, float* pCursor) +{ + ma_result result; + ma_uint64 cursorInPCMFrames; + ma_uint32 sampleRate; + + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; + + result = ma_data_source_get_cursor_in_pcm_frames(pDataSource, &cursorInPCMFrames); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_data_source_get_data_format(pDataSource, NULL, NULL, &sampleRate, NULL, 0); + if (result != MA_SUCCESS) { + return result; + } + + /* VC6 does not support division of unsigned 64-bit integers with floating point numbers. Need to use a signed number. This shouldn't effect anything in practice. */ + *pCursor = (ma_int64)cursorInPCMFrames / (float)sampleRate; + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_source_get_length_in_seconds(ma_data_source* pDataSource, float* pLength) +{ + ma_result result; + ma_uint64 lengthInPCMFrames; + ma_uint32 sampleRate; + + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; + + result = ma_data_source_get_length_in_pcm_frames(pDataSource, &lengthInPCMFrames); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_data_source_get_data_format(pDataSource, NULL, NULL, &sampleRate, NULL, 0); + if (result != MA_SUCCESS) { + return result; + } + + /* VC6 does not support division of unsigned 64-bit integers with floating point numbers. Need to use a signed number. This shouldn't effect anything in practice. 
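As a quick worked example: a length of 132300 PCM frames at a sample rate of 44100 comes out as 132300 / 44100.0 = 3.0 seconds, and the cursor is converted to seconds the same way above.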
*/ + *pLength = (ma_int64)lengthInPCMFrames / (float)sampleRate; + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_source_set_looping(ma_data_source* pDataSource, ma_bool32 isLooping) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + ma_atomic_exchange_32(&pDataSourceBase->isLooping, isLooping); + + /* If there's no callback for this just treat it as a successful no-op. */ + if (pDataSourceBase->vtable->onSetLooping == NULL) { + return MA_SUCCESS; + } + + return pDataSourceBase->vtable->onSetLooping(pDataSource, isLooping); +} + +MA_API ma_bool32 ma_data_source_is_looping(const ma_data_source* pDataSource) +{ + const ma_data_source_base* pDataSourceBase = (const ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return MA_FALSE; + } + + return ma_atomic_load_32(&pDataSourceBase->isLooping); +} + +MA_API ma_result ma_data_source_set_range_in_pcm_frames(ma_data_source* pDataSource, ma_uint64 rangeBegInFrames, ma_uint64 rangeEndInFrames) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + ma_result result; + ma_uint64 relativeCursor; + ma_uint64 absoluteCursor; + ma_bool32 doSeekAdjustment = MA_FALSE; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if (rangeEndInFrames < rangeBegInFrames) { + return MA_INVALID_ARGS; /* The end of the range must come after the beginning. */ + } + + /* + We may need to adjust the position of the cursor to ensure it's clamped to the range. Grab it now + so we can calculate it's absolute position before we change the range. + */ + result = ma_data_source_get_cursor_in_pcm_frames(pDataSource, &relativeCursor); + if (result == MA_SUCCESS) { + doSeekAdjustment = MA_TRUE; + absoluteCursor = relativeCursor + pDataSourceBase->rangeBegInFrames; + } else { + /* + We couldn't get the position of the cursor. It probably means the data source has no notion + of a cursor. We'll just leave it at position 0. Don't treat this as an error. + */ + doSeekAdjustment = MA_FALSE; + relativeCursor = 0; + absoluteCursor = 0; + } + + pDataSourceBase->rangeBegInFrames = rangeBegInFrames; + pDataSourceBase->rangeEndInFrames = rangeEndInFrames; + + /* + The commented out logic below was intended to maintain loop points in response to a change in the + range. However, this is not useful because it results in the sound breaking when you move the range + outside of the old loop points. I'm simplifying this by simply resetting the loop points. The + caller is expected to update their loop points if they change the range. + + In practice this should be mostly a non-issue because the majority of the time the range will be + set once right after initialization. + */ + pDataSourceBase->loopBegInFrames = 0; + pDataSourceBase->loopEndInFrames = ~((ma_uint64)0); + + + /* + Seek to within range. Note that our seek positions here are relative to the new range. We don't want + do do this if we failed to retrieve the cursor earlier on because it probably means the data source + has no notion of a cursor. In practice the seek would probably fail (which we silently ignore), but + I'm just not even going to attempt it. 
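A short usage sketch of ranges and loop points together (illustrative frame counts; pDataSource is any initialized data source). The range is set first because, as noted above, changing the range resets the loop points, and loop points are specified relative to the start of the range:

    ma_data_source_set_range_in_pcm_frames(pDataSource, 48000, 96000);    // play frames [48000, 96000)
    ma_data_source_set_loop_point_in_pcm_frames(pDataSource, 0, 24000);   // loop the first 24000 frames of the range
    ma_data_source_set_looping(pDataSource, MA_TRUE);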
+ */ + if (doSeekAdjustment) { + if (absoluteCursor < rangeBegInFrames) { + ma_data_source_seek_to_pcm_frame(pDataSource, 0); + } else if (absoluteCursor > rangeEndInFrames) { + ma_data_source_seek_to_pcm_frame(pDataSource, rangeEndInFrames - rangeBegInFrames); + } + } + + return MA_SUCCESS; +} + +MA_API void ma_data_source_get_range_in_pcm_frames(const ma_data_source* pDataSource, ma_uint64* pRangeBegInFrames, ma_uint64* pRangeEndInFrames) +{ + const ma_data_source_base* pDataSourceBase = (const ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return; + } + + if (pRangeBegInFrames != NULL) { + *pRangeBegInFrames = pDataSourceBase->rangeBegInFrames; + } + + if (pRangeEndInFrames != NULL) { + *pRangeEndInFrames = pDataSourceBase->rangeEndInFrames; + } +} + +MA_API ma_result ma_data_source_set_loop_point_in_pcm_frames(ma_data_source* pDataSource, ma_uint64 loopBegInFrames, ma_uint64 loopEndInFrames) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if (loopEndInFrames < loopBegInFrames) { + return MA_INVALID_ARGS; /* The end of the loop point must come after the beginning. */ + } + + if (loopEndInFrames > pDataSourceBase->rangeEndInFrames && loopEndInFrames != ~((ma_uint64)0)) { + return MA_INVALID_ARGS; /* The end of the loop point must not go beyond the range. */ + } + + pDataSourceBase->loopBegInFrames = loopBegInFrames; + pDataSourceBase->loopEndInFrames = loopEndInFrames; + + /* The end cannot exceed the range. */ + if (pDataSourceBase->loopEndInFrames > (pDataSourceBase->rangeEndInFrames - pDataSourceBase->rangeBegInFrames) && pDataSourceBase->loopEndInFrames != ~((ma_uint64)0)) { + pDataSourceBase->loopEndInFrames = (pDataSourceBase->rangeEndInFrames - pDataSourceBase->rangeBegInFrames); + } + + return MA_SUCCESS; +} + +MA_API void ma_data_source_get_loop_point_in_pcm_frames(const ma_data_source* pDataSource, ma_uint64* pLoopBegInFrames, ma_uint64* pLoopEndInFrames) +{ + const ma_data_source_base* pDataSourceBase = (const ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return; + } + + if (pLoopBegInFrames != NULL) { + *pLoopBegInFrames = pDataSourceBase->loopBegInFrames; + } + + if (pLoopEndInFrames != NULL) { + *pLoopEndInFrames = pDataSourceBase->loopEndInFrames; + } +} + +MA_API ma_result ma_data_source_set_current(ma_data_source* pDataSource, ma_data_source* pCurrentDataSource) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + pDataSourceBase->pCurrent = pCurrentDataSource; + + return MA_SUCCESS; +} + +MA_API ma_data_source* ma_data_source_get_current(const ma_data_source* pDataSource) +{ + const ma_data_source_base* pDataSourceBase = (const ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return NULL; + } + + return pDataSourceBase->pCurrent; +} + +MA_API ma_result ma_data_source_set_next(ma_data_source* pDataSource, ma_data_source* pNextDataSource) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + pDataSourceBase->pNext = pNextDataSource; + + return MA_SUCCESS; +} + +MA_API ma_data_source* ma_data_source_get_next(const ma_data_source* pDataSource) +{ + const ma_data_source_base* pDataSourceBase = (const ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return NULL; + } + + return pDataSourceBase->pNext; +} + +MA_API ma_result 
ma_data_source_set_next_callback(ma_data_source* pDataSource, ma_data_source_get_next_proc onGetNext) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + pDataSourceBase->onGetNext = onGetNext; + + return MA_SUCCESS; +} + +MA_API ma_data_source_get_next_proc ma_data_source_get_next_callback(const ma_data_source* pDataSource) +{ + const ma_data_source_base* pDataSourceBase = (const ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return NULL; + } + + return pDataSourceBase->onGetNext; +} + + +static ma_result ma_audio_buffer_ref__data_source_on_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_audio_buffer_ref* pAudioBufferRef = (ma_audio_buffer_ref*)pDataSource; + ma_uint64 framesRead = ma_audio_buffer_ref_read_pcm_frames(pAudioBufferRef, pFramesOut, frameCount, MA_FALSE); + + if (pFramesRead != NULL) { + *pFramesRead = framesRead; + } + + if (framesRead < frameCount || framesRead == 0) { + return MA_AT_END; + } + + return MA_SUCCESS; +} + +static ma_result ma_audio_buffer_ref__data_source_on_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_audio_buffer_ref_seek_to_pcm_frame((ma_audio_buffer_ref*)pDataSource, frameIndex); +} + +static ma_result ma_audio_buffer_ref__data_source_on_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + ma_audio_buffer_ref* pAudioBufferRef = (ma_audio_buffer_ref*)pDataSource; + + *pFormat = pAudioBufferRef->format; + *pChannels = pAudioBufferRef->channels; + *pSampleRate = pAudioBufferRef->sampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pAudioBufferRef->channels); + + return MA_SUCCESS; +} + +static ma_result ma_audio_buffer_ref__data_source_on_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + ma_audio_buffer_ref* pAudioBufferRef = (ma_audio_buffer_ref*)pDataSource; + + *pCursor = pAudioBufferRef->cursor; + + return MA_SUCCESS; +} + +static ma_result ma_audio_buffer_ref__data_source_on_get_length(ma_data_source* pDataSource, ma_uint64* pLength) +{ + ma_audio_buffer_ref* pAudioBufferRef = (ma_audio_buffer_ref*)pDataSource; + + *pLength = pAudioBufferRef->sizeInFrames; + + return MA_SUCCESS; +} + +static ma_data_source_vtable g_ma_audio_buffer_ref_data_source_vtable = +{ + ma_audio_buffer_ref__data_source_on_read, + ma_audio_buffer_ref__data_source_on_seek, + ma_audio_buffer_ref__data_source_on_get_data_format, + ma_audio_buffer_ref__data_source_on_get_cursor, + ma_audio_buffer_ref__data_source_on_get_length, + NULL, /* onSetLooping */ + 0 +}; + +MA_API ma_result ma_audio_buffer_ref_init(ma_format format, ma_uint32 channels, const void* pData, ma_uint64 sizeInFrames, ma_audio_buffer_ref* pAudioBufferRef) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + + if (pAudioBufferRef == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pAudioBufferRef); + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_audio_buffer_ref_data_source_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pAudioBufferRef->ds); + if (result != MA_SUCCESS) { + return result; + } + + pAudioBufferRef->format = format; + pAudioBufferRef->channels = channels; + pAudioBufferRef->sampleRate = 0; /* TODO: Version 0.12. Set this to sampleRate. 
*/ + pAudioBufferRef->cursor = 0; + pAudioBufferRef->sizeInFrames = sizeInFrames; + pAudioBufferRef->pData = pData; + + return MA_SUCCESS; +} + +MA_API void ma_audio_buffer_ref_uninit(ma_audio_buffer_ref* pAudioBufferRef) +{ + if (pAudioBufferRef == NULL) { + return; + } + + ma_data_source_uninit(&pAudioBufferRef->ds); +} + +MA_API ma_result ma_audio_buffer_ref_set_data(ma_audio_buffer_ref* pAudioBufferRef, const void* pData, ma_uint64 sizeInFrames) +{ + if (pAudioBufferRef == NULL) { + return MA_INVALID_ARGS; + } + + pAudioBufferRef->cursor = 0; + pAudioBufferRef->sizeInFrames = sizeInFrames; + pAudioBufferRef->pData = pData; + + return MA_SUCCESS; +} + +MA_API ma_uint64 ma_audio_buffer_ref_read_pcm_frames(ma_audio_buffer_ref* pAudioBufferRef, void* pFramesOut, ma_uint64 frameCount, ma_bool32 loop) +{ + ma_uint64 totalFramesRead = 0; + + if (pAudioBufferRef == NULL) { + return 0; + } + + if (frameCount == 0) { + return 0; + } + + while (totalFramesRead < frameCount) { + ma_uint64 framesAvailable = pAudioBufferRef->sizeInFrames - pAudioBufferRef->cursor; + ma_uint64 framesRemaining = frameCount - totalFramesRead; + ma_uint64 framesToRead; + + framesToRead = framesRemaining; + if (framesToRead > framesAvailable) { + framesToRead = framesAvailable; + } + + if (pFramesOut != NULL) { + ma_copy_pcm_frames(ma_offset_ptr(pFramesOut, totalFramesRead * ma_get_bytes_per_frame(pAudioBufferRef->format, pAudioBufferRef->channels)), ma_offset_ptr(pAudioBufferRef->pData, pAudioBufferRef->cursor * ma_get_bytes_per_frame(pAudioBufferRef->format, pAudioBufferRef->channels)), framesToRead, pAudioBufferRef->format, pAudioBufferRef->channels); + } + + totalFramesRead += framesToRead; + + pAudioBufferRef->cursor += framesToRead; + if (pAudioBufferRef->cursor == pAudioBufferRef->sizeInFrames) { + if (loop) { + pAudioBufferRef->cursor = 0; + } else { + break; /* We've reached the end and we're not looping. Done. */ + } + } + + MA_ASSERT(pAudioBufferRef->cursor < pAudioBufferRef->sizeInFrames); + } + + return totalFramesRead; +} + +MA_API ma_result ma_audio_buffer_ref_seek_to_pcm_frame(ma_audio_buffer_ref* pAudioBufferRef, ma_uint64 frameIndex) +{ + if (pAudioBufferRef == NULL) { + return MA_INVALID_ARGS; + } + + if (frameIndex > pAudioBufferRef->sizeInFrames) { + return MA_INVALID_ARGS; + } + + pAudioBufferRef->cursor = (size_t)frameIndex; + + return MA_SUCCESS; +} + +MA_API ma_result ma_audio_buffer_ref_map(ma_audio_buffer_ref* pAudioBufferRef, void** ppFramesOut, ma_uint64* pFrameCount) +{ + ma_uint64 framesAvailable; + ma_uint64 frameCount = 0; + + if (ppFramesOut != NULL) { + *ppFramesOut = NULL; /* Safety. */ + } + + if (pFrameCount != NULL) { + frameCount = *pFrameCount; + *pFrameCount = 0; /* Safety. 
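A minimal sketch of wrapping existing sample memory with the ref type above (no copy is made; the data, format and sizes here are placeholders):

    float existingSamples[1024 * 2];   // 1024 frames of interleaved stereo f32, filled elsewhere
    ma_audio_buffer_ref ref;
    if (ma_audio_buffer_ref_init(ma_format_f32, 2, existingSamples, 1024, &ref) == MA_SUCCESS) {
        float chunk[256 * 2];
        ma_uint64 framesRead = ma_audio_buffer_ref_read_pcm_frames(&ref, chunk, 256, MA_FALSE);
        (void)framesRead;   // 256 here; repeated calls advance the cursor until it reaches
                            // 1024, after which 0 frames are returned (looping not requested).
        ma_audio_buffer_ref_uninit(&ref);
    }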
*/ + } + + if (pAudioBufferRef == NULL || ppFramesOut == NULL || pFrameCount == NULL) { + return MA_INVALID_ARGS; + } + + framesAvailable = pAudioBufferRef->sizeInFrames - pAudioBufferRef->cursor; + if (frameCount > framesAvailable) { + frameCount = framesAvailable; + } + + *ppFramesOut = ma_offset_ptr(pAudioBufferRef->pData, pAudioBufferRef->cursor * ma_get_bytes_per_frame(pAudioBufferRef->format, pAudioBufferRef->channels)); + *pFrameCount = frameCount; + + return MA_SUCCESS; +} + +MA_API ma_result ma_audio_buffer_ref_unmap(ma_audio_buffer_ref* pAudioBufferRef, ma_uint64 frameCount) +{ + ma_uint64 framesAvailable; + + if (pAudioBufferRef == NULL) { + return MA_INVALID_ARGS; + } + + framesAvailable = pAudioBufferRef->sizeInFrames - pAudioBufferRef->cursor; + if (frameCount > framesAvailable) { + return MA_INVALID_ARGS; /* The frame count was too big. This should never happen in an unmapping. Need to make sure the caller is aware of this. */ + } + + pAudioBufferRef->cursor += frameCount; + + if (pAudioBufferRef->cursor == pAudioBufferRef->sizeInFrames) { + return MA_AT_END; /* Successful. Need to tell the caller that the end has been reached so that it can loop if desired. */ + } else { + return MA_SUCCESS; + } +} + +MA_API ma_bool32 ma_audio_buffer_ref_at_end(const ma_audio_buffer_ref* pAudioBufferRef) +{ + if (pAudioBufferRef == NULL) { + return MA_FALSE; + } + + return pAudioBufferRef->cursor == pAudioBufferRef->sizeInFrames; +} + +MA_API ma_result ma_audio_buffer_ref_get_cursor_in_pcm_frames(const ma_audio_buffer_ref* pAudioBufferRef, ma_uint64* pCursor) +{ + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; + + if (pAudioBufferRef == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = pAudioBufferRef->cursor; + + return MA_SUCCESS; +} + +MA_API ma_result ma_audio_buffer_ref_get_length_in_pcm_frames(const ma_audio_buffer_ref* pAudioBufferRef, ma_uint64* pLength) +{ + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; + + if (pAudioBufferRef == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = pAudioBufferRef->sizeInFrames; + + return MA_SUCCESS; +} + +MA_API ma_result ma_audio_buffer_ref_get_available_frames(const ma_audio_buffer_ref* pAudioBufferRef, ma_uint64* pAvailableFrames) +{ + if (pAvailableFrames == NULL) { + return MA_INVALID_ARGS; + } + + *pAvailableFrames = 0; + + if (pAudioBufferRef == NULL) { + return MA_INVALID_ARGS; + } + + if (pAudioBufferRef->sizeInFrames <= pAudioBufferRef->cursor) { + *pAvailableFrames = 0; + } else { + *pAvailableFrames = pAudioBufferRef->sizeInFrames - pAudioBufferRef->cursor; + } + + return MA_SUCCESS; +} + + + + +MA_API ma_audio_buffer_config ma_audio_buffer_config_init(ma_format format, ma_uint32 channels, ma_uint64 sizeInFrames, const void* pData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_audio_buffer_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = 0; /* TODO: Version 0.12. Set this to sampleRate. */ + config.sizeInFrames = sizeInFrames; + config.pData = pData; + ma_allocation_callbacks_init_copy(&config.allocationCallbacks, pAllocationCallbacks); + + return config; +} + +static ma_result ma_audio_buffer_init_ex(const ma_audio_buffer_config* pConfig, ma_bool32 doCopy, ma_audio_buffer* pAudioBuffer) +{ + ma_result result; + + if (pAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_MEMORY(pAudioBuffer, sizeof(*pAudioBuffer) - sizeof(pAudioBuffer->_pExtraData)); /* Safety. 
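The map/unmap pair above gives zero-copy access to the wrapped memory. A small sketch (ref is an already-initialized ma_audio_buffer_ref):

    void* pMapped;
    ma_uint64 frameCount = 256;   // request up to 256 frames
    if (ma_audio_buffer_ref_map(&ref, &pMapped, &frameCount) == MA_SUCCESS) {
        // ... process frameCount frames directly from pMapped ...
        ma_audio_buffer_ref_unmap(&ref, frameCount);   // returns MA_AT_END once the end is reached
    }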
Don't overwrite the extra data. */ + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->sizeInFrames == 0) { + return MA_INVALID_ARGS; /* Not allowing buffer sizes of 0 frames. */ + } + + result = ma_audio_buffer_ref_init(pConfig->format, pConfig->channels, NULL, 0, &pAudioBuffer->ref); + if (result != MA_SUCCESS) { + return result; + } + + /* TODO: Version 0.12. Set this in ma_audio_buffer_ref_init() instead of here. */ + pAudioBuffer->ref.sampleRate = pConfig->sampleRate; + + ma_allocation_callbacks_init_copy(&pAudioBuffer->allocationCallbacks, &pConfig->allocationCallbacks); + + if (doCopy) { + ma_uint64 allocationSizeInBytes; + void* pData; + + allocationSizeInBytes = pConfig->sizeInFrames * ma_get_bytes_per_frame(pConfig->format, pConfig->channels); + if (allocationSizeInBytes > MA_SIZE_MAX) { + return MA_OUT_OF_MEMORY; /* Too big. */ + } + + pData = ma_malloc((size_t)allocationSizeInBytes, &pAudioBuffer->allocationCallbacks); /* Safe cast to size_t. */ + if (pData == NULL) { + return MA_OUT_OF_MEMORY; + } + + if (pConfig->pData != NULL) { + ma_copy_pcm_frames(pData, pConfig->pData, pConfig->sizeInFrames, pConfig->format, pConfig->channels); + } else { + ma_silence_pcm_frames(pData, pConfig->sizeInFrames, pConfig->format, pConfig->channels); + } + + ma_audio_buffer_ref_set_data(&pAudioBuffer->ref, pData, pConfig->sizeInFrames); + pAudioBuffer->ownsData = MA_TRUE; + } else { + ma_audio_buffer_ref_set_data(&pAudioBuffer->ref, pConfig->pData, pConfig->sizeInFrames); + pAudioBuffer->ownsData = MA_FALSE; + } + + return MA_SUCCESS; +} + +static void ma_audio_buffer_uninit_ex(ma_audio_buffer* pAudioBuffer, ma_bool32 doFree) +{ + if (pAudioBuffer == NULL) { + return; + } + + if (pAudioBuffer->ownsData && pAudioBuffer->ref.pData != &pAudioBuffer->_pExtraData[0]) { + ma_free((void*)pAudioBuffer->ref.pData, &pAudioBuffer->allocationCallbacks); /* Naugty const cast, but OK in this case since we've guarded it with the ownsData check. */ + } + + if (doFree) { + ma_free(pAudioBuffer, &pAudioBuffer->allocationCallbacks); + } + + ma_audio_buffer_ref_uninit(&pAudioBuffer->ref); +} + +MA_API ma_result ma_audio_buffer_init(const ma_audio_buffer_config* pConfig, ma_audio_buffer* pAudioBuffer) +{ + return ma_audio_buffer_init_ex(pConfig, MA_FALSE, pAudioBuffer); +} + +MA_API ma_result ma_audio_buffer_init_copy(const ma_audio_buffer_config* pConfig, ma_audio_buffer* pAudioBuffer) +{ + return ma_audio_buffer_init_ex(pConfig, MA_TRUE, pAudioBuffer); +} + +MA_API ma_result ma_audio_buffer_alloc_and_init(const ma_audio_buffer_config* pConfig, ma_audio_buffer** ppAudioBuffer) +{ + ma_result result; + ma_audio_buffer* pAudioBuffer; + ma_audio_buffer_config innerConfig; /* We'll be making some changes to the config, so need to make a copy. */ + ma_uint64 allocationSizeInBytes; + + if (ppAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + *ppAudioBuffer = NULL; /* Safety. */ + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + innerConfig = *pConfig; + ma_allocation_callbacks_init_copy(&innerConfig.allocationCallbacks, &pConfig->allocationCallbacks); + + allocationSizeInBytes = sizeof(*pAudioBuffer) - sizeof(pAudioBuffer->_pExtraData) + (pConfig->sizeInFrames * ma_get_bytes_per_frame(pConfig->format, pConfig->channels)); + if (allocationSizeInBytes > MA_SIZE_MAX) { + return MA_OUT_OF_MEMORY; /* Too big. */ + } + + pAudioBuffer = (ma_audio_buffer*)ma_malloc((size_t)allocationSizeInBytes, &innerConfig.allocationCallbacks); /* Safe cast to size_t. 
*/ + if (pAudioBuffer == NULL) { + return MA_OUT_OF_MEMORY; + } + + if (pConfig->pData != NULL) { + ma_copy_pcm_frames(&pAudioBuffer->_pExtraData[0], pConfig->pData, pConfig->sizeInFrames, pConfig->format, pConfig->channels); + } else { + ma_silence_pcm_frames(&pAudioBuffer->_pExtraData[0], pConfig->sizeInFrames, pConfig->format, pConfig->channels); + } + + innerConfig.pData = &pAudioBuffer->_pExtraData[0]; + + result = ma_audio_buffer_init_ex(&innerConfig, MA_FALSE, pAudioBuffer); + if (result != MA_SUCCESS) { + ma_free(pAudioBuffer, &innerConfig.allocationCallbacks); + return result; + } + + *ppAudioBuffer = pAudioBuffer; + + return MA_SUCCESS; +} + +MA_API void ma_audio_buffer_uninit(ma_audio_buffer* pAudioBuffer) +{ + ma_audio_buffer_uninit_ex(pAudioBuffer, MA_FALSE); +} + +MA_API void ma_audio_buffer_uninit_and_free(ma_audio_buffer* pAudioBuffer) +{ + ma_audio_buffer_uninit_ex(pAudioBuffer, MA_TRUE); +} + +MA_API ma_uint64 ma_audio_buffer_read_pcm_frames(ma_audio_buffer* pAudioBuffer, void* pFramesOut, ma_uint64 frameCount, ma_bool32 loop) +{ + if (pAudioBuffer == NULL) { + return 0; + } + + return ma_audio_buffer_ref_read_pcm_frames(&pAudioBuffer->ref, pFramesOut, frameCount, loop); +} + +MA_API ma_result ma_audio_buffer_seek_to_pcm_frame(ma_audio_buffer* pAudioBuffer, ma_uint64 frameIndex) +{ + if (pAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + return ma_audio_buffer_ref_seek_to_pcm_frame(&pAudioBuffer->ref, frameIndex); +} + +MA_API ma_result ma_audio_buffer_map(ma_audio_buffer* pAudioBuffer, void** ppFramesOut, ma_uint64* pFrameCount) +{ + if (ppFramesOut != NULL) { + *ppFramesOut = NULL; /* Safety. */ + } + + if (pAudioBuffer == NULL) { + if (pFrameCount != NULL) { + *pFrameCount = 0; + } + + return MA_INVALID_ARGS; + } + + return ma_audio_buffer_ref_map(&pAudioBuffer->ref, ppFramesOut, pFrameCount); +} + +MA_API ma_result ma_audio_buffer_unmap(ma_audio_buffer* pAudioBuffer, ma_uint64 frameCount) +{ + if (pAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + return ma_audio_buffer_ref_unmap(&pAudioBuffer->ref, frameCount); +} + +MA_API ma_bool32 ma_audio_buffer_at_end(const ma_audio_buffer* pAudioBuffer) +{ + if (pAudioBuffer == NULL) { + return MA_FALSE; + } + + return ma_audio_buffer_ref_at_end(&pAudioBuffer->ref); +} + +MA_API ma_result ma_audio_buffer_get_cursor_in_pcm_frames(const ma_audio_buffer* pAudioBuffer, ma_uint64* pCursor) +{ + if (pAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + return ma_audio_buffer_ref_get_cursor_in_pcm_frames(&pAudioBuffer->ref, pCursor); +} + +MA_API ma_result ma_audio_buffer_get_length_in_pcm_frames(const ma_audio_buffer* pAudioBuffer, ma_uint64* pLength) +{ + if (pAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + return ma_audio_buffer_ref_get_length_in_pcm_frames(&pAudioBuffer->ref, pLength); +} + +MA_API ma_result ma_audio_buffer_get_available_frames(const ma_audio_buffer* pAudioBuffer, ma_uint64* pAvailableFrames) +{ + if (pAvailableFrames == NULL) { + return MA_INVALID_ARGS; + } + + *pAvailableFrames = 0; + + if (pAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + return ma_audio_buffer_ref_get_available_frames(&pAudioBuffer->ref, pAvailableFrames); +} + + + + + +MA_API ma_result ma_paged_audio_buffer_data_init(ma_format format, ma_uint32 channels, ma_paged_audio_buffer_data* pData) +{ + if (pData == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pData); + + pData->format = format; + pData->channels = channels; + pData->pTail = &pData->head; + + return MA_SUCCESS; +} + +MA_API void 
ma_paged_audio_buffer_data_uninit(ma_paged_audio_buffer_data* pData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_paged_audio_buffer_page* pPage; + + if (pData == NULL) { + return; + } + + /* All pages need to be freed. */ + pPage = (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(&pData->head.pNext); + while (pPage != NULL) { + ma_paged_audio_buffer_page* pNext = (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(&pPage->pNext); + + ma_free(pPage, pAllocationCallbacks); + pPage = pNext; + } +} + +MA_API ma_paged_audio_buffer_page* ma_paged_audio_buffer_data_get_head(ma_paged_audio_buffer_data* pData) +{ + if (pData == NULL) { + return NULL; + } + + return &pData->head; +} + +MA_API ma_paged_audio_buffer_page* ma_paged_audio_buffer_data_get_tail(ma_paged_audio_buffer_data* pData) +{ + if (pData == NULL) { + return NULL; + } + + return pData->pTail; +} + +MA_API ma_result ma_paged_audio_buffer_data_get_length_in_pcm_frames(ma_paged_audio_buffer_data* pData, ma_uint64* pLength) +{ + ma_paged_audio_buffer_page* pPage; + + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; + + if (pData == NULL) { + return MA_INVALID_ARGS; + } + + /* Calculate the length from the linked list. */ + for (pPage = (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(&pData->head.pNext); pPage != NULL; pPage = (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(&pPage->pNext)) { + *pLength += pPage->sizeInFrames; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_paged_audio_buffer_data_allocate_page(ma_paged_audio_buffer_data* pData, ma_uint64 pageSizeInFrames, const void* pInitialData, const ma_allocation_callbacks* pAllocationCallbacks, ma_paged_audio_buffer_page** ppPage) +{ + ma_paged_audio_buffer_page* pPage; + ma_uint64 allocationSize; + + if (ppPage == NULL) { + return MA_INVALID_ARGS; + } + + *ppPage = NULL; + + if (pData == NULL) { + return MA_INVALID_ARGS; + } + + allocationSize = sizeof(*pPage) + (pageSizeInFrames * ma_get_bytes_per_frame(pData->format, pData->channels)); + if (allocationSize > MA_SIZE_MAX) { + return MA_OUT_OF_MEMORY; /* Too big. */ + } + + pPage = (ma_paged_audio_buffer_page*)ma_malloc((size_t)allocationSize, pAllocationCallbacks); /* Safe cast to size_t. */ + if (pPage == NULL) { + return MA_OUT_OF_MEMORY; + } + + pPage->pNext = NULL; + pPage->sizeInFrames = pageSizeInFrames; + + if (pInitialData != NULL) { + ma_copy_pcm_frames(pPage->pAudioData, pInitialData, pageSizeInFrames, pData->format, pData->channels); + } + + *ppPage = pPage; + + return MA_SUCCESS; +} + +MA_API ma_result ma_paged_audio_buffer_data_free_page(ma_paged_audio_buffer_data* pData, ma_paged_audio_buffer_page* pPage, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pData == NULL || pPage == NULL) { + return MA_INVALID_ARGS; + } + + /* It's assumed the page is not attached to the list. */ + ma_free(pPage, pAllocationCallbacks); + + return MA_SUCCESS; +} + +MA_API ma_result ma_paged_audio_buffer_data_append_page(ma_paged_audio_buffer_data* pData, ma_paged_audio_buffer_page* pPage) +{ + if (pData == NULL || pPage == NULL) { + return MA_INVALID_ARGS; + } + + /* This function assumes the page has been filled with audio data by this point. As soon as we append, the page will be available for reading. */ + + /* First thing to do is update the tail. 
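The tail pointer is swapped with a compare-and-exchange loop before the old tail's pNext is linked; readers only ever traverse the pNext chain from the head, so the new page does not become visible to them until the exchange below completes.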
*/ + for (;;) { + ma_paged_audio_buffer_page* pOldTail = (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(&pData->pTail); + ma_paged_audio_buffer_page* pNewTail = pPage; + + if (ma_atomic_compare_exchange_weak_ptr((volatile void**)&pData->pTail, (void**)&pOldTail, pNewTail)) { + /* Here is where we append the page to the list. After this, the page is attached to the list and ready to be read from. */ + ma_atomic_exchange_ptr(&pOldTail->pNext, pPage); + break; /* Done. */ + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_paged_audio_buffer_data_allocate_and_append_page(ma_paged_audio_buffer_data* pData, ma_uint32 pageSizeInFrames, const void* pInitialData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_result result; + ma_paged_audio_buffer_page* pPage; + + result = ma_paged_audio_buffer_data_allocate_page(pData, pageSizeInFrames, pInitialData, pAllocationCallbacks, &pPage); + if (result != MA_SUCCESS) { + return result; + } + + return ma_paged_audio_buffer_data_append_page(pData, pPage); /* <-- Should never fail. */ +} + + +MA_API ma_paged_audio_buffer_config ma_paged_audio_buffer_config_init(ma_paged_audio_buffer_data* pData) +{ + ma_paged_audio_buffer_config config; + + MA_ZERO_OBJECT(&config); + config.pData = pData; + + return config; +} + + +static ma_result ma_paged_audio_buffer__data_source_on_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_paged_audio_buffer_read_pcm_frames((ma_paged_audio_buffer*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_paged_audio_buffer__data_source_on_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_paged_audio_buffer_seek_to_pcm_frame((ma_paged_audio_buffer*)pDataSource, frameIndex); +} + +static ma_result ma_paged_audio_buffer__data_source_on_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + ma_paged_audio_buffer* pPagedAudioBuffer = (ma_paged_audio_buffer*)pDataSource; + + *pFormat = pPagedAudioBuffer->pData->format; + *pChannels = pPagedAudioBuffer->pData->channels; + *pSampleRate = 0; /* There is no notion of a sample rate with audio buffers. 
*/ + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pPagedAudioBuffer->pData->channels); + + return MA_SUCCESS; +} + +static ma_result ma_paged_audio_buffer__data_source_on_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + return ma_paged_audio_buffer_get_cursor_in_pcm_frames((ma_paged_audio_buffer*)pDataSource, pCursor); +} + +static ma_result ma_paged_audio_buffer__data_source_on_get_length(ma_data_source* pDataSource, ma_uint64* pLength) +{ + return ma_paged_audio_buffer_get_length_in_pcm_frames((ma_paged_audio_buffer*)pDataSource, pLength); +} + +static ma_data_source_vtable g_ma_paged_audio_buffer_data_source_vtable = +{ + ma_paged_audio_buffer__data_source_on_read, + ma_paged_audio_buffer__data_source_on_seek, + ma_paged_audio_buffer__data_source_on_get_data_format, + ma_paged_audio_buffer__data_source_on_get_cursor, + ma_paged_audio_buffer__data_source_on_get_length, + NULL, /* onSetLooping */ + 0 +}; + +MA_API ma_result ma_paged_audio_buffer_init(const ma_paged_audio_buffer_config* pConfig, ma_paged_audio_buffer* pPagedAudioBuffer) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + + if (pPagedAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pPagedAudioBuffer); + + /* A config is required for the format and channel count. */ + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->pData == NULL) { + return MA_INVALID_ARGS; /* No underlying data specified. */ + } + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_paged_audio_buffer_data_source_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pPagedAudioBuffer->ds); + if (result != MA_SUCCESS) { + return result; + } + + pPagedAudioBuffer->pData = pConfig->pData; + pPagedAudioBuffer->pCurrent = ma_paged_audio_buffer_data_get_head(pConfig->pData); + pPagedAudioBuffer->relativeCursor = 0; + pPagedAudioBuffer->absoluteCursor = 0; + + return MA_SUCCESS; +} + +MA_API void ma_paged_audio_buffer_uninit(ma_paged_audio_buffer* pPagedAudioBuffer) +{ + if (pPagedAudioBuffer == NULL) { + return; + } + + /* Nothing to do. The data needs to be deleted separately. */ +} + +MA_API ma_result ma_paged_audio_buffer_read_pcm_frames(ma_paged_audio_buffer* pPagedAudioBuffer, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_result result = MA_SUCCESS; + ma_uint64 totalFramesRead = 0; + ma_format format; + ma_uint32 channels; + + if (pPagedAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + format = pPagedAudioBuffer->pData->format; + channels = pPagedAudioBuffer->pData->channels; + + while (totalFramesRead < frameCount) { + /* Read from the current page. The buffer should never be in a state where this is NULL. 
*/ + ma_uint64 framesRemainingInCurrentPage; + ma_uint64 framesRemainingToRead = frameCount - totalFramesRead; + ma_uint64 framesToReadThisIteration; + + MA_ASSERT(pPagedAudioBuffer->pCurrent != NULL); + + framesRemainingInCurrentPage = pPagedAudioBuffer->pCurrent->sizeInFrames - pPagedAudioBuffer->relativeCursor; + + framesToReadThisIteration = ma_min(framesRemainingInCurrentPage, framesRemainingToRead); + ma_copy_pcm_frames(ma_offset_pcm_frames_ptr(pFramesOut, totalFramesRead, format, channels), ma_offset_pcm_frames_ptr(pPagedAudioBuffer->pCurrent->pAudioData, pPagedAudioBuffer->relativeCursor, format, channels), framesToReadThisIteration, format, channels); + totalFramesRead += framesToReadThisIteration; + + pPagedAudioBuffer->absoluteCursor += framesToReadThisIteration; + pPagedAudioBuffer->relativeCursor += framesToReadThisIteration; + + /* Move to the next page if necessary. If there's no more pages, we need to return MA_AT_END. */ + MA_ASSERT(pPagedAudioBuffer->relativeCursor <= pPagedAudioBuffer->pCurrent->sizeInFrames); + + if (pPagedAudioBuffer->relativeCursor == pPagedAudioBuffer->pCurrent->sizeInFrames) { + /* We reached the end of the page. Need to move to the next. If there's no more pages, we're done. */ + ma_paged_audio_buffer_page* pNext = (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(&pPagedAudioBuffer->pCurrent->pNext); + if (pNext == NULL) { + result = MA_AT_END; + break; /* We've reached the end. */ + } else { + pPagedAudioBuffer->pCurrent = pNext; + pPagedAudioBuffer->relativeCursor = 0; + } + } + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesRead; + } + + return result; +} + +MA_API ma_result ma_paged_audio_buffer_seek_to_pcm_frame(ma_paged_audio_buffer* pPagedAudioBuffer, ma_uint64 frameIndex) +{ + if (pPagedAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + if (frameIndex == pPagedAudioBuffer->absoluteCursor) { + return MA_SUCCESS; /* Nothing to do. */ + } + + if (frameIndex < pPagedAudioBuffer->absoluteCursor) { + /* Moving backwards. Need to move the cursor back to the start, and then move forward. */ + pPagedAudioBuffer->pCurrent = ma_paged_audio_buffer_data_get_head(pPagedAudioBuffer->pData); + pPagedAudioBuffer->absoluteCursor = 0; + pPagedAudioBuffer->relativeCursor = 0; + + /* Fall through to the forward seeking section below. */ + } + + if (frameIndex > pPagedAudioBuffer->absoluteCursor) { + /* Moving forward. */ + ma_paged_audio_buffer_page* pPage; + ma_uint64 runningCursor = 0; + + for (pPage = (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(&ma_paged_audio_buffer_data_get_head(pPagedAudioBuffer->pData)->pNext); pPage != NULL; pPage = (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(&pPage->pNext)) { + ma_uint64 pageRangeBeg = runningCursor; + ma_uint64 pageRangeEnd = pageRangeBeg + pPage->sizeInFrames; + + if (frameIndex >= pageRangeBeg) { + if (frameIndex < pageRangeEnd || (frameIndex == pageRangeEnd && pPage == (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(ma_paged_audio_buffer_data_get_tail(pPagedAudioBuffer->pData)))) { /* A small edge case - allow seeking to the very end of the buffer. */ + /* We found the page. */ + pPagedAudioBuffer->pCurrent = pPage; + pPagedAudioBuffer->absoluteCursor = frameIndex; + pPagedAudioBuffer->relativeCursor = frameIndex - pageRangeBeg; + return MA_SUCCESS; + } + } + + runningCursor = pageRangeEnd; + } + + /* Getting here means we tried seeking too far forward. Don't change any state. 
*/ + return MA_BAD_SEEK; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_paged_audio_buffer_get_cursor_in_pcm_frames(ma_paged_audio_buffer* pPagedAudioBuffer, ma_uint64* pCursor) +{ + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; /* Safety. */ + + if (pPagedAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = pPagedAudioBuffer->absoluteCursor; + + return MA_SUCCESS; +} + +MA_API ma_result ma_paged_audio_buffer_get_length_in_pcm_frames(ma_paged_audio_buffer* pPagedAudioBuffer, ma_uint64* pLength) +{ + return ma_paged_audio_buffer_data_get_length_in_pcm_frames(pPagedAudioBuffer->pData, pLength); +} + + + +/************************************************************************************************************************************************************** + +VFS + +**************************************************************************************************************************************************************/ +MA_API ma_result ma_vfs_open(ma_vfs* pVFS, const char* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + ma_vfs_callbacks* pCallbacks = (ma_vfs_callbacks*)pVFS; + + if (pFile == NULL) { + return MA_INVALID_ARGS; + } + + *pFile = NULL; + + if (pVFS == NULL || pFilePath == NULL || openMode == 0) { + return MA_INVALID_ARGS; + } + + if (pCallbacks->onOpen == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pCallbacks->onOpen(pVFS, pFilePath, openMode, pFile); +} + +MA_API ma_result ma_vfs_open_w(ma_vfs* pVFS, const wchar_t* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + ma_vfs_callbacks* pCallbacks = (ma_vfs_callbacks*)pVFS; + + if (pFile == NULL) { + return MA_INVALID_ARGS; + } + + *pFile = NULL; + + if (pVFS == NULL || pFilePath == NULL || openMode == 0) { + return MA_INVALID_ARGS; + } + + if (pCallbacks->onOpenW == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pCallbacks->onOpenW(pVFS, pFilePath, openMode, pFile); +} + +MA_API ma_result ma_vfs_close(ma_vfs* pVFS, ma_vfs_file file) +{ + ma_vfs_callbacks* pCallbacks = (ma_vfs_callbacks*)pVFS; + + if (pVFS == NULL || file == NULL) { + return MA_INVALID_ARGS; + } + + if (pCallbacks->onClose == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pCallbacks->onClose(pVFS, file); +} + +MA_API ma_result ma_vfs_read(ma_vfs* pVFS, ma_vfs_file file, void* pDst, size_t sizeInBytes, size_t* pBytesRead) +{ + ma_vfs_callbacks* pCallbacks = (ma_vfs_callbacks*)pVFS; + ma_result result; + size_t bytesRead = 0; + + if (pBytesRead != NULL) { + *pBytesRead = 0; + } + + if (pVFS == NULL || file == NULL || pDst == NULL) { + return MA_INVALID_ARGS; + } + + if (pCallbacks->onRead == NULL) { + return MA_NOT_IMPLEMENTED; + } + + result = pCallbacks->onRead(pVFS, file, pDst, sizeInBytes, &bytesRead); + + if (pBytesRead != NULL) { + *pBytesRead = bytesRead; + } + + if (result == MA_SUCCESS && bytesRead == 0 && sizeInBytes > 0) { + result = MA_AT_END; + } + + return result; +} + +MA_API ma_result ma_vfs_write(ma_vfs* pVFS, ma_vfs_file file, const void* pSrc, size_t sizeInBytes, size_t* pBytesWritten) +{ + ma_vfs_callbacks* pCallbacks = (ma_vfs_callbacks*)pVFS; + + if (pBytesWritten != NULL) { + *pBytesWritten = 0; + } + + if (pVFS == NULL || file == NULL || pSrc == NULL) { + return MA_INVALID_ARGS; + } + + if (pCallbacks->onWrite == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pCallbacks->onWrite(pVFS, file, pSrc, sizeInBytes, pBytesWritten); +} + +MA_API ma_result ma_vfs_seek(ma_vfs* pVFS, ma_vfs_file file, ma_int64 offset, ma_seek_origin origin) +{ + ma_vfs_callbacks* 
pCallbacks = (ma_vfs_callbacks*)pVFS; + + if (pVFS == NULL || file == NULL) { + return MA_INVALID_ARGS; + } + + if (pCallbacks->onSeek == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pCallbacks->onSeek(pVFS, file, offset, origin); +} + +MA_API ma_result ma_vfs_tell(ma_vfs* pVFS, ma_vfs_file file, ma_int64* pCursor) +{ + ma_vfs_callbacks* pCallbacks = (ma_vfs_callbacks*)pVFS; + + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; + + if (pVFS == NULL || file == NULL) { + return MA_INVALID_ARGS; + } + + if (pCallbacks->onTell == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pCallbacks->onTell(pVFS, file, pCursor); +} + +MA_API ma_result ma_vfs_info(ma_vfs* pVFS, ma_vfs_file file, ma_file_info* pInfo) +{ + ma_vfs_callbacks* pCallbacks = (ma_vfs_callbacks*)pVFS; + + if (pInfo == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pInfo); + + if (pVFS == NULL || file == NULL) { + return MA_INVALID_ARGS; + } + + if (pCallbacks->onInfo == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pCallbacks->onInfo(pVFS, file, pInfo); +} + + +#if !defined(MA_USE_WIN32_FILEIO) && (defined(MA_WIN32) && defined(MA_WIN32_DESKTOP) && !defined(MA_NO_WIN32_FILEIO) && !defined(MA_POSIX)) +#define MA_USE_WIN32_FILEIO +#endif + +#if defined(MA_USE_WIN32_FILEIO) +/* +We need to dynamically load SetFilePointer or SetFilePointerEx because older versions of Windows do +not have the Ex version. We therefore need to do some dynamic branching depending on what's available. + +We load these when we load our first file from the default VFS. It's left open for the life of the +program and is left to the OS to uninitialize when the program terminates. +*/ +typedef DWORD (__stdcall * ma_SetFilePointer_proc)(HANDLE hFile, LONG lDistanceToMove, LONG* lpDistanceToMoveHigh, DWORD dwMoveMethod); +typedef BOOL (__stdcall * ma_SetFilePointerEx_proc)(HANDLE hFile, LARGE_INTEGER liDistanceToMove, LARGE_INTEGER* lpNewFilePointer, DWORD dwMoveMethod); + +static ma_handle hKernel32DLL = NULL; +static ma_SetFilePointer_proc ma_SetFilePointer = NULL; +static ma_SetFilePointerEx_proc ma_SetFilePointerEx = NULL; + +static void ma_win32_fileio_init(void) +{ + if (hKernel32DLL == NULL) { + hKernel32DLL = ma_dlopen(NULL, "kernel32.dll"); + if (hKernel32DLL != NULL) { + ma_SetFilePointer = (ma_SetFilePointer_proc) ma_dlsym(NULL, hKernel32DLL, "SetFilePointer"); + ma_SetFilePointerEx = (ma_SetFilePointerEx_proc)ma_dlsym(NULL, hKernel32DLL, "SetFilePointerEx"); + } + } +} + +static void ma_default_vfs__get_open_settings_win32(ma_uint32 openMode, DWORD* pDesiredAccess, DWORD* pShareMode, DWORD* pCreationDisposition) +{ + *pDesiredAccess = 0; + if ((openMode & MA_OPEN_MODE_READ) != 0) { + *pDesiredAccess |= GENERIC_READ; + } + if ((openMode & MA_OPEN_MODE_WRITE) != 0) { + *pDesiredAccess |= GENERIC_WRITE; + } + + *pShareMode = 0; + if ((openMode & MA_OPEN_MODE_READ) != 0) { + *pShareMode |= FILE_SHARE_READ; + } + + if ((openMode & MA_OPEN_MODE_WRITE) != 0) { + *pCreationDisposition = CREATE_ALWAYS; /* Opening in write mode. Truncate. */ + } else { + *pCreationDisposition = OPEN_EXISTING; /* Opening in read mode. File must exist. */ + } +} + +static ma_result ma_default_vfs_open__win32(ma_vfs* pVFS, const char* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + HANDLE hFile; + DWORD dwDesiredAccess; + DWORD dwShareMode; + DWORD dwCreationDisposition; + + (void)pVFS; + + /* Load some Win32 symbols dynamically so we can dynamically check for the existence of SetFilePointerEx. 
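ma_win32_fileio_init() caches the module handle and function pointers in globals, so repeated calls after the first are effectively no-ops.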
*/ + ma_win32_fileio_init(); + + ma_default_vfs__get_open_settings_win32(openMode, &dwDesiredAccess, &dwShareMode, &dwCreationDisposition); + + hFile = CreateFileA(pFilePath, dwDesiredAccess, dwShareMode, NULL, dwCreationDisposition, FILE_ATTRIBUTE_NORMAL, NULL); + if (hFile == INVALID_HANDLE_VALUE) { + return ma_result_from_GetLastError(GetLastError()); + } + + *pFile = hFile; + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_open_w__win32(ma_vfs* pVFS, const wchar_t* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + HANDLE hFile; + DWORD dwDesiredAccess; + DWORD dwShareMode; + DWORD dwCreationDisposition; + + (void)pVFS; + + /* Load some Win32 symbols dynamically so we can dynamically check for the existence of SetFilePointerEx. */ + ma_win32_fileio_init(); + + ma_default_vfs__get_open_settings_win32(openMode, &dwDesiredAccess, &dwShareMode, &dwCreationDisposition); + + hFile = CreateFileW(pFilePath, dwDesiredAccess, dwShareMode, NULL, dwCreationDisposition, FILE_ATTRIBUTE_NORMAL, NULL); + if (hFile == INVALID_HANDLE_VALUE) { + return ma_result_from_GetLastError(GetLastError()); + } + + *pFile = hFile; + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_close__win32(ma_vfs* pVFS, ma_vfs_file file) +{ + (void)pVFS; + + if (CloseHandle((HANDLE)file) == 0) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + + +static ma_result ma_default_vfs_read__win32(ma_vfs* pVFS, ma_vfs_file file, void* pDst, size_t sizeInBytes, size_t* pBytesRead) +{ + ma_result result = MA_SUCCESS; + size_t totalBytesRead; + + (void)pVFS; + + totalBytesRead = 0; + while (totalBytesRead < sizeInBytes) { + size_t bytesRemaining; + DWORD bytesToRead; + DWORD bytesRead; + BOOL readResult; + + bytesRemaining = sizeInBytes - totalBytesRead; + if (bytesRemaining >= 0xFFFFFFFF) { + bytesToRead = 0xFFFFFFFF; + } else { + bytesToRead = (DWORD)bytesRemaining; + } + + readResult = ReadFile((HANDLE)file, ma_offset_ptr(pDst, totalBytesRead), bytesToRead, &bytesRead, NULL); + if (readResult == 1 && bytesRead == 0) { + result = MA_AT_END; + break; /* EOF */ + } + + totalBytesRead += bytesRead; + + if (bytesRead < bytesToRead) { + break; /* EOF */ + } + + if (readResult == 0) { + result = ma_result_from_GetLastError(GetLastError()); + break; + } + } + + if (pBytesRead != NULL) { + *pBytesRead = totalBytesRead; + } + + return result; +} + +static ma_result ma_default_vfs_write__win32(ma_vfs* pVFS, ma_vfs_file file, const void* pSrc, size_t sizeInBytes, size_t* pBytesWritten) +{ + ma_result result = MA_SUCCESS; + size_t totalBytesWritten; + + (void)pVFS; + + totalBytesWritten = 0; + while (totalBytesWritten < sizeInBytes) { + size_t bytesRemaining; + DWORD bytesToWrite; + DWORD bytesWritten; + BOOL writeResult; + + bytesRemaining = sizeInBytes - totalBytesWritten; + if (bytesRemaining >= 0xFFFFFFFF) { + bytesToWrite = 0xFFFFFFFF; + } else { + bytesToWrite = (DWORD)bytesRemaining; + } + + writeResult = WriteFile((HANDLE)file, ma_offset_ptr(pSrc, totalBytesWritten), bytesToWrite, &bytesWritten, NULL); + totalBytesWritten += bytesWritten; + + if (writeResult == 0) { + result = ma_result_from_GetLastError(GetLastError()); + break; + } + } + + if (pBytesWritten != NULL) { + *pBytesWritten = totalBytesWritten; + } + + return result; +} + + +static ma_result ma_default_vfs_seek__win32(ma_vfs* pVFS, ma_vfs_file file, ma_int64 offset, ma_seek_origin origin) +{ + LARGE_INTEGER liDistanceToMove; + DWORD dwMoveMethod; + BOOL result; + + (void)pVFS; + + liDistanceToMove.QuadPart = offset; + + 
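/* Map the seek origin onto the corresponding Win32 move method. The empty comment on the next line appears to exist only to keep the first branch aligned with the later else-if branches. */ +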
/* */ if (origin == ma_seek_origin_current) { + dwMoveMethod = FILE_CURRENT; + } else if (origin == ma_seek_origin_end) { + dwMoveMethod = FILE_END; + } else { + dwMoveMethod = FILE_BEGIN; + } + + if (ma_SetFilePointerEx != NULL) { + result = ma_SetFilePointerEx((HANDLE)file, liDistanceToMove, NULL, dwMoveMethod); + } else if (ma_SetFilePointer != NULL) { + /* No SetFilePointerEx() so restrict to 31 bits. */ + if (origin > 0x7FFFFFFF) { + return MA_OUT_OF_RANGE; + } + + result = ma_SetFilePointer((HANDLE)file, (LONG)liDistanceToMove.QuadPart, NULL, dwMoveMethod); + } else { + return MA_NOT_IMPLEMENTED; + } + + if (result == 0) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_tell__win32(ma_vfs* pVFS, ma_vfs_file file, ma_int64* pCursor) +{ + LARGE_INTEGER liZero; + LARGE_INTEGER liTell; + BOOL result; + + (void)pVFS; + + liZero.QuadPart = 0; + + if (ma_SetFilePointerEx != NULL) { + result = ma_SetFilePointerEx((HANDLE)file, liZero, &liTell, FILE_CURRENT); + } else if (ma_SetFilePointer != NULL) { + LONG tell; + + result = ma_SetFilePointer((HANDLE)file, (LONG)liZero.QuadPart, &tell, FILE_CURRENT); + liTell.QuadPart = tell; + } else { + return MA_NOT_IMPLEMENTED; + } + + if (result == 0) { + return ma_result_from_GetLastError(GetLastError()); + } + + if (pCursor != NULL) { + *pCursor = liTell.QuadPart; + } + + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_info__win32(ma_vfs* pVFS, ma_vfs_file file, ma_file_info* pInfo) +{ + BY_HANDLE_FILE_INFORMATION fi; + BOOL result; + + (void)pVFS; + + result = GetFileInformationByHandle((HANDLE)file, &fi); + if (result == 0) { + return ma_result_from_GetLastError(GetLastError()); + } + + pInfo->sizeInBytes = ((ma_uint64)fi.nFileSizeHigh << 32) | ((ma_uint64)fi.nFileSizeLow); + + return MA_SUCCESS; +} +#else +static ma_result ma_default_vfs_open__stdio(ma_vfs* pVFS, const char* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + ma_result result; + FILE* pFileStd; + const char* pOpenModeStr; + + MA_ASSERT(pFilePath != NULL); + MA_ASSERT(openMode != 0); + MA_ASSERT(pFile != NULL); + + (void)pVFS; + + if ((openMode & MA_OPEN_MODE_READ) != 0) { + if ((openMode & MA_OPEN_MODE_WRITE) != 0) { + pOpenModeStr = "r+"; + } else { + pOpenModeStr = "rb"; + } + } else { + pOpenModeStr = "wb"; + } + + result = ma_fopen(&pFileStd, pFilePath, pOpenModeStr); + if (result != MA_SUCCESS) { + return result; + } + + *pFile = pFileStd; + + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_open_w__stdio(ma_vfs* pVFS, const wchar_t* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + ma_result result; + FILE* pFileStd; + const wchar_t* pOpenModeStr; + + MA_ASSERT(pFilePath != NULL); + MA_ASSERT(openMode != 0); + MA_ASSERT(pFile != NULL); + + (void)pVFS; + + if ((openMode & MA_OPEN_MODE_READ) != 0) { + if ((openMode & MA_OPEN_MODE_WRITE) != 0) { + pOpenModeStr = L"r+"; + } else { + pOpenModeStr = L"rb"; + } + } else { + pOpenModeStr = L"wb"; + } + + result = ma_wfopen(&pFileStd, pFilePath, pOpenModeStr, (pVFS != NULL) ? 
&((ma_default_vfs*)pVFS)->allocationCallbacks : NULL); + if (result != MA_SUCCESS) { + return result; + } + + *pFile = pFileStd; + + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_close__stdio(ma_vfs* pVFS, ma_vfs_file file) +{ + MA_ASSERT(file != NULL); + + (void)pVFS; + + fclose((FILE*)file); + + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_read__stdio(ma_vfs* pVFS, ma_vfs_file file, void* pDst, size_t sizeInBytes, size_t* pBytesRead) +{ + size_t result; + + MA_ASSERT(file != NULL); + MA_ASSERT(pDst != NULL); + + (void)pVFS; + + result = fread(pDst, 1, sizeInBytes, (FILE*)file); + + if (pBytesRead != NULL) { + *pBytesRead = result; + } + + if (result != sizeInBytes) { + if (result == 0 && feof((FILE*)file)) { + return MA_AT_END; + } else { + return ma_result_from_errno(ferror((FILE*)file)); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_write__stdio(ma_vfs* pVFS, ma_vfs_file file, const void* pSrc, size_t sizeInBytes, size_t* pBytesWritten) +{ + size_t result; + + MA_ASSERT(file != NULL); + MA_ASSERT(pSrc != NULL); + + (void)pVFS; + + result = fwrite(pSrc, 1, sizeInBytes, (FILE*)file); + + if (pBytesWritten != NULL) { + *pBytesWritten = result; + } + + if (result != sizeInBytes) { + return ma_result_from_errno(ferror((FILE*)file)); + } + + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_seek__stdio(ma_vfs* pVFS, ma_vfs_file file, ma_int64 offset, ma_seek_origin origin) +{ + int result; + int whence; + + MA_ASSERT(file != NULL); + + (void)pVFS; + + if (origin == ma_seek_origin_start) { + whence = SEEK_SET; + } else if (origin == ma_seek_origin_end) { + whence = SEEK_END; + } else { + whence = SEEK_CUR; + } + +#if defined(_WIN32) +#if defined(_MSC_VER) && _MSC_VER > 1200 + result = _fseeki64((FILE*)file, offset, whence); +#else + /* No _fseeki64() so restrict to 31 bits. 
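fseek() takes a long offset, which is only 32 bits on Windows.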
*/ + if (origin > 0x7FFFFFFF) { + return MA_OUT_OF_RANGE; + } + + result = fseek((FILE*)file, (int)offset, whence); +#endif +#else + result = fseek((FILE*)file, (long int)offset, whence); +#endif + if (result != 0) { + return MA_ERROR; + } + + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_tell__stdio(ma_vfs* pVFS, ma_vfs_file file, ma_int64* pCursor) +{ + ma_int64 result; + + MA_ASSERT(file != NULL); + MA_ASSERT(pCursor != NULL); + + (void)pVFS; + +#if defined(_WIN32) +#if defined(_MSC_VER) && _MSC_VER > 1200 + result = _ftelli64((FILE*)file); +#else + result = ftell((FILE*)file); +#endif +#else + result = ftell((FILE*)file); +#endif + + *pCursor = result; + + return MA_SUCCESS; +} + +#if !defined(_MSC_VER) && !((defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 1) || defined(_XOPEN_SOURCE) || defined(_POSIX_SOURCE)) && !defined(MA_BSD) +int fileno(FILE *stream); +#endif + +static ma_result ma_default_vfs_info__stdio(ma_vfs* pVFS, ma_vfs_file file, ma_file_info* pInfo) +{ + int fd; + struct stat info; + + MA_ASSERT(file != NULL); + MA_ASSERT(pInfo != NULL); + + (void)pVFS; + +#if defined(_MSC_VER) + fd = _fileno((FILE*)file); +#else + fd = fileno((FILE*)file); +#endif + + if (fstat(fd, &info) != 0) { + return ma_result_from_errno(errno); + } + + pInfo->sizeInBytes = info.st_size; + + return MA_SUCCESS; +} +#endif + + +static ma_result ma_default_vfs_open(ma_vfs* pVFS, const char* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + if (pFile == NULL) { + return MA_INVALID_ARGS; + } + + *pFile = NULL; + + if (pFilePath == NULL || openMode == 0) { + return MA_INVALID_ARGS; + } + +#if defined(MA_USE_WIN32_FILEIO) + return ma_default_vfs_open__win32(pVFS, pFilePath, openMode, pFile); +#else + return ma_default_vfs_open__stdio(pVFS, pFilePath, openMode, pFile); +#endif +} + +static ma_result ma_default_vfs_open_w(ma_vfs* pVFS, const wchar_t* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + if (pFile == NULL) { + return MA_INVALID_ARGS; + } + + *pFile = NULL; + + if (pFilePath == NULL || openMode == 0) { + return MA_INVALID_ARGS; + } + +#if defined(MA_USE_WIN32_FILEIO) + return ma_default_vfs_open_w__win32(pVFS, pFilePath, openMode, pFile); +#else + return ma_default_vfs_open_w__stdio(pVFS, pFilePath, openMode, pFile); +#endif +} + +static ma_result ma_default_vfs_close(ma_vfs* pVFS, ma_vfs_file file) +{ + if (file == NULL) { + return MA_INVALID_ARGS; + } + +#if defined(MA_USE_WIN32_FILEIO) + return ma_default_vfs_close__win32(pVFS, file); +#else + return ma_default_vfs_close__stdio(pVFS, file); +#endif +} + +static ma_result ma_default_vfs_read(ma_vfs* pVFS, ma_vfs_file file, void* pDst, size_t sizeInBytes, size_t* pBytesRead) +{ + if (pBytesRead != NULL) { + *pBytesRead = 0; + } + + if (file == NULL || pDst == NULL) { + return MA_INVALID_ARGS; + } + +#if defined(MA_USE_WIN32_FILEIO) + return ma_default_vfs_read__win32(pVFS, file, pDst, sizeInBytes, pBytesRead); +#else + return ma_default_vfs_read__stdio(pVFS, file, pDst, sizeInBytes, pBytesRead); +#endif +} + +static ma_result ma_default_vfs_write(ma_vfs* pVFS, ma_vfs_file file, const void* pSrc, size_t sizeInBytes, size_t* pBytesWritten) +{ + if (pBytesWritten != NULL) { + *pBytesWritten = 0; + } + + if (file == NULL || pSrc == NULL) { + return MA_INVALID_ARGS; + } + +#if defined(MA_USE_WIN32_FILEIO) + return ma_default_vfs_write__win32(pVFS, file, pSrc, sizeInBytes, pBytesWritten); +#else + return ma_default_vfs_write__stdio(pVFS, file, pSrc, sizeInBytes, pBytesWritten); +#endif +} + +static ma_result 
ma_default_vfs_seek(ma_vfs* pVFS, ma_vfs_file file, ma_int64 offset, ma_seek_origin origin) +{ + if (file == NULL) { + return MA_INVALID_ARGS; + } + +#if defined(MA_USE_WIN32_FILEIO) + return ma_default_vfs_seek__win32(pVFS, file, offset, origin); +#else + return ma_default_vfs_seek__stdio(pVFS, file, offset, origin); +#endif +} + +static ma_result ma_default_vfs_tell(ma_vfs* pVFS, ma_vfs_file file, ma_int64* pCursor) +{ + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; + + if (file == NULL) { + return MA_INVALID_ARGS; + } + +#if defined(MA_USE_WIN32_FILEIO) + return ma_default_vfs_tell__win32(pVFS, file, pCursor); +#else + return ma_default_vfs_tell__stdio(pVFS, file, pCursor); +#endif +} + +static ma_result ma_default_vfs_info(ma_vfs* pVFS, ma_vfs_file file, ma_file_info* pInfo) +{ + if (pInfo == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pInfo); + + if (file == NULL) { + return MA_INVALID_ARGS; + } + +#if defined(MA_USE_WIN32_FILEIO) + return ma_default_vfs_info__win32(pVFS, file, pInfo); +#else + return ma_default_vfs_info__stdio(pVFS, file, pInfo); +#endif +} + + +MA_API ma_result ma_default_vfs_init(ma_default_vfs* pVFS, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pVFS == NULL) { + return MA_INVALID_ARGS; + } + + pVFS->cb.onOpen = ma_default_vfs_open; + pVFS->cb.onOpenW = ma_default_vfs_open_w; + pVFS->cb.onClose = ma_default_vfs_close; + pVFS->cb.onRead = ma_default_vfs_read; + pVFS->cb.onWrite = ma_default_vfs_write; + pVFS->cb.onSeek = ma_default_vfs_seek; + pVFS->cb.onTell = ma_default_vfs_tell; + pVFS->cb.onInfo = ma_default_vfs_info; + ma_allocation_callbacks_init_copy(&pVFS->allocationCallbacks, pAllocationCallbacks); + + return MA_SUCCESS; +} + + +MA_API ma_result ma_vfs_or_default_open(ma_vfs* pVFS, const char* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + if (pVFS != NULL) { + return ma_vfs_open(pVFS, pFilePath, openMode, pFile); + } else { + return ma_default_vfs_open(pVFS, pFilePath, openMode, pFile); + } +} + +MA_API ma_result ma_vfs_or_default_open_w(ma_vfs* pVFS, const wchar_t* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + if (pVFS != NULL) { + return ma_vfs_open_w(pVFS, pFilePath, openMode, pFile); + } else { + return ma_default_vfs_open_w(pVFS, pFilePath, openMode, pFile); + } +} + +MA_API ma_result ma_vfs_or_default_close(ma_vfs* pVFS, ma_vfs_file file) +{ + if (pVFS != NULL) { + return ma_vfs_close(pVFS, file); + } else { + return ma_default_vfs_close(pVFS, file); + } +} + +MA_API ma_result ma_vfs_or_default_read(ma_vfs* pVFS, ma_vfs_file file, void* pDst, size_t sizeInBytes, size_t* pBytesRead) +{ + if (pVFS != NULL) { + return ma_vfs_read(pVFS, file, pDst, sizeInBytes, pBytesRead); + } else { + return ma_default_vfs_read(pVFS, file, pDst, sizeInBytes, pBytesRead); + } +} + +MA_API ma_result ma_vfs_or_default_write(ma_vfs* pVFS, ma_vfs_file file, const void* pSrc, size_t sizeInBytes, size_t* pBytesWritten) +{ + if (pVFS != NULL) { + return ma_vfs_write(pVFS, file, pSrc, sizeInBytes, pBytesWritten); + } else { + return ma_default_vfs_write(pVFS, file, pSrc, sizeInBytes, pBytesWritten); + } +} + +MA_API ma_result ma_vfs_or_default_seek(ma_vfs* pVFS, ma_vfs_file file, ma_int64 offset, ma_seek_origin origin) +{ + if (pVFS != NULL) { + return ma_vfs_seek(pVFS, file, offset, origin); + } else { + return ma_default_vfs_seek(pVFS, file, offset, origin); + } +} + +MA_API ma_result ma_vfs_or_default_tell(ma_vfs* pVFS, ma_vfs_file file, ma_int64* pCursor) +{ + if (pVFS != NULL) { + return 
ma_vfs_tell(pVFS, file, pCursor); + } else { + return ma_default_vfs_tell(pVFS, file, pCursor); + } +} + +MA_API ma_result ma_vfs_or_default_info(ma_vfs* pVFS, ma_vfs_file file, ma_file_info* pInfo) +{ + if (pVFS != NULL) { + return ma_vfs_info(pVFS, file, pInfo); + } else { + return ma_default_vfs_info(pVFS, file, pInfo); + } +} + + + +static ma_result ma_vfs_open_and_read_file_ex(ma_vfs* pVFS, const char* pFilePath, const wchar_t* pFilePathW, void** ppData, size_t* pSize, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_result result; + ma_vfs_file file; + ma_file_info info; + void* pData; + size_t bytesRead; + + if (ppData != NULL) { + *ppData = NULL; + } + if (pSize != NULL) { + *pSize = 0; + } + + if (ppData == NULL) { + return MA_INVALID_ARGS; + } + + if (pFilePath != NULL) { + result = ma_vfs_or_default_open(pVFS, pFilePath, MA_OPEN_MODE_READ, &file); + } else { + result = ma_vfs_or_default_open_w(pVFS, pFilePathW, MA_OPEN_MODE_READ, &file); + } + if (result != MA_SUCCESS) { + return result; + } + + result = ma_vfs_or_default_info(pVFS, file, &info); + if (result != MA_SUCCESS) { + ma_vfs_or_default_close(pVFS, file); + return result; + } + + if (info.sizeInBytes > MA_SIZE_MAX) { + ma_vfs_or_default_close(pVFS, file); + return MA_TOO_BIG; + } + + pData = ma_malloc((size_t)info.sizeInBytes, pAllocationCallbacks); /* Safe cast. */ + if (pData == NULL) { + ma_vfs_or_default_close(pVFS, file); + return result; + } + + result = ma_vfs_or_default_read(pVFS, file, pData, (size_t)info.sizeInBytes, &bytesRead); /* Safe cast. */ + ma_vfs_or_default_close(pVFS, file); + + if (result != MA_SUCCESS) { + ma_free(pData, pAllocationCallbacks); + return result; + } + + if (pSize != NULL) { + *pSize = bytesRead; + } + + MA_ASSERT(ppData != NULL); + *ppData = pData; + + return MA_SUCCESS; +} + +MA_API ma_result ma_vfs_open_and_read_file(ma_vfs* pVFS, const char* pFilePath, void** ppData, size_t* pSize, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_vfs_open_and_read_file_ex(pVFS, pFilePath, NULL, ppData, pSize, pAllocationCallbacks); +} + +MA_API ma_result ma_vfs_open_and_read_file_w(ma_vfs* pVFS, const wchar_t* pFilePath, void** ppData, size_t* pSize, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_vfs_open_and_read_file_ex(pVFS, NULL, pFilePath, ppData, pSize, pAllocationCallbacks); +} + + + +/************************************************************************************************************************************************************** + +Decoding and Encoding Headers. These are auto-generated from a tool. + +**************************************************************************************************************************************************************/ +#if !defined(MA_NO_WAV) && (!defined(MA_NO_DECODING) || !defined(MA_NO_ENCODING)) +/* dr_wav_h begin */ +#ifndef ma_dr_wav_h +#define ma_dr_wav_h +#ifdef __cplusplus +extern "C" { +#endif +#define MA_DR_WAV_STRINGIFY(x) #x +#define MA_DR_WAV_XSTRINGIFY(x) MA_DR_WAV_STRINGIFY(x) +#define MA_DR_WAV_VERSION_MAJOR 0 +#define MA_DR_WAV_VERSION_MINOR 13 +#define MA_DR_WAV_VERSION_REVISION 12 +#define MA_DR_WAV_VERSION_STRING MA_DR_WAV_XSTRINGIFY(MA_DR_WAV_VERSION_MAJOR) "." MA_DR_WAV_XSTRINGIFY(MA_DR_WAV_VERSION_MINOR) "." 
MA_DR_WAV_XSTRINGIFY(MA_DR_WAV_VERSION_REVISION) +#include +#define MA_DR_WAVE_FORMAT_PCM 0x1 +#define MA_DR_WAVE_FORMAT_ADPCM 0x2 +#define MA_DR_WAVE_FORMAT_IEEE_FLOAT 0x3 +#define MA_DR_WAVE_FORMAT_ALAW 0x6 +#define MA_DR_WAVE_FORMAT_MULAW 0x7 +#define MA_DR_WAVE_FORMAT_DVI_ADPCM 0x11 +#define MA_DR_WAVE_FORMAT_EXTENSIBLE 0xFFFE +#define MA_DR_WAV_SEQUENTIAL 0x00000001 +#define MA_DR_WAV_WITH_METADATA 0x00000002 + MA_API void ma_dr_wav_version(ma_uint32* pMajor, ma_uint32* pMinor, ma_uint32* pRevision); + MA_API const char* ma_dr_wav_version_string(void); + typedef enum + { + ma_dr_wav_seek_origin_start, + ma_dr_wav_seek_origin_current + } ma_dr_wav_seek_origin; + typedef enum + { + ma_dr_wav_container_riff, + ma_dr_wav_container_rifx, + ma_dr_wav_container_w64, + ma_dr_wav_container_rf64, + ma_dr_wav_container_aiff + } ma_dr_wav_container; + typedef struct + { + union + { + ma_uint8 fourcc[4]; + ma_uint8 guid[16]; + } id; + ma_uint64 sizeInBytes; + unsigned int paddingSize; + } ma_dr_wav_chunk_header; + typedef struct + { + ma_uint16 formatTag; + ma_uint16 channels; + ma_uint32 sampleRate; + ma_uint32 avgBytesPerSec; + ma_uint16 blockAlign; + ma_uint16 bitsPerSample; + ma_uint16 extendedSize; + ma_uint16 validBitsPerSample; + ma_uint32 channelMask; + ma_uint8 subFormat[16]; + } ma_dr_wav_fmt; + MA_API ma_uint16 ma_dr_wav_fmt_get_format(const ma_dr_wav_fmt* pFMT); + typedef size_t (* ma_dr_wav_read_proc)(void* pUserData, void* pBufferOut, size_t bytesToRead); + typedef size_t (* ma_dr_wav_write_proc)(void* pUserData, const void* pData, size_t bytesToWrite); + typedef ma_bool32 (* ma_dr_wav_seek_proc)(void* pUserData, int offset, ma_dr_wav_seek_origin origin); + typedef ma_uint64 (* ma_dr_wav_chunk_proc)(void* pChunkUserData, ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pReadSeekUserData, const ma_dr_wav_chunk_header* pChunkHeader, ma_dr_wav_container container, const ma_dr_wav_fmt* pFMT); + typedef struct + { + const ma_uint8* data; + size_t dataSize; + size_t currentReadPos; + } ma_dr_wav__memory_stream; + typedef struct + { + void** ppData; + size_t* pDataSize; + size_t dataSize; + size_t dataCapacity; + size_t currentWritePos; + } ma_dr_wav__memory_stream_write; + typedef struct + { + ma_dr_wav_container container; + ma_uint32 format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_uint32 bitsPerSample; + } ma_dr_wav_data_format; + typedef enum + { + ma_dr_wav_metadata_type_none = 0, + ma_dr_wav_metadata_type_unknown = 1 << 0, + ma_dr_wav_metadata_type_smpl = 1 << 1, + ma_dr_wav_metadata_type_inst = 1 << 2, + ma_dr_wav_metadata_type_cue = 1 << 3, + ma_dr_wav_metadata_type_acid = 1 << 4, + ma_dr_wav_metadata_type_bext = 1 << 5, + ma_dr_wav_metadata_type_list_label = 1 << 6, + ma_dr_wav_metadata_type_list_note = 1 << 7, + ma_dr_wav_metadata_type_list_labelled_cue_region = 1 << 8, + ma_dr_wav_metadata_type_list_info_software = 1 << 9, + ma_dr_wav_metadata_type_list_info_copyright = 1 << 10, + ma_dr_wav_metadata_type_list_info_title = 1 << 11, + ma_dr_wav_metadata_type_list_info_artist = 1 << 12, + ma_dr_wav_metadata_type_list_info_comment = 1 << 13, + ma_dr_wav_metadata_type_list_info_date = 1 << 14, + ma_dr_wav_metadata_type_list_info_genre = 1 << 15, + ma_dr_wav_metadata_type_list_info_album = 1 << 16, + ma_dr_wav_metadata_type_list_info_tracknumber = 1 << 17, + ma_dr_wav_metadata_type_list_all_info_strings = ma_dr_wav_metadata_type_list_info_software + | ma_dr_wav_metadata_type_list_info_copyright + | ma_dr_wav_metadata_type_list_info_title + | 
ma_dr_wav_metadata_type_list_info_artist + | ma_dr_wav_metadata_type_list_info_comment + | ma_dr_wav_metadata_type_list_info_date + | ma_dr_wav_metadata_type_list_info_genre + | ma_dr_wav_metadata_type_list_info_album + | ma_dr_wav_metadata_type_list_info_tracknumber, + ma_dr_wav_metadata_type_list_all_adtl = ma_dr_wav_metadata_type_list_label + | ma_dr_wav_metadata_type_list_note + | ma_dr_wav_metadata_type_list_labelled_cue_region, + ma_dr_wav_metadata_type_all = -2, + ma_dr_wav_metadata_type_all_including_unknown = -1 + } ma_dr_wav_metadata_type; + typedef enum + { + ma_dr_wav_smpl_loop_type_forward = 0, + ma_dr_wav_smpl_loop_type_pingpong = 1, + ma_dr_wav_smpl_loop_type_backward = 2 + } ma_dr_wav_smpl_loop_type; + typedef struct + { + ma_uint32 cuePointId; + ma_uint32 type; + ma_uint32 firstSampleByteOffset; + ma_uint32 lastSampleByteOffset; + ma_uint32 sampleFraction; + ma_uint32 playCount; + } ma_dr_wav_smpl_loop; + typedef struct + { + ma_uint32 manufacturerId; + ma_uint32 productId; + ma_uint32 samplePeriodNanoseconds; + ma_uint32 midiUnityNote; + ma_uint32 midiPitchFraction; + ma_uint32 smpteFormat; + ma_uint32 smpteOffset; + ma_uint32 sampleLoopCount; + ma_uint32 samplerSpecificDataSizeInBytes; + ma_dr_wav_smpl_loop* pLoops; + ma_uint8* pSamplerSpecificData; + } ma_dr_wav_smpl; + typedef struct + { + ma_int8 midiUnityNote; + ma_int8 fineTuneCents; + ma_int8 gainDecibels; + ma_int8 lowNote; + ma_int8 highNote; + ma_int8 lowVelocity; + ma_int8 highVelocity; + } ma_dr_wav_inst; + typedef struct + { + ma_uint32 id; + ma_uint32 playOrderPosition; + ma_uint8 dataChunkId[4]; + ma_uint32 chunkStart; + ma_uint32 blockStart; + ma_uint32 sampleByteOffset; + } ma_dr_wav_cue_point; + typedef struct + { + ma_uint32 cuePointCount; + ma_dr_wav_cue_point *pCuePoints; + } ma_dr_wav_cue; + typedef enum + { + ma_dr_wav_acid_flag_one_shot = 1, + ma_dr_wav_acid_flag_root_note_set = 2, + ma_dr_wav_acid_flag_stretch = 4, + ma_dr_wav_acid_flag_disk_based = 8, + ma_dr_wav_acid_flag_acidizer = 16 + } ma_dr_wav_acid_flag; + typedef struct + { + ma_uint32 flags; + ma_uint16 midiUnityNote; + ma_uint16 reserved1; + float reserved2; + ma_uint32 numBeats; + ma_uint16 meterDenominator; + ma_uint16 meterNumerator; + float tempo; + } ma_dr_wav_acid; + typedef struct + { + ma_uint32 cuePointId; + ma_uint32 stringLength; + char* pString; + } ma_dr_wav_list_label_or_note; + typedef struct + { + char* pDescription; + char* pOriginatorName; + char* pOriginatorReference; + char pOriginationDate[10]; + char pOriginationTime[8]; + ma_uint64 timeReference; + ma_uint16 version; + char* pCodingHistory; + ma_uint32 codingHistorySize; + ma_uint8* pUMID; + ma_uint16 loudnessValue; + ma_uint16 loudnessRange; + ma_uint16 maxTruePeakLevel; + ma_uint16 maxMomentaryLoudness; + ma_uint16 maxShortTermLoudness; + } ma_dr_wav_bext; + typedef struct + { + ma_uint32 stringLength; + char* pString; + } ma_dr_wav_list_info_text; + typedef struct + { + ma_uint32 cuePointId; + ma_uint32 sampleLength; + ma_uint8 purposeId[4]; + ma_uint16 country; + ma_uint16 language; + ma_uint16 dialect; + ma_uint16 codePage; + ma_uint32 stringLength; + char* pString; + } ma_dr_wav_list_labelled_cue_region; + typedef enum + { + ma_dr_wav_metadata_location_invalid, + ma_dr_wav_metadata_location_top_level, + ma_dr_wav_metadata_location_inside_info_list, + ma_dr_wav_metadata_location_inside_adtl_list + } ma_dr_wav_metadata_location; + typedef struct + { + ma_uint8 id[4]; + ma_dr_wav_metadata_location chunkLocation; + ma_uint32 dataSizeInBytes; + ma_uint8* pData; + 
} ma_dr_wav_unknown_metadata; + typedef struct + { + ma_dr_wav_metadata_type type; + union + { + ma_dr_wav_cue cue; + ma_dr_wav_smpl smpl; + ma_dr_wav_acid acid; + ma_dr_wav_inst inst; + ma_dr_wav_bext bext; + ma_dr_wav_list_label_or_note labelOrNote; + ma_dr_wav_list_labelled_cue_region labelledCueRegion; + ma_dr_wav_list_info_text infoText; + ma_dr_wav_unknown_metadata unknown; + } data; + } ma_dr_wav_metadata; + typedef struct + { + ma_dr_wav_read_proc onRead; + ma_dr_wav_write_proc onWrite; + ma_dr_wav_seek_proc onSeek; + void* pUserData; + ma_allocation_callbacks allocationCallbacks; + ma_dr_wav_container container; + ma_dr_wav_fmt fmt; + ma_uint32 sampleRate; + ma_uint16 channels; + ma_uint16 bitsPerSample; + ma_uint16 translatedFormatTag; + ma_uint64 totalPCMFrameCount; + ma_uint64 dataChunkDataSize; + ma_uint64 dataChunkDataPos; + ma_uint64 bytesRemaining; + ma_uint64 readCursorInPCMFrames; + ma_uint64 dataChunkDataSizeTargetWrite; + ma_bool32 isSequentialWrite; + ma_dr_wav_metadata* pMetadata; + ma_uint32 metadataCount; + ma_dr_wav__memory_stream memoryStream; + ma_dr_wav__memory_stream_write memoryStreamWrite; + struct + { + ma_uint32 bytesRemainingInBlock; + ma_uint16 predictor[2]; + ma_int32 delta[2]; + ma_int32 cachedFrames[4]; + ma_uint32 cachedFrameCount; + ma_int32 prevFrames[2][2]; + } msadpcm; + struct + { + ma_uint32 bytesRemainingInBlock; + ma_int32 predictor[2]; + ma_int32 stepIndex[2]; + ma_int32 cachedFrames[16]; + ma_uint32 cachedFrameCount; + } ima; + struct + { + ma_bool8 isLE; + ma_bool8 isUnsigned; + } aiff; + } ma_dr_wav; + MA_API ma_bool32 ma_dr_wav_init(ma_dr_wav* pWav, ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_ex(ma_dr_wav* pWav, ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, ma_dr_wav_chunk_proc onChunk, void* pReadSeekUserData, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_with_metadata(ma_dr_wav* pWav, ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_write(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_dr_wav_write_proc onWrite, ma_dr_wav_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_write_sequential(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, ma_dr_wav_write_proc onWrite, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_write_sequential_pcm_frames(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_uint64 totalPCMFrameCount, ma_dr_wav_write_proc onWrite, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_write_with_metadata(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_dr_wav_write_proc onWrite, ma_dr_wav_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks, ma_dr_wav_metadata* pMetadata, ma_uint32 metadataCount); + MA_API ma_uint64 ma_dr_wav_target_write_size_bytes(const ma_dr_wav_data_format* pFormat, ma_uint64 totalFrameCount, ma_dr_wav_metadata* pMetadata, ma_uint32 metadataCount); + MA_API ma_dr_wav_metadata* ma_dr_wav_take_ownership_of_metadata(ma_dr_wav* pWav); + MA_API ma_result ma_dr_wav_uninit(ma_dr_wav* pWav); + MA_API size_t 
ma_dr_wav_read_raw(ma_dr_wav* pWav, size_t bytesToRead, void* pBufferOut); + MA_API ma_uint64 ma_dr_wav_read_pcm_frames(ma_dr_wav* pWav, ma_uint64 framesToRead, void* pBufferOut); + MA_API ma_uint64 ma_dr_wav_read_pcm_frames_le(ma_dr_wav* pWav, ma_uint64 framesToRead, void* pBufferOut); + MA_API ma_uint64 ma_dr_wav_read_pcm_frames_be(ma_dr_wav* pWav, ma_uint64 framesToRead, void* pBufferOut); + MA_API ma_bool32 ma_dr_wav_seek_to_pcm_frame(ma_dr_wav* pWav, ma_uint64 targetFrameIndex); + MA_API ma_result ma_dr_wav_get_cursor_in_pcm_frames(ma_dr_wav* pWav, ma_uint64* pCursor); + MA_API ma_result ma_dr_wav_get_length_in_pcm_frames(ma_dr_wav* pWav, ma_uint64* pLength); + MA_API size_t ma_dr_wav_write_raw(ma_dr_wav* pWav, size_t bytesToWrite, const void* pData); + MA_API ma_uint64 ma_dr_wav_write_pcm_frames(ma_dr_wav* pWav, ma_uint64 framesToWrite, const void* pData); + MA_API ma_uint64 ma_dr_wav_write_pcm_frames_le(ma_dr_wav* pWav, ma_uint64 framesToWrite, const void* pData); + MA_API ma_uint64 ma_dr_wav_write_pcm_frames_be(ma_dr_wav* pWav, ma_uint64 framesToWrite, const void* pData); +#ifndef MA_DR_WAV_NO_CONVERSION_API + MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s16(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut); + MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s16le(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut); + MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s16be(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut); + MA_API void ma_dr_wav_u8_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t sampleCount); + MA_API void ma_dr_wav_s24_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t sampleCount); + MA_API void ma_dr_wav_s32_to_s16(ma_int16* pOut, const ma_int32* pIn, size_t sampleCount); + MA_API void ma_dr_wav_f32_to_s16(ma_int16* pOut, const float* pIn, size_t sampleCount); + MA_API void ma_dr_wav_f64_to_s16(ma_int16* pOut, const double* pIn, size_t sampleCount); + MA_API void ma_dr_wav_alaw_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t sampleCount); + MA_API void ma_dr_wav_mulaw_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t sampleCount); + MA_API ma_uint64 ma_dr_wav_read_pcm_frames_f32(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut); + MA_API ma_uint64 ma_dr_wav_read_pcm_frames_f32le(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut); + MA_API ma_uint64 ma_dr_wav_read_pcm_frames_f32be(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut); + MA_API void ma_dr_wav_u8_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount); + MA_API void ma_dr_wav_s16_to_f32(float* pOut, const ma_int16* pIn, size_t sampleCount); + MA_API void ma_dr_wav_s24_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount); + MA_API void ma_dr_wav_s32_to_f32(float* pOut, const ma_int32* pIn, size_t sampleCount); + MA_API void ma_dr_wav_f64_to_f32(float* pOut, const double* pIn, size_t sampleCount); + MA_API void ma_dr_wav_alaw_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount); + MA_API void ma_dr_wav_mulaw_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount); + MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s32(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut); + MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s32le(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut); + MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s32be(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut); + MA_API void ma_dr_wav_u8_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t sampleCount); + MA_API void 
ma_dr_wav_s16_to_s32(ma_int32* pOut, const ma_int16* pIn, size_t sampleCount); + MA_API void ma_dr_wav_s24_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t sampleCount); + MA_API void ma_dr_wav_f32_to_s32(ma_int32* pOut, const float* pIn, size_t sampleCount); + MA_API void ma_dr_wav_f64_to_s32(ma_int32* pOut, const double* pIn, size_t sampleCount); + MA_API void ma_dr_wav_alaw_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t sampleCount); + MA_API void ma_dr_wav_mulaw_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t sampleCount); +#endif +#ifndef MA_DR_WAV_NO_STDIO + MA_API ma_bool32 ma_dr_wav_init_file(ma_dr_wav* pWav, const char* filename, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_file_ex(ma_dr_wav* pWav, const char* filename, ma_dr_wav_chunk_proc onChunk, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_file_w(ma_dr_wav* pWav, const wchar_t* filename, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_file_ex_w(ma_dr_wav* pWav, const wchar_t* filename, ma_dr_wav_chunk_proc onChunk, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_file_with_metadata(ma_dr_wav* pWav, const char* filename, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_file_with_metadata_w(ma_dr_wav* pWav, const wchar_t* filename, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_file_write(ma_dr_wav* pWav, const char* filename, const ma_dr_wav_data_format* pFormat, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_file_write_sequential(ma_dr_wav* pWav, const char* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_file_write_sequential_pcm_frames(ma_dr_wav* pWav, const char* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_file_write_w(ma_dr_wav* pWav, const wchar_t* filename, const ma_dr_wav_data_format* pFormat, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_file_write_sequential_w(ma_dr_wav* pWav, const wchar_t* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_file_write_sequential_pcm_frames_w(ma_dr_wav* pWav, const wchar_t* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +#endif + MA_API ma_bool32 ma_dr_wav_init_memory(ma_dr_wav* pWav, const void* data, size_t dataSize, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_memory_ex(ma_dr_wav* pWav, const void* data, size_t dataSize, ma_dr_wav_chunk_proc onChunk, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_memory_with_metadata(ma_dr_wav* pWav, const void* data, size_t dataSize, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_memory_write(ma_dr_wav* pWav, void** ppData, size_t* pDataSize, const ma_dr_wav_data_format* pFormat, const 
ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_memory_write_sequential(ma_dr_wav* pWav, void** ppData, size_t* pDataSize, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_bool32 ma_dr_wav_init_memory_write_sequential_pcm_frames(ma_dr_wav* pWav, void** ppData, size_t* pDataSize, const ma_dr_wav_data_format* pFormat, ma_uint64 totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +#ifndef MA_DR_WAV_NO_CONVERSION_API + MA_API ma_int16* ma_dr_wav_open_and_read_pcm_frames_s16(ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API float* ma_dr_wav_open_and_read_pcm_frames_f32(ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_int32* ma_dr_wav_open_and_read_pcm_frames_s32(ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); +#ifndef MA_DR_WAV_NO_STDIO + MA_API ma_int16* ma_dr_wav_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API float* ma_dr_wav_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_int32* ma_dr_wav_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_int16* ma_dr_wav_open_file_and_read_pcm_frames_s16_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API float* ma_dr_wav_open_file_and_read_pcm_frames_f32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_int32* ma_dr_wav_open_file_and_read_pcm_frames_s32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); +#endif + MA_API ma_int16* ma_dr_wav_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API float* ma_dr_wav_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_int32* ma_dr_wav_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); +#endif + MA_API void ma_dr_wav_free(void* p, const 
ma_allocation_callbacks* pAllocationCallbacks);
+ MA_API ma_uint16 ma_dr_wav_bytes_to_u16(const ma_uint8* data);
+ MA_API ma_int16 ma_dr_wav_bytes_to_s16(const ma_uint8* data);
+ MA_API ma_uint32 ma_dr_wav_bytes_to_u32(const ma_uint8* data);
+ MA_API ma_int32 ma_dr_wav_bytes_to_s32(const ma_uint8* data);
+ MA_API ma_uint64 ma_dr_wav_bytes_to_u64(const ma_uint8* data);
+ MA_API ma_int64 ma_dr_wav_bytes_to_s64(const ma_uint8* data);
+ MA_API float ma_dr_wav_bytes_to_f32(const ma_uint8* data);
+ MA_API ma_bool32 ma_dr_wav_guid_equal(const ma_uint8 a[16], const ma_uint8 b[16]);
+ MA_API ma_bool32 ma_dr_wav_fourcc_equal(const ma_uint8* a, const char* b);
+#ifdef __cplusplus
+}
+#endif
+#endif
+/* dr_wav_h end */
+#endif /* MA_NO_WAV */
+
+#if !defined(MA_NO_FLAC) && !defined(MA_NO_DECODING)
+/* dr_flac_h begin */
+#ifndef ma_dr_flac_h
+#define ma_dr_flac_h
+#ifdef __cplusplus
+extern "C" {
+#endif
+#define MA_DR_FLAC_STRINGIFY(x) #x
+#define MA_DR_FLAC_XSTRINGIFY(x) MA_DR_FLAC_STRINGIFY(x)
+#define MA_DR_FLAC_VERSION_MAJOR 0
+#define MA_DR_FLAC_VERSION_MINOR 12
+#define MA_DR_FLAC_VERSION_REVISION 41
+#define MA_DR_FLAC_VERSION_STRING MA_DR_FLAC_XSTRINGIFY(MA_DR_FLAC_VERSION_MAJOR) "." MA_DR_FLAC_XSTRINGIFY(MA_DR_FLAC_VERSION_MINOR) "." MA_DR_FLAC_XSTRINGIFY(MA_DR_FLAC_VERSION_REVISION)
+#include <stddef.h> /* For size_t. */
+#if defined(_MSC_VER) && _MSC_VER >= 1700
+#define MA_DR_FLAC_DEPRECATED __declspec(deprecated)
+#elif (defined(__GNUC__) && __GNUC__ >= 4)
+#define MA_DR_FLAC_DEPRECATED __attribute__((deprecated))
+#elif defined(__has_feature)
+#if __has_feature(attribute_deprecated)
+#define MA_DR_FLAC_DEPRECATED __attribute__((deprecated))
+#else
+#define MA_DR_FLAC_DEPRECATED
+#endif
+#else
+#define MA_DR_FLAC_DEPRECATED
+#endif
+ MA_API void ma_dr_flac_version(ma_uint32* pMajor, ma_uint32* pMinor, ma_uint32* pRevision);
+ MA_API const char* ma_dr_flac_version_string(void);
+#ifndef MA_DR_FLAC_BUFFER_SIZE
+#define MA_DR_FLAC_BUFFER_SIZE 4096
+#endif
+#ifdef MA_64BIT
+ typedef ma_uint64 ma_dr_flac_cache_t;
+#else
+ typedef ma_uint32 ma_dr_flac_cache_t;
+#endif
+#define MA_DR_FLAC_METADATA_BLOCK_TYPE_STREAMINFO 0
+#define MA_DR_FLAC_METADATA_BLOCK_TYPE_PADDING 1
+#define MA_DR_FLAC_METADATA_BLOCK_TYPE_APPLICATION 2
+#define MA_DR_FLAC_METADATA_BLOCK_TYPE_SEEKTABLE 3
+#define MA_DR_FLAC_METADATA_BLOCK_TYPE_VORBIS_COMMENT 4
+#define MA_DR_FLAC_METADATA_BLOCK_TYPE_CUESHEET 5
+#define MA_DR_FLAC_METADATA_BLOCK_TYPE_PICTURE 6
+#define MA_DR_FLAC_METADATA_BLOCK_TYPE_INVALID 127
+#define MA_DR_FLAC_PICTURE_TYPE_OTHER 0
+#define MA_DR_FLAC_PICTURE_TYPE_FILE_ICON 1
+#define MA_DR_FLAC_PICTURE_TYPE_OTHER_FILE_ICON 2
+#define MA_DR_FLAC_PICTURE_TYPE_COVER_FRONT 3
+#define MA_DR_FLAC_PICTURE_TYPE_COVER_BACK 4
+#define MA_DR_FLAC_PICTURE_TYPE_LEAFLET_PAGE 5
+#define MA_DR_FLAC_PICTURE_TYPE_MEDIA 6
+#define MA_DR_FLAC_PICTURE_TYPE_LEAD_ARTIST 7
+#define MA_DR_FLAC_PICTURE_TYPE_ARTIST 8
+#define MA_DR_FLAC_PICTURE_TYPE_CONDUCTOR 9
+#define MA_DR_FLAC_PICTURE_TYPE_BAND 10
+#define MA_DR_FLAC_PICTURE_TYPE_COMPOSER 11
+#define MA_DR_FLAC_PICTURE_TYPE_LYRICIST 12
+#define MA_DR_FLAC_PICTURE_TYPE_RECORDING_LOCATION 13
+#define MA_DR_FLAC_PICTURE_TYPE_DURING_RECORDING 14
+#define MA_DR_FLAC_PICTURE_TYPE_DURING_PERFORMANCE 15
+#define MA_DR_FLAC_PICTURE_TYPE_SCREEN_CAPTURE 16
+#define MA_DR_FLAC_PICTURE_TYPE_BRIGHT_COLORED_FISH 17
+#define MA_DR_FLAC_PICTURE_TYPE_ILLUSTRATION 18
+#define MA_DR_FLAC_PICTURE_TYPE_BAND_LOGOTYPE 19
+#define MA_DR_FLAC_PICTURE_TYPE_PUBLISHER_LOGOTYPE 20
+ typedef enum
+ {
+ ma_dr_flac_container_native, + 
ma_dr_flac_container_ogg, + ma_dr_flac_container_unknown + } ma_dr_flac_container; + typedef enum + { + ma_dr_flac_seek_origin_start, + ma_dr_flac_seek_origin_current + } ma_dr_flac_seek_origin; + typedef struct + { + ma_uint64 firstPCMFrame; + ma_uint64 flacFrameOffset; + ma_uint16 pcmFrameCount; + } ma_dr_flac_seekpoint; + typedef struct + { + ma_uint16 minBlockSizeInPCMFrames; + ma_uint16 maxBlockSizeInPCMFrames; + ma_uint32 minFrameSizeInPCMFrames; + ma_uint32 maxFrameSizeInPCMFrames; + ma_uint32 sampleRate; + ma_uint8 channels; + ma_uint8 bitsPerSample; + ma_uint64 totalPCMFrameCount; + ma_uint8 md5[16]; + } ma_dr_flac_streaminfo; + typedef struct + { + ma_uint32 type; + const void* pRawData; + ma_uint32 rawDataSize; + union + { + ma_dr_flac_streaminfo streaminfo; + struct + { + int unused; + } padding; + struct + { + ma_uint32 id; + const void* pData; + ma_uint32 dataSize; + } application; + struct + { + ma_uint32 seekpointCount; + const ma_dr_flac_seekpoint* pSeekpoints; + } seektable; + struct + { + ma_uint32 vendorLength; + const char* vendor; + ma_uint32 commentCount; + const void* pComments; + } vorbis_comment; + struct + { + char catalog[128]; + ma_uint64 leadInSampleCount; + ma_bool32 isCD; + ma_uint8 trackCount; + const void* pTrackData; + } cuesheet; + struct + { + ma_uint32 type; + ma_uint32 mimeLength; + const char* mime; + ma_uint32 descriptionLength; + const char* description; + ma_uint32 width; + ma_uint32 height; + ma_uint32 colorDepth; + ma_uint32 indexColorCount; + ma_uint32 pictureDataSize; + const ma_uint8* pPictureData; + } picture; + } data; + } ma_dr_flac_metadata; + typedef size_t (* ma_dr_flac_read_proc)(void* pUserData, void* pBufferOut, size_t bytesToRead); + typedef ma_bool32 (* ma_dr_flac_seek_proc)(void* pUserData, int offset, ma_dr_flac_seek_origin origin); + typedef void (* ma_dr_flac_meta_proc)(void* pUserData, ma_dr_flac_metadata* pMetadata); + typedef struct + { + const ma_uint8* data; + size_t dataSize; + size_t currentReadPos; + } ma_dr_flac__memory_stream; + typedef struct + { + ma_dr_flac_read_proc onRead; + ma_dr_flac_seek_proc onSeek; + void* pUserData; + size_t unalignedByteCount; + ma_dr_flac_cache_t unalignedCache; + ma_uint32 nextL2Line; + ma_uint32 consumedBits; + ma_dr_flac_cache_t cacheL2[MA_DR_FLAC_BUFFER_SIZE/sizeof(ma_dr_flac_cache_t)]; + ma_dr_flac_cache_t cache; + ma_uint16 crc16; + ma_dr_flac_cache_t crc16Cache; + ma_uint32 crc16CacheIgnoredBytes; + } ma_dr_flac_bs; + typedef struct + { + ma_uint8 subframeType; + ma_uint8 wastedBitsPerSample; + ma_uint8 lpcOrder; + ma_int32* pSamplesS32; + } ma_dr_flac_subframe; + typedef struct + { + ma_uint64 pcmFrameNumber; + ma_uint32 flacFrameNumber; + ma_uint32 sampleRate; + ma_uint16 blockSizeInPCMFrames; + ma_uint8 channelAssignment; + ma_uint8 bitsPerSample; + ma_uint8 crc8; + } ma_dr_flac_frame_header; + typedef struct + { + ma_dr_flac_frame_header header; + ma_uint32 pcmFramesRemaining; + ma_dr_flac_subframe subframes[8]; + } ma_dr_flac_frame; + typedef struct + { + ma_dr_flac_meta_proc onMeta; + void* pUserDataMD; + ma_allocation_callbacks allocationCallbacks; + ma_uint32 sampleRate; + ma_uint8 channels; + ma_uint8 bitsPerSample; + ma_uint16 maxBlockSizeInPCMFrames; + ma_uint64 totalPCMFrameCount; + ma_dr_flac_container container; + ma_uint32 seekpointCount; + ma_dr_flac_frame currentFLACFrame; + ma_uint64 currentPCMFrame; + ma_uint64 firstFLACFramePosInBytes; + ma_dr_flac__memory_stream memoryStream; + ma_int32* pDecodedSamples; + ma_dr_flac_seekpoint* pSeekpoints; + void* _oggbs; + 
ma_bool32 _noSeekTableSeek : 1; + ma_bool32 _noBinarySearchSeek : 1; + ma_bool32 _noBruteForceSeek : 1; + ma_dr_flac_bs bs; + ma_uint8 pExtraData[1]; + } ma_dr_flac; + MA_API ma_dr_flac* ma_dr_flac_open(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_dr_flac* ma_dr_flac_open_relaxed(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_container container, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_dr_flac* ma_dr_flac_open_with_metadata(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_dr_flac* ma_dr_flac_open_with_metadata_relaxed(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, ma_dr_flac_container container, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API void ma_dr_flac_close(ma_dr_flac* pFlac); + MA_API ma_uint64 ma_dr_flac_read_pcm_frames_s32(ma_dr_flac* pFlac, ma_uint64 framesToRead, ma_int32* pBufferOut); + MA_API ma_uint64 ma_dr_flac_read_pcm_frames_s16(ma_dr_flac* pFlac, ma_uint64 framesToRead, ma_int16* pBufferOut); + MA_API ma_uint64 ma_dr_flac_read_pcm_frames_f32(ma_dr_flac* pFlac, ma_uint64 framesToRead, float* pBufferOut); + MA_API ma_bool32 ma_dr_flac_seek_to_pcm_frame(ma_dr_flac* pFlac, ma_uint64 pcmFrameIndex); +#ifndef MA_DR_FLAC_NO_STDIO + MA_API ma_dr_flac* ma_dr_flac_open_file(const char* pFileName, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_dr_flac* ma_dr_flac_open_file_w(const wchar_t* pFileName, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_dr_flac* ma_dr_flac_open_file_with_metadata(const char* pFileName, ma_dr_flac_meta_proc onMeta, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_dr_flac* ma_dr_flac_open_file_with_metadata_w(const wchar_t* pFileName, ma_dr_flac_meta_proc onMeta, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); +#endif + MA_API ma_dr_flac* ma_dr_flac_open_memory(const void* pData, size_t dataSize, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_dr_flac* ma_dr_flac_open_memory_with_metadata(const void* pData, size_t dataSize, ma_dr_flac_meta_proc onMeta, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_int32* ma_dr_flac_open_and_read_pcm_frames_s32(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_int16* ma_dr_flac_open_and_read_pcm_frames_s16(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API float* ma_dr_flac_open_and_read_pcm_frames_f32(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +#ifndef MA_DR_FLAC_NO_STDIO + MA_API ma_int32* ma_dr_flac_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_int16* 
ma_dr_flac_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API float* ma_dr_flac_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +#endif + MA_API ma_int32* ma_dr_flac_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_int16* ma_dr_flac_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API float* ma_dr_flac_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API void ma_dr_flac_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks); + typedef struct + { + ma_uint32 countRemaining; + const char* pRunningData; + } ma_dr_flac_vorbis_comment_iterator; + MA_API void ma_dr_flac_init_vorbis_comment_iterator(ma_dr_flac_vorbis_comment_iterator* pIter, ma_uint32 commentCount, const void* pComments); + MA_API const char* ma_dr_flac_next_vorbis_comment(ma_dr_flac_vorbis_comment_iterator* pIter, ma_uint32* pCommentLengthOut); + typedef struct + { + ma_uint32 countRemaining; + const char* pRunningData; + } ma_dr_flac_cuesheet_track_iterator; + typedef struct + { + ma_uint64 offset; + ma_uint8 index; + ma_uint8 reserved[3]; + } ma_dr_flac_cuesheet_track_index; + typedef struct + { + ma_uint64 offset; + ma_uint8 trackNumber; + char ISRC[12]; + ma_bool8 isAudio; + ma_bool8 preEmphasis; + ma_uint8 indexCount; + const ma_dr_flac_cuesheet_track_index* pIndexPoints; + } ma_dr_flac_cuesheet_track; + MA_API void ma_dr_flac_init_cuesheet_track_iterator(ma_dr_flac_cuesheet_track_iterator* pIter, ma_uint32 trackCount, const void* pTrackData); + MA_API ma_bool32 ma_dr_flac_next_cuesheet_track(ma_dr_flac_cuesheet_track_iterator* pIter, ma_dr_flac_cuesheet_track* pCuesheetTrack); +#ifdef __cplusplus +} +#endif +#endif +/* dr_flac_h end */ +#endif /* MA_NO_FLAC */ + +#if !defined(MA_NO_MP3) && !defined(MA_NO_DECODING) +/* dr_mp3_h begin */ +#ifndef ma_dr_mp3_h +#define ma_dr_mp3_h +#ifdef __cplusplus +extern "C" { +#endif +#define MA_DR_MP3_STRINGIFY(x) #x +#define MA_DR_MP3_XSTRINGIFY(x) MA_DR_MP3_STRINGIFY(x) +#define MA_DR_MP3_VERSION_MAJOR 0 +#define MA_DR_MP3_VERSION_MINOR 6 +#define MA_DR_MP3_VERSION_REVISION 37 +#define MA_DR_MP3_VERSION_STRING MA_DR_MP3_XSTRINGIFY(MA_DR_MP3_VERSION_MAJOR) "." MA_DR_MP3_XSTRINGIFY(MA_DR_MP3_VERSION_MINOR) "." 
MA_DR_MP3_XSTRINGIFY(MA_DR_MP3_VERSION_REVISION)
+#include <stddef.h> /* For size_t. */
+#define MA_DR_MP3_MAX_PCM_FRAMES_PER_MP3_FRAME 1152
+#define MA_DR_MP3_MAX_SAMPLES_PER_FRAME (MA_DR_MP3_MAX_PCM_FRAMES_PER_MP3_FRAME*2)
+ MA_API void ma_dr_mp3_version(ma_uint32* pMajor, ma_uint32* pMinor, ma_uint32* pRevision);
+ MA_API const char* ma_dr_mp3_version_string(void);
+ typedef struct
+ {
+ int frame_bytes, channels, hz, layer, bitrate_kbps;
+ } ma_dr_mp3dec_frame_info;
+ typedef struct
+ {
+ float mdct_overlap[2][9*32], qmf_state[15*2*32];
+ int reserv, free_format_bytes;
+ ma_uint8 header[4], reserv_buf[511];
+ } ma_dr_mp3dec;
+ MA_API void ma_dr_mp3dec_init(ma_dr_mp3dec *dec);
+ MA_API int ma_dr_mp3dec_decode_frame(ma_dr_mp3dec *dec, const ma_uint8 *mp3, int mp3_bytes, void *pcm, ma_dr_mp3dec_frame_info *info);
+ MA_API void ma_dr_mp3dec_f32_to_s16(const float *in, ma_int16 *out, size_t num_samples);
+ typedef enum
+ {
+ ma_dr_mp3_seek_origin_start,
+ ma_dr_mp3_seek_origin_current
+ } ma_dr_mp3_seek_origin;
+ typedef struct
+ {
+ ma_uint64 seekPosInBytes;
+ ma_uint64 pcmFrameIndex;
+ ma_uint16 mp3FramesToDiscard;
+ ma_uint16 pcmFramesToDiscard;
+ } ma_dr_mp3_seek_point;
+ typedef size_t (* ma_dr_mp3_read_proc)(void* pUserData, void* pBufferOut, size_t bytesToRead);
+ typedef ma_bool32 (* ma_dr_mp3_seek_proc)(void* pUserData, int offset, ma_dr_mp3_seek_origin origin);
+ typedef struct
+ {
+ ma_uint32 channels;
+ ma_uint32 sampleRate;
+ } ma_dr_mp3_config;
+ typedef struct
+ {
+ ma_dr_mp3dec decoder;
+ ma_uint32 channels;
+ ma_uint32 sampleRate;
+ ma_dr_mp3_read_proc onRead;
+ ma_dr_mp3_seek_proc onSeek;
+ void* pUserData;
+ ma_allocation_callbacks allocationCallbacks;
+ ma_uint32 mp3FrameChannels;
+ ma_uint32 mp3FrameSampleRate;
+ ma_uint32 pcmFramesConsumedInMP3Frame;
+ ma_uint32 pcmFramesRemainingInMP3Frame;
+ ma_uint8 pcmFrames[sizeof(float)*MA_DR_MP3_MAX_SAMPLES_PER_FRAME];
+ ma_uint64 currentPCMFrame;
+ ma_uint64 streamCursor;
+ ma_dr_mp3_seek_point* pSeekPoints;
+ ma_uint32 seekPointCount;
+ size_t dataSize;
+ size_t dataCapacity;
+ size_t dataConsumed;
+ ma_uint8* pData;
+ ma_bool32 atEnd : 1;
+ struct
+ {
+ const ma_uint8* pData;
+ size_t dataSize;
+ size_t currentReadPos;
+ } memory;
+ } ma_dr_mp3;
+ MA_API ma_bool32 ma_dr_mp3_init(ma_dr_mp3* pMP3, ma_dr_mp3_read_proc onRead, ma_dr_mp3_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks);
+ MA_API ma_bool32 ma_dr_mp3_init_memory(ma_dr_mp3* pMP3, const void* pData, size_t dataSize, const ma_allocation_callbacks* pAllocationCallbacks);
+#ifndef MA_DR_MP3_NO_STDIO
+ MA_API ma_bool32 ma_dr_mp3_init_file(ma_dr_mp3* pMP3, const char* pFilePath, const ma_allocation_callbacks* pAllocationCallbacks);
+ MA_API ma_bool32 ma_dr_mp3_init_file_w(ma_dr_mp3* pMP3, const wchar_t* pFilePath, const ma_allocation_callbacks* pAllocationCallbacks);
+#endif
+ MA_API void ma_dr_mp3_uninit(ma_dr_mp3* pMP3);
+ MA_API ma_uint64 ma_dr_mp3_read_pcm_frames_f32(ma_dr_mp3* pMP3, ma_uint64 framesToRead, float* pBufferOut);
+ MA_API ma_uint64 ma_dr_mp3_read_pcm_frames_s16(ma_dr_mp3* pMP3, ma_uint64 framesToRead, ma_int16* pBufferOut);
+ MA_API ma_bool32 ma_dr_mp3_seek_to_pcm_frame(ma_dr_mp3* pMP3, ma_uint64 frameIndex);
+ MA_API ma_uint64 ma_dr_mp3_get_pcm_frame_count(ma_dr_mp3* pMP3);
+ MA_API ma_uint64 ma_dr_mp3_get_mp3_frame_count(ma_dr_mp3* pMP3);
+ MA_API ma_bool32 ma_dr_mp3_get_mp3_and_pcm_frame_count(ma_dr_mp3* pMP3, ma_uint64* pMP3FrameCount, ma_uint64* pPCMFrameCount);
+ MA_API ma_bool32 ma_dr_mp3_calculate_seek_points(ma_dr_mp3* pMP3, ma_uint32* 
pSeekPointCount, ma_dr_mp3_seek_point* pSeekPoints); + MA_API ma_bool32 ma_dr_mp3_bind_seek_table(ma_dr_mp3* pMP3, ma_uint32 seekPointCount, ma_dr_mp3_seek_point* pSeekPoints); + MA_API float* ma_dr_mp3_open_and_read_pcm_frames_f32(ma_dr_mp3_read_proc onRead, ma_dr_mp3_seek_proc onSeek, void* pUserData, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_int16* ma_dr_mp3_open_and_read_pcm_frames_s16(ma_dr_mp3_read_proc onRead, ma_dr_mp3_seek_proc onSeek, void* pUserData, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API float* ma_dr_mp3_open_memory_and_read_pcm_frames_f32(const void* pData, size_t dataSize, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_int16* ma_dr_mp3_open_memory_and_read_pcm_frames_s16(const void* pData, size_t dataSize, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +#ifndef MA_DR_MP3_NO_STDIO + MA_API float* ma_dr_mp3_open_file_and_read_pcm_frames_f32(const char* filePath, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API ma_int16* ma_dr_mp3_open_file_and_read_pcm_frames_s16(const char* filePath, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +#endif + MA_API void* ma_dr_mp3_malloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks); + MA_API void ma_dr_mp3_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks); +#ifdef __cplusplus +} +#endif +#endif +/* dr_mp3_h end */ +#endif /* MA_NO_MP3 */ + + +/************************************************************************************************************************************************************** + +Decoding + +**************************************************************************************************************************************************************/ +#ifndef MA_NO_DECODING + +static ma_result ma_decoder_read_bytes(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead, size_t* pBytesRead) +{ + MA_ASSERT(pDecoder != NULL); + + return pDecoder->onRead(pDecoder, pBufferOut, bytesToRead, pBytesRead); +} + +static ma_result ma_decoder_seek_bytes(ma_decoder* pDecoder, ma_int64 byteOffset, ma_seek_origin origin) +{ + MA_ASSERT(pDecoder != NULL); + + return pDecoder->onSeek(pDecoder, byteOffset, origin); +} + +static ma_result ma_decoder_tell_bytes(ma_decoder* pDecoder, ma_int64* pCursor) +{ + MA_ASSERT(pDecoder != NULL); + + if (pDecoder->onTell == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pDecoder->onTell(pDecoder, pCursor); +} + + +MA_API ma_decoding_backend_config ma_decoding_backend_config_init(ma_format preferredFormat, ma_uint32 seekPointCount) +{ + ma_decoding_backend_config config; + + MA_ZERO_OBJECT(&config); + config.preferredFormat = preferredFormat; + config.seekPointCount = seekPointCount; + + return config; +} + + +MA_API ma_decoder_config ma_decoder_config_init(ma_format outputFormat, ma_uint32 outputChannels, ma_uint32 outputSampleRate) +{ + ma_decoder_config config; + MA_ZERO_OBJECT(&config); + config.format = outputFormat; + config.channels = outputChannels; + config.sampleRate = outputSampleRate; + config.resampling = ma_resampler_config_init(ma_format_unknown, 0, 0, 0, ma_resample_algorithm_linear); /* Format/channels/rate doesn't matter 
here. */ + config.encodingFormat = ma_encoding_format_unknown; + + /* Note that we are intentionally leaving the channel map empty here which will cause the default channel map to be used. */ + + return config; +} + +MA_API ma_decoder_config ma_decoder_config_init_default() +{ + return ma_decoder_config_init(ma_format_unknown, 0, 0); +} + +MA_API ma_decoder_config ma_decoder_config_init_copy(const ma_decoder_config* pConfig) +{ + ma_decoder_config config; + if (pConfig != NULL) { + config = *pConfig; + } else { + MA_ZERO_OBJECT(&config); + } + + return config; +} + +static ma_result ma_decoder__init_data_converter(ma_decoder* pDecoder, const ma_decoder_config* pConfig) +{ + ma_result result; + ma_data_converter_config converterConfig; + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_channel internalChannelMap[MA_MAX_CHANNELS]; + + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pConfig != NULL); + + result = ma_data_source_get_data_format(pDecoder->pBackend, &internalFormat, &internalChannels, &internalSampleRate, internalChannelMap, ma_countof(internalChannelMap)); + if (result != MA_SUCCESS) { + return result; /* Failed to retrieve the internal data format. */ + } + + + /* Make sure we're not asking for too many channels. */ + if (pConfig->channels > MA_MAX_CHANNELS) { + return MA_INVALID_ARGS; + } + + /* The internal channels should have already been validated at a higher level, but we'll do it again explicitly here for safety. */ + if (internalChannels > MA_MAX_CHANNELS) { + return MA_INVALID_ARGS; + } + + + /* Output format. */ + if (pConfig->format == ma_format_unknown) { + pDecoder->outputFormat = internalFormat; + } else { + pDecoder->outputFormat = pConfig->format; + } + + if (pConfig->channels == 0) { + pDecoder->outputChannels = internalChannels; + } else { + pDecoder->outputChannels = pConfig->channels; + } + + if (pConfig->sampleRate == 0) { + pDecoder->outputSampleRate = internalSampleRate; + } else { + pDecoder->outputSampleRate = pConfig->sampleRate; + } + + converterConfig = ma_data_converter_config_init( + internalFormat, pDecoder->outputFormat, + internalChannels, pDecoder->outputChannels, + internalSampleRate, pDecoder->outputSampleRate + ); + converterConfig.pChannelMapIn = internalChannelMap; + converterConfig.pChannelMapOut = pConfig->pChannelMap; + converterConfig.channelMixMode = pConfig->channelMixMode; + converterConfig.ditherMode = pConfig->ditherMode; + converterConfig.allowDynamicSampleRate = MA_FALSE; /* Never allow dynamic sample rate conversion. Setting this to true will disable passthrough optimizations. */ + converterConfig.resampling = pConfig->resampling; + + result = ma_data_converter_init(&converterConfig, &pDecoder->allocationCallbacks, &pDecoder->converter); + if (result != MA_SUCCESS) { + return result; + } + + /* + Now that we have the decoder we need to determine whether or not we need a heap-allocated cache. We'll + need this if the data converter does not support calculation of the required input frame count. To + determine support for this we'll just run a test. + */ + { + ma_uint64 unused; + + result = ma_data_converter_get_required_input_frame_count(&pDecoder->converter, 1, &unused); + if (result != MA_SUCCESS) { + /* + We were unable to calculate the required input frame count which means we'll need to use + a heap-allocated cache. 
+ */ + ma_uint64 inputCacheCapSizeInBytes; + + pDecoder->inputCacheCap = MA_DATA_CONVERTER_STACK_BUFFER_SIZE / ma_get_bytes_per_frame(internalFormat, internalChannels); + + /* Not strictly necessary, but keeping here for safety in case we change the default value of pDecoder->inputCacheCap. */ + inputCacheCapSizeInBytes = pDecoder->inputCacheCap * ma_get_bytes_per_frame(internalFormat, internalChannels); + if (inputCacheCapSizeInBytes > MA_SIZE_MAX) { + ma_data_converter_uninit(&pDecoder->converter, &pDecoder->allocationCallbacks); + return MA_OUT_OF_MEMORY; + } + + pDecoder->pInputCache = ma_malloc((size_t)inputCacheCapSizeInBytes, &pDecoder->allocationCallbacks); /* Safe cast to size_t. */ + if (pDecoder->pInputCache == NULL) { + ma_data_converter_uninit(&pDecoder->converter, &pDecoder->allocationCallbacks); + return MA_OUT_OF_MEMORY; + } + } + } + + return MA_SUCCESS; +} + + + +static ma_result ma_decoder_internal_on_read__custom(void* pUserData, void* pBufferOut, size_t bytesToRead, size_t* pBytesRead) +{ + ma_decoder* pDecoder = (ma_decoder*)pUserData; + MA_ASSERT(pDecoder != NULL); + + return ma_decoder_read_bytes(pDecoder, pBufferOut, bytesToRead, pBytesRead); +} + +static ma_result ma_decoder_internal_on_seek__custom(void* pUserData, ma_int64 offset, ma_seek_origin origin) +{ + ma_decoder* pDecoder = (ma_decoder*)pUserData; + MA_ASSERT(pDecoder != NULL); + + return ma_decoder_seek_bytes(pDecoder, offset, origin); +} + +static ma_result ma_decoder_internal_on_tell__custom(void* pUserData, ma_int64* pCursor) +{ + ma_decoder* pDecoder = (ma_decoder*)pUserData; + MA_ASSERT(pDecoder != NULL); + + return ma_decoder_tell_bytes(pDecoder, pCursor); +} + + +static ma_result ma_decoder_init_from_vtable__internal(const ma_decoding_backend_vtable* pVTable, void* pVTableUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_decoding_backend_config backendConfig; + ma_data_source* pBackend; + + MA_ASSERT(pVTable != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + if (pVTable->onInit == NULL) { + return MA_NOT_IMPLEMENTED; + } + + backendConfig = ma_decoding_backend_config_init(pConfig->format, pConfig->seekPointCount); + + result = pVTable->onInit(pVTableUserData, ma_decoder_internal_on_read__custom, ma_decoder_internal_on_seek__custom, ma_decoder_internal_on_tell__custom, pDecoder, &backendConfig, &pDecoder->allocationCallbacks, &pBackend); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the backend from this vtable. */ + } + + /* Getting here means we were able to initialize the backend so we can now initialize the decoder. 
*/ + pDecoder->pBackend = pBackend; + pDecoder->pBackendVTable = pVTable; + pDecoder->pBackendUserData = pConfig->pCustomBackendUserData; + + return MA_SUCCESS; +} + +static ma_result ma_decoder_init_from_file__internal(const ma_decoding_backend_vtable* pVTable, void* pVTableUserData, const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_decoding_backend_config backendConfig; + ma_data_source* pBackend; + + MA_ASSERT(pVTable != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + if (pVTable->onInitFile == NULL) { + return MA_NOT_IMPLEMENTED; + } + + backendConfig = ma_decoding_backend_config_init(pConfig->format, pConfig->seekPointCount); + + result = pVTable->onInitFile(pVTableUserData, pFilePath, &backendConfig, &pDecoder->allocationCallbacks, &pBackend); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the backend from this vtable. */ + } + + /* Getting here means we were able to initialize the backend so we can now initialize the decoder. */ + pDecoder->pBackend = pBackend; + pDecoder->pBackendVTable = pVTable; + pDecoder->pBackendUserData = pConfig->pCustomBackendUserData; + + return MA_SUCCESS; +} + +static ma_result ma_decoder_init_from_file_w__internal(const ma_decoding_backend_vtable* pVTable, void* pVTableUserData, const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_decoding_backend_config backendConfig; + ma_data_source* pBackend; + + MA_ASSERT(pVTable != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + if (pVTable->onInitFileW == NULL) { + return MA_NOT_IMPLEMENTED; + } + + backendConfig = ma_decoding_backend_config_init(pConfig->format, pConfig->seekPointCount); + + result = pVTable->onInitFileW(pVTableUserData, pFilePath, &backendConfig, &pDecoder->allocationCallbacks, &pBackend); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the backend from this vtable. */ + } + + /* Getting here means we were able to initialize the backend so we can now initialize the decoder. */ + pDecoder->pBackend = pBackend; + pDecoder->pBackendVTable = pVTable; + pDecoder->pBackendUserData = pConfig->pCustomBackendUserData; + + return MA_SUCCESS; +} + +static ma_result ma_decoder_init_from_memory__internal(const ma_decoding_backend_vtable* pVTable, void* pVTableUserData, const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_decoding_backend_config backendConfig; + ma_data_source* pBackend; + + MA_ASSERT(pVTable != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + if (pVTable->onInitMemory == NULL) { + return MA_NOT_IMPLEMENTED; + } + + backendConfig = ma_decoding_backend_config_init(pConfig->format, pConfig->seekPointCount); + + result = pVTable->onInitMemory(pVTableUserData, pData, dataSize, &backendConfig, &pDecoder->allocationCallbacks, &pBackend); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the backend from this vtable. */ + } + + /* Getting here means we were able to initialize the backend so we can now initialize the decoder. 
*/ + pDecoder->pBackend = pBackend; + pDecoder->pBackendVTable = pVTable; + pDecoder->pBackendUserData = pConfig->pCustomBackendUserData; + + return MA_SUCCESS; +} + + + +static ma_result ma_decoder_init_custom__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = MA_NO_BACKEND; + size_t ivtable; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + if (pConfig->ppCustomBackendVTables == NULL) { + return MA_NO_BACKEND; + } + + /* The order each backend is listed is what defines the priority. */ + for (ivtable = 0; ivtable < pConfig->customBackendCount; ivtable += 1) { + const ma_decoding_backend_vtable* pVTable = pConfig->ppCustomBackendVTables[ivtable]; + if (pVTable != NULL) { + result = ma_decoder_init_from_vtable__internal(pVTable, pConfig->pCustomBackendUserData, pConfig, pDecoder); + if (result == MA_SUCCESS) { + return MA_SUCCESS; + } else { + /* Initialization failed. Move on to the next one, but seek back to the start first so the next vtable starts from the first byte of the file. */ + result = ma_decoder_seek_bytes(pDecoder, 0, ma_seek_origin_start); + if (result != MA_SUCCESS) { + return result; /* Failed to seek back to the start. */ + } + } + } else { + /* No vtable. */ + } + } + + /* Getting here means we couldn't find a backend. */ + return MA_NO_BACKEND; +} + +static ma_result ma_decoder_init_custom_from_file__internal(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = MA_NO_BACKEND; + size_t ivtable; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + if (pConfig->ppCustomBackendVTables == NULL) { + return MA_NO_BACKEND; + } + + /* The order each backend is listed is what defines the priority. */ + for (ivtable = 0; ivtable < pConfig->customBackendCount; ivtable += 1) { + const ma_decoding_backend_vtable* pVTable = pConfig->ppCustomBackendVTables[ivtable]; + if (pVTable != NULL) { + result = ma_decoder_init_from_file__internal(pVTable, pConfig->pCustomBackendUserData, pFilePath, pConfig, pDecoder); + if (result == MA_SUCCESS) { + return MA_SUCCESS; + } + } else { + /* No vtable. */ + } + } + + /* Getting here means we couldn't find a backend. */ + return MA_NO_BACKEND; +} + +static ma_result ma_decoder_init_custom_from_file_w__internal(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = MA_NO_BACKEND; + size_t ivtable; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + if (pConfig->ppCustomBackendVTables == NULL) { + return MA_NO_BACKEND; + } + + /* The order each backend is listed is what defines the priority. */ + for (ivtable = 0; ivtable < pConfig->customBackendCount; ivtable += 1) { + const ma_decoding_backend_vtable* pVTable = pConfig->ppCustomBackendVTables[ivtable]; + if (pVTable != NULL) { + result = ma_decoder_init_from_file_w__internal(pVTable, pConfig->pCustomBackendUserData, pFilePath, pConfig, pDecoder); + if (result == MA_SUCCESS) { + return MA_SUCCESS; + } + } else { + /* No vtable. */ + } + } + + /* Getting here means we couldn't find a backend. 
*/ + return MA_NO_BACKEND; +} + +static ma_result ma_decoder_init_custom_from_memory__internal(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = MA_NO_BACKEND; + size_t ivtable; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + if (pConfig->ppCustomBackendVTables == NULL) { + return MA_NO_BACKEND; + } + + /* The order each backend is listed is what defines the priority. */ + for (ivtable = 0; ivtable < pConfig->customBackendCount; ivtable += 1) { + const ma_decoding_backend_vtable* pVTable = pConfig->ppCustomBackendVTables[ivtable]; + if (pVTable != NULL) { + result = ma_decoder_init_from_memory__internal(pVTable, pConfig->pCustomBackendUserData, pData, dataSize, pConfig, pDecoder); + if (result == MA_SUCCESS) { + return MA_SUCCESS; + } + } else { + /* No vtable. */ + } + } + + /* Getting here means we couldn't find a backend. */ + return MA_NO_BACKEND; +} + + +/* WAV */ +#ifdef ma_dr_wav_h +#define MA_HAS_WAV + +typedef struct +{ + ma_data_source_base ds; + ma_read_proc onRead; + ma_seek_proc onSeek; + ma_tell_proc onTell; + void* pReadSeekTellUserData; + ma_format format; /* Can be f32, s16 or s32. */ +#if !defined(MA_NO_WAV) + ma_dr_wav dr; +#endif +} ma_wav; + +MA_API ma_result ma_wav_init(ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_wav* pWav); +MA_API ma_result ma_wav_init_file(const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_wav* pWav); +MA_API ma_result ma_wav_init_file_w(const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_wav* pWav); +MA_API ma_result ma_wav_init_memory(const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_wav* pWav); +MA_API void ma_wav_uninit(ma_wav* pWav, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_wav_read_pcm_frames(ma_wav* pWav, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); +MA_API ma_result ma_wav_seek_to_pcm_frame(ma_wav* pWav, ma_uint64 frameIndex); +MA_API ma_result ma_wav_get_data_format(ma_wav* pWav, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); +MA_API ma_result ma_wav_get_cursor_in_pcm_frames(ma_wav* pWav, ma_uint64* pCursor); +MA_API ma_result ma_wav_get_length_in_pcm_frames(ma_wav* pWav, ma_uint64* pLength); + + +static ma_result ma_wav_ds_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_wav_read_pcm_frames((ma_wav*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_wav_ds_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_wav_seek_to_pcm_frame((ma_wav*)pDataSource, frameIndex); +} + +static ma_result ma_wav_ds_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + return ma_wav_get_data_format((ma_wav*)pDataSource, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); +} + +static ma_result ma_wav_ds_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + return ma_wav_get_cursor_in_pcm_frames((ma_wav*)pDataSource, pCursor); +} + +static ma_result 
ma_wav_ds_get_length(ma_data_source* pDataSource, ma_uint64* pLength) +{ + return ma_wav_get_length_in_pcm_frames((ma_wav*)pDataSource, pLength); +} + +static ma_data_source_vtable g_ma_wav_ds_vtable = +{ + ma_wav_ds_read, + ma_wav_ds_seek, + ma_wav_ds_get_data_format, + ma_wav_ds_get_cursor, + ma_wav_ds_get_length, + NULL, /* onSetLooping */ + 0 +}; + + +#if !defined(MA_NO_WAV) +static size_t ma_wav_dr_callback__read(void* pUserData, void* pBufferOut, size_t bytesToRead) +{ + ma_wav* pWav = (ma_wav*)pUserData; + ma_result result; + size_t bytesRead; + + MA_ASSERT(pWav != NULL); + + result = pWav->onRead(pWav->pReadSeekTellUserData, pBufferOut, bytesToRead, &bytesRead); + (void)result; + + return bytesRead; +} + +static ma_bool32 ma_wav_dr_callback__seek(void* pUserData, int offset, ma_dr_wav_seek_origin origin) +{ + ma_wav* pWav = (ma_wav*)pUserData; + ma_result result; + ma_seek_origin maSeekOrigin; + + MA_ASSERT(pWav != NULL); + + maSeekOrigin = ma_seek_origin_start; + if (origin == ma_dr_wav_seek_origin_current) { + maSeekOrigin = ma_seek_origin_current; + } + + result = pWav->onSeek(pWav->pReadSeekTellUserData, offset, maSeekOrigin); + if (result != MA_SUCCESS) { + return MA_FALSE; + } + + return MA_TRUE; +} +#endif + +static ma_result ma_wav_init_internal(const ma_decoding_backend_config* pConfig, ma_wav* pWav) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + + if (pWav == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pWav); + pWav->format = ma_format_unknown; /* Use closest match to source file by default. */ + + if (pConfig != NULL && (pConfig->preferredFormat == ma_format_f32 || pConfig->preferredFormat == ma_format_s16 || pConfig->preferredFormat == ma_format_s32)) { + pWav->format = pConfig->preferredFormat; + } else { + /* Getting here means something other than f32 and s16 was specified. Just leave this unset to use the default format. */ + } + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_wav_ds_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pWav->ds); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the base data source. */ + } + + return MA_SUCCESS; +} + +static ma_result ma_wav_post_init(ma_wav* pWav) +{ + /* + If an explicit format was not specified, try picking the closest match based on the internal + format. The format needs to be supported by miniaudio. + */ + if (pWav->format == ma_format_unknown) { + switch (pWav->dr.translatedFormatTag) + { + case MA_DR_WAVE_FORMAT_PCM: + { + if (pWav->dr.bitsPerSample == 8) { + pWav->format = ma_format_u8; + } else if (pWav->dr.bitsPerSample == 16) { + pWav->format = ma_format_s16; + } else if (pWav->dr.bitsPerSample == 24) { + pWav->format = ma_format_s24; + } else if (pWav->dr.bitsPerSample == 32) { + pWav->format = ma_format_s32; + } + } break; + + case MA_DR_WAVE_FORMAT_IEEE_FLOAT: + { + if (pWav->dr.bitsPerSample == 32) { + pWav->format = ma_format_f32; + } + } break; + + default: break; + } + + /* Fall back to f32 if we couldn't find anything. 
*/ + if (pWav->format == ma_format_unknown) { + pWav->format = ma_format_f32; + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_wav_init(ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_wav* pWav) +{ + ma_result result; + + result = ma_wav_init_internal(pConfig, pWav); + if (result != MA_SUCCESS) { + return result; + } + + if (onRead == NULL || onSeek == NULL) { + return MA_INVALID_ARGS; /* onRead and onSeek are mandatory. */ + } + + pWav->onRead = onRead; + pWav->onSeek = onSeek; + pWav->onTell = onTell; + pWav->pReadSeekTellUserData = pReadSeekTellUserData; + +#if !defined(MA_NO_WAV) + { + ma_bool32 wavResult; + + wavResult = ma_dr_wav_init(&pWav->dr, ma_wav_dr_callback__read, ma_wav_dr_callback__seek, pWav, pAllocationCallbacks); + if (wavResult != MA_TRUE) { + return MA_INVALID_FILE; + } + + ma_wav_post_init(pWav); + + return MA_SUCCESS; + } +#else + { + /* wav is disabled. */ + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_wav_init_file(const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_wav* pWav) +{ + ma_result result; + + result = ma_wav_init_internal(pConfig, pWav); + if (result != MA_SUCCESS) { + return result; + } + +#if !defined(MA_NO_WAV) + { + ma_bool32 wavResult; + + wavResult = ma_dr_wav_init_file(&pWav->dr, pFilePath, pAllocationCallbacks); + if (wavResult != MA_TRUE) { + return MA_INVALID_FILE; + } + + ma_wav_post_init(pWav); + + return MA_SUCCESS; + } +#else + { + /* wav is disabled. */ + (void)pFilePath; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_wav_init_file_w(const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_wav* pWav) +{ + ma_result result; + + result = ma_wav_init_internal(pConfig, pWav); + if (result != MA_SUCCESS) { + return result; + } + +#if !defined(MA_NO_WAV) + { + ma_bool32 wavResult; + + wavResult = ma_dr_wav_init_file_w(&pWav->dr, pFilePath, pAllocationCallbacks); + if (wavResult != MA_TRUE) { + return MA_INVALID_FILE; + } + + ma_wav_post_init(pWav); + + return MA_SUCCESS; + } +#else + { + /* wav is disabled. */ + (void)pFilePath; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_wav_init_memory(const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_wav* pWav) +{ + ma_result result; + + result = ma_wav_init_internal(pConfig, pWav); + if (result != MA_SUCCESS) { + return result; + } + +#if !defined(MA_NO_WAV) + { + ma_bool32 wavResult; + + wavResult = ma_dr_wav_init_memory(&pWav->dr, pData, dataSize, pAllocationCallbacks); + if (wavResult != MA_TRUE) { + return MA_INVALID_FILE; + } + + ma_wav_post_init(pWav); + + return MA_SUCCESS; + } +#else + { + /* wav is disabled. */ + (void)pData; + (void)dataSize; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API void ma_wav_uninit(ma_wav* pWav, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pWav == NULL) { + return; + } + + (void)pAllocationCallbacks; + +#if !defined(MA_NO_WAV) + { + ma_dr_wav_uninit(&pWav->dr); + } +#else + { + /* wav is disabled. Should never hit this since initialization would have failed. 
*/ + MA_ASSERT(MA_FALSE); + } +#endif + + ma_data_source_uninit(&pWav->ds); +} + +MA_API ma_result ma_wav_read_pcm_frames(ma_wav* pWav, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if (pWav == NULL) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_NO_WAV) + { + /* We always use floating point format. */ + ma_result result = MA_SUCCESS; /* Must be initialized to MA_SUCCESS. */ + ma_uint64 totalFramesRead = 0; + ma_format format; + + ma_wav_get_data_format(pWav, &format, NULL, NULL, NULL, 0); + + switch (format) + { + case ma_format_f32: + { + totalFramesRead = ma_dr_wav_read_pcm_frames_f32(&pWav->dr, frameCount, (float*)pFramesOut); + } break; + + case ma_format_s16: + { + totalFramesRead = ma_dr_wav_read_pcm_frames_s16(&pWav->dr, frameCount, (ma_int16*)pFramesOut); + } break; + + case ma_format_s32: + { + totalFramesRead = ma_dr_wav_read_pcm_frames_s32(&pWav->dr, frameCount, (ma_int32*)pFramesOut); + } break; + + /* Fallback to a raw read. */ + case ma_format_unknown: return MA_INVALID_OPERATION; /* <-- this should never be hit because initialization would just fall back to a supported format. */ + default: + { + totalFramesRead = ma_dr_wav_read_pcm_frames(&pWav->dr, frameCount, pFramesOut); + } break; + } + + /* In the future we'll update ma_dr_wav to return MA_AT_END for us. */ + if (totalFramesRead == 0) { + result = MA_AT_END; + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesRead; + } + + if (result == MA_SUCCESS && totalFramesRead == 0) { + result = MA_AT_END; + } + + return result; + } +#else + { + /* wav is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + + (void)pFramesOut; + (void)frameCount; + (void)pFramesRead; + + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_wav_seek_to_pcm_frame(ma_wav* pWav, ma_uint64 frameIndex) +{ + if (pWav == NULL) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_NO_WAV) + { + ma_bool32 wavResult; + + wavResult = ma_dr_wav_seek_to_pcm_frame(&pWav->dr, frameIndex); + if (wavResult != MA_TRUE) { + return MA_ERROR; + } + + return MA_SUCCESS; + } +#else + { + /* wav is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + + (void)frameIndex; + + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_wav_get_data_format(ma_wav* pWav, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + /* Defaults for safety. */ + if (pFormat != NULL) { + *pFormat = ma_format_unknown; + } + if (pChannels != NULL) { + *pChannels = 0; + } + if (pSampleRate != NULL) { + *pSampleRate = 0; + } + if (pChannelMap != NULL) { + MA_ZERO_MEMORY(pChannelMap, sizeof(*pChannelMap) * channelMapCap); + } + + if (pWav == NULL) { + return MA_INVALID_OPERATION; + } + + if (pFormat != NULL) { + *pFormat = pWav->format; + } + +#if !defined(MA_NO_WAV) + { + if (pChannels != NULL) { + *pChannels = pWav->dr.channels; + } + + if (pSampleRate != NULL) { + *pSampleRate = pWav->dr.sampleRate; + } + + if (pChannelMap != NULL) { + ma_channel_map_init_standard(ma_standard_channel_map_microsoft, pChannelMap, channelMapCap, pWav->dr.channels); + } + + return MA_SUCCESS; + } +#else + { + /* wav is disabled. Should never hit this since initialization would have failed. 
*/ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_wav_get_cursor_in_pcm_frames(ma_wav* pWav, ma_uint64* pCursor) +{ + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; /* Safety. */ + + if (pWav == NULL) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_NO_WAV) + { + ma_result wavResult = ma_dr_wav_get_cursor_in_pcm_frames(&pWav->dr, pCursor); + if (wavResult != MA_SUCCESS) { + return (ma_result)wavResult; /* ma_dr_wav result codes map to miniaudio's. */ + } + + return MA_SUCCESS; + } +#else + { + /* wav is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_wav_get_length_in_pcm_frames(ma_wav* pWav, ma_uint64* pLength) +{ + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; /* Safety. */ + + if (pWav == NULL) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_NO_WAV) + { + ma_result wavResult = ma_dr_wav_get_length_in_pcm_frames(&pWav->dr, pLength); + if (wavResult != MA_SUCCESS) { + return (ma_result)wavResult; /* ma_dr_wav result codes map to miniaudio's. */ + } + + return MA_SUCCESS; + } +#else + { + /* wav is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } +#endif +} + + +static ma_result ma_decoding_backend_init__wav(void* pUserData, ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_wav* pWav; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pWav = (ma_wav*)ma_malloc(sizeof(*pWav), pAllocationCallbacks); + if (pWav == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_wav_init(onRead, onSeek, onTell, pReadSeekTellUserData, pConfig, pAllocationCallbacks, pWav); + if (result != MA_SUCCESS) { + ma_free(pWav, pAllocationCallbacks); + return result; + } + + *ppBackend = pWav; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_file__wav(void* pUserData, const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_wav* pWav; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. 
*/ + pWav = (ma_wav*)ma_malloc(sizeof(*pWav), pAllocationCallbacks); + if (pWav == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_wav_init_file(pFilePath, pConfig, pAllocationCallbacks, pWav); + if (result != MA_SUCCESS) { + ma_free(pWav, pAllocationCallbacks); + return result; + } + + *ppBackend = pWav; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_file_w__wav(void* pUserData, const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_wav* pWav; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pWav = (ma_wav*)ma_malloc(sizeof(*pWav), pAllocationCallbacks); + if (pWav == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_wav_init_file_w(pFilePath, pConfig, pAllocationCallbacks, pWav); + if (result != MA_SUCCESS) { + ma_free(pWav, pAllocationCallbacks); + return result; + } + + *ppBackend = pWav; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_memory__wav(void* pUserData, const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_wav* pWav; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pWav = (ma_wav*)ma_malloc(sizeof(*pWav), pAllocationCallbacks); + if (pWav == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_wav_init_memory(pData, dataSize, pConfig, pAllocationCallbacks, pWav); + if (result != MA_SUCCESS) { + ma_free(pWav, pAllocationCallbacks); + return result; + } + + *ppBackend = pWav; + + return MA_SUCCESS; +} + +static void ma_decoding_backend_uninit__wav(void* pUserData, ma_data_source* pBackend, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_wav* pWav = (ma_wav*)pBackend; + + (void)pUserData; + + ma_wav_uninit(pWav, pAllocationCallbacks); + ma_free(pWav, pAllocationCallbacks); +} + +static ma_decoding_backend_vtable g_ma_decoding_backend_vtable_wav = +{ + ma_decoding_backend_init__wav, + ma_decoding_backend_init_file__wav, + ma_decoding_backend_init_file_w__wav, + ma_decoding_backend_init_memory__wav, + ma_decoding_backend_uninit__wav +}; + +static ma_result ma_decoder_init_wav__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_vtable__internal(&g_ma_decoding_backend_vtable_wav, NULL, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_wav_from_file__internal(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_file__internal(&g_ma_decoding_backend_vtable_wav, NULL, pFilePath, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_wav_from_file_w__internal(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_file_w__internal(&g_ma_decoding_backend_vtable_wav, NULL, pFilePath, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_wav_from_memory__internal(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* 
pDecoder) +{ + return ma_decoder_init_from_memory__internal(&g_ma_decoding_backend_vtable_wav, NULL, pData, dataSize, pConfig, pDecoder); +} +#endif /* ma_dr_wav_h */ + +/* FLAC */ +#ifdef ma_dr_flac_h +#define MA_HAS_FLAC + +typedef struct +{ + ma_data_source_base ds; + ma_read_proc onRead; + ma_seek_proc onSeek; + ma_tell_proc onTell; + void* pReadSeekTellUserData; + ma_format format; /* Can be f32, s16 or s32. */ +#if !defined(MA_NO_FLAC) + ma_dr_flac* dr; +#endif +} ma_flac; + +MA_API ma_result ma_flac_init(ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_flac* pFlac); +MA_API ma_result ma_flac_init_file(const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_flac* pFlac); +MA_API ma_result ma_flac_init_file_w(const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_flac* pFlac); +MA_API ma_result ma_flac_init_memory(const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_flac* pFlac); +MA_API void ma_flac_uninit(ma_flac* pFlac, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_flac_read_pcm_frames(ma_flac* pFlac, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); +MA_API ma_result ma_flac_seek_to_pcm_frame(ma_flac* pFlac, ma_uint64 frameIndex); +MA_API ma_result ma_flac_get_data_format(ma_flac* pFlac, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); +MA_API ma_result ma_flac_get_cursor_in_pcm_frames(ma_flac* pFlac, ma_uint64* pCursor); +MA_API ma_result ma_flac_get_length_in_pcm_frames(ma_flac* pFlac, ma_uint64* pLength); + + +static ma_result ma_flac_ds_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_flac_read_pcm_frames((ma_flac*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_flac_ds_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_flac_seek_to_pcm_frame((ma_flac*)pDataSource, frameIndex); +} + +static ma_result ma_flac_ds_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + return ma_flac_get_data_format((ma_flac*)pDataSource, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); +} + +static ma_result ma_flac_ds_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + return ma_flac_get_cursor_in_pcm_frames((ma_flac*)pDataSource, pCursor); +} + +static ma_result ma_flac_ds_get_length(ma_data_source* pDataSource, ma_uint64* pLength) +{ + return ma_flac_get_length_in_pcm_frames((ma_flac*)pDataSource, pLength); +} + +static ma_data_source_vtable g_ma_flac_ds_vtable = +{ + ma_flac_ds_read, + ma_flac_ds_seek, + ma_flac_ds_get_data_format, + ma_flac_ds_get_cursor, + ma_flac_ds_get_length, + NULL, /* onSetLooping */ + 0 +}; + + +#if !defined(MA_NO_FLAC) +static size_t ma_flac_dr_callback__read(void* pUserData, void* pBufferOut, size_t bytesToRead) +{ + ma_flac* pFlac = (ma_flac*)pUserData; + ma_result result; + size_t bytesRead; + + MA_ASSERT(pFlac != NULL); + + result = pFlac->onRead(pFlac->pReadSeekTellUserData, pBufferOut, bytesToRead, &bytesRead); + (void)result; + + return bytesRead; +} + 
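+/*
+Illustrative sketch (not part of the upstream miniaudio source): the read callback above, together
+with the seek callback that follows, simply bridges the onRead/onSeek procs stored in ma_flac onto
+dr_flac's callback style. A caller driving ma_flac_init() directly supplies those procs itself; a
+minimal stdio-backed pair could look roughly like the following. The parameter types are inferred
+from the call sites in this file (a size_t byte count, a size_t* bytes-read output, an offset plus
+a ma_seek_origin) - check the ma_read_proc/ma_seek_proc typedefs declared elsewhere in this file
+before relying on them. The file name and the error handling are placeholders.
+
+    static ma_result my_flac_on_read(void* pUserData, void* pDst, size_t bytesToRead, size_t* pBytesRead)
+    {
+        *pBytesRead = fread(pDst, 1, bytesToRead, (FILE*)pUserData);   // <stdio.h>
+        return (*pBytesRead > 0 || bytesToRead == 0) ? MA_SUCCESS : MA_AT_END;
+    }
+
+    static ma_result my_flac_on_seek(void* pUserData, ma_int64 offset, ma_seek_origin origin)
+    {
+        int whence = (origin == ma_seek_origin_current) ? SEEK_CUR : SEEK_SET;   // origin_end omitted for brevity
+        return (fseek((FILE*)pUserData, (long)offset, whence) == 0) ? MA_SUCCESS : MA_ERROR;
+    }
+
+    FILE* pFile = fopen("sound.flac", "rb");   // hypothetical path
+    ma_flac flac;
+    ma_result r = ma_flac_init(my_flac_on_read, my_flac_on_seek, NULL, pFile, NULL, NULL, &flac);
+*/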
+static ma_bool32 ma_flac_dr_callback__seek(void* pUserData, int offset, ma_dr_flac_seek_origin origin) +{ + ma_flac* pFlac = (ma_flac*)pUserData; + ma_result result; + ma_seek_origin maSeekOrigin; + + MA_ASSERT(pFlac != NULL); + + maSeekOrigin = ma_seek_origin_start; + if (origin == ma_dr_flac_seek_origin_current) { + maSeekOrigin = ma_seek_origin_current; + } + + result = pFlac->onSeek(pFlac->pReadSeekTellUserData, offset, maSeekOrigin); + if (result != MA_SUCCESS) { + return MA_FALSE; + } + + return MA_TRUE; +} +#endif + +static ma_result ma_flac_init_internal(const ma_decoding_backend_config* pConfig, ma_flac* pFlac) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + + if (pFlac == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFlac); + pFlac->format = ma_format_f32; /* f32 by default. */ + + if (pConfig != NULL && (pConfig->preferredFormat == ma_format_f32 || pConfig->preferredFormat == ma_format_s16 || pConfig->preferredFormat == ma_format_s32)) { + pFlac->format = pConfig->preferredFormat; + } else { + /* Getting here means something other than f32 and s16 was specified. Just leave this unset to use the default format. */ + } + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_flac_ds_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pFlac->ds); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the base data source. */ + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_flac_init(ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_flac* pFlac) +{ + ma_result result; + + result = ma_flac_init_internal(pConfig, pFlac); + if (result != MA_SUCCESS) { + return result; + } + + if (onRead == NULL || onSeek == NULL) { + return MA_INVALID_ARGS; /* onRead and onSeek are mandatory. */ + } + + pFlac->onRead = onRead; + pFlac->onSeek = onSeek; + pFlac->onTell = onTell; + pFlac->pReadSeekTellUserData = pReadSeekTellUserData; + +#if !defined(MA_NO_FLAC) + { + pFlac->dr = ma_dr_flac_open(ma_flac_dr_callback__read, ma_flac_dr_callback__seek, pFlac, pAllocationCallbacks); + if (pFlac->dr == NULL) { + return MA_INVALID_FILE; + } + + return MA_SUCCESS; + } +#else + { + /* flac is disabled. */ + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_flac_init_file(const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_flac* pFlac) +{ + ma_result result; + + result = ma_flac_init_internal(pConfig, pFlac); + if (result != MA_SUCCESS) { + return result; + } + +#if !defined(MA_NO_FLAC) + { + pFlac->dr = ma_dr_flac_open_file(pFilePath, pAllocationCallbacks); + if (pFlac->dr == NULL) { + return MA_INVALID_FILE; + } + + return MA_SUCCESS; + } +#else + { + /* flac is disabled. 
*/ + (void)pFilePath; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_flac_init_file_w(const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_flac* pFlac) +{ + ma_result result; + + result = ma_flac_init_internal(pConfig, pFlac); + if (result != MA_SUCCESS) { + return result; + } + +#if !defined(MA_NO_FLAC) + { + pFlac->dr = ma_dr_flac_open_file_w(pFilePath, pAllocationCallbacks); + if (pFlac->dr == NULL) { + return MA_INVALID_FILE; + } + + return MA_SUCCESS; + } +#else + { + /* flac is disabled. */ + (void)pFilePath; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_flac_init_memory(const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_flac* pFlac) +{ + ma_result result; + + result = ma_flac_init_internal(pConfig, pFlac); + if (result != MA_SUCCESS) { + return result; + } + +#if !defined(MA_NO_FLAC) + { + pFlac->dr = ma_dr_flac_open_memory(pData, dataSize, pAllocationCallbacks); + if (pFlac->dr == NULL) { + return MA_INVALID_FILE; + } + + return MA_SUCCESS; + } +#else + { + /* flac is disabled. */ + (void)pData; + (void)dataSize; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API void ma_flac_uninit(ma_flac* pFlac, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pFlac == NULL) { + return; + } + + (void)pAllocationCallbacks; + +#if !defined(MA_NO_FLAC) + { + ma_dr_flac_close(pFlac->dr); + } +#else + { + /* flac is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + } +#endif + + ma_data_source_uninit(&pFlac->ds); +} + +MA_API ma_result ma_flac_read_pcm_frames(ma_flac* pFlac, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if (pFlac == NULL) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_NO_FLAC) + { + /* We always use floating point format. */ + ma_result result = MA_SUCCESS; /* Must be initialized to MA_SUCCESS. */ + ma_uint64 totalFramesRead = 0; + ma_format format; + + ma_flac_get_data_format(pFlac, &format, NULL, NULL, NULL, 0); + + switch (format) + { + case ma_format_f32: + { + totalFramesRead = ma_dr_flac_read_pcm_frames_f32(pFlac->dr, frameCount, (float*)pFramesOut); + } break; + + case ma_format_s16: + { + totalFramesRead = ma_dr_flac_read_pcm_frames_s16(pFlac->dr, frameCount, (ma_int16*)pFramesOut); + } break; + + case ma_format_s32: + { + totalFramesRead = ma_dr_flac_read_pcm_frames_s32(pFlac->dr, frameCount, (ma_int32*)pFramesOut); + } break; + + case ma_format_u8: + case ma_format_s24: + case ma_format_unknown: + default: + { + return MA_INVALID_OPERATION; + }; + } + + /* In the future we'll update ma_dr_flac to return MA_AT_END for us. */ + if (totalFramesRead == 0) { + result = MA_AT_END; + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesRead; + } + + if (result == MA_SUCCESS && totalFramesRead == 0) { + result = MA_AT_END; + } + + return result; + } +#else + { + /* flac is disabled. Should never hit this since initialization would have failed. 
*/ + MA_ASSERT(MA_FALSE); + + (void)pFramesOut; + (void)frameCount; + (void)pFramesRead; + + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_flac_seek_to_pcm_frame(ma_flac* pFlac, ma_uint64 frameIndex) +{ + if (pFlac == NULL) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_NO_FLAC) + { + ma_bool32 flacResult; + + flacResult = ma_dr_flac_seek_to_pcm_frame(pFlac->dr, frameIndex); + if (flacResult != MA_TRUE) { + return MA_ERROR; + } + + return MA_SUCCESS; + } +#else + { + /* flac is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + + (void)frameIndex; + + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_flac_get_data_format(ma_flac* pFlac, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + /* Defaults for safety. */ + if (pFormat != NULL) { + *pFormat = ma_format_unknown; + } + if (pChannels != NULL) { + *pChannels = 0; + } + if (pSampleRate != NULL) { + *pSampleRate = 0; + } + if (pChannelMap != NULL) { + MA_ZERO_MEMORY(pChannelMap, sizeof(*pChannelMap) * channelMapCap); + } + + if (pFlac == NULL) { + return MA_INVALID_OPERATION; + } + + if (pFormat != NULL) { + *pFormat = pFlac->format; + } + +#if !defined(MA_NO_FLAC) + { + if (pChannels != NULL) { + *pChannels = pFlac->dr->channels; + } + + if (pSampleRate != NULL) { + *pSampleRate = pFlac->dr->sampleRate; + } + + if (pChannelMap != NULL) { + ma_channel_map_init_standard(ma_standard_channel_map_microsoft, pChannelMap, channelMapCap, pFlac->dr->channels); + } + + return MA_SUCCESS; + } +#else + { + /* flac is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_flac_get_cursor_in_pcm_frames(ma_flac* pFlac, ma_uint64* pCursor) +{ + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; /* Safety. */ + + if (pFlac == NULL) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_NO_FLAC) + { + *pCursor = pFlac->dr->currentPCMFrame; + + return MA_SUCCESS; + } +#else + { + /* flac is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_flac_get_length_in_pcm_frames(ma_flac* pFlac, ma_uint64* pLength) +{ + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; /* Safety. */ + + if (pFlac == NULL) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_NO_FLAC) + { + *pLength = pFlac->dr->totalPCMFrameCount; + + return MA_SUCCESS; + } +#else + { + /* flac is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } +#endif +} + + +static ma_result ma_decoding_backend_init__flac(void* pUserData, ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_flac* pFlac; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. 
*/ + pFlac = (ma_flac*)ma_malloc(sizeof(*pFlac), pAllocationCallbacks); + if (pFlac == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_flac_init(onRead, onSeek, onTell, pReadSeekTellUserData, pConfig, pAllocationCallbacks, pFlac); + if (result != MA_SUCCESS) { + ma_free(pFlac, pAllocationCallbacks); + return result; + } + + *ppBackend = pFlac; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_file__flac(void* pUserData, const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_flac* pFlac; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pFlac = (ma_flac*)ma_malloc(sizeof(*pFlac), pAllocationCallbacks); + if (pFlac == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_flac_init_file(pFilePath, pConfig, pAllocationCallbacks, pFlac); + if (result != MA_SUCCESS) { + ma_free(pFlac, pAllocationCallbacks); + return result; + } + + *ppBackend = pFlac; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_file_w__flac(void* pUserData, const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_flac* pFlac; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pFlac = (ma_flac*)ma_malloc(sizeof(*pFlac), pAllocationCallbacks); + if (pFlac == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_flac_init_file_w(pFilePath, pConfig, pAllocationCallbacks, pFlac); + if (result != MA_SUCCESS) { + ma_free(pFlac, pAllocationCallbacks); + return result; + } + + *ppBackend = pFlac; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_memory__flac(void* pUserData, const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_flac* pFlac; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. 
*/ + pFlac = (ma_flac*)ma_malloc(sizeof(*pFlac), pAllocationCallbacks); + if (pFlac == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_flac_init_memory(pData, dataSize, pConfig, pAllocationCallbacks, pFlac); + if (result != MA_SUCCESS) { + ma_free(pFlac, pAllocationCallbacks); + return result; + } + + *ppBackend = pFlac; + + return MA_SUCCESS; +} + +static void ma_decoding_backend_uninit__flac(void* pUserData, ma_data_source* pBackend, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_flac* pFlac = (ma_flac*)pBackend; + + (void)pUserData; + + ma_flac_uninit(pFlac, pAllocationCallbacks); + ma_free(pFlac, pAllocationCallbacks); +} + +static ma_decoding_backend_vtable g_ma_decoding_backend_vtable_flac = +{ + ma_decoding_backend_init__flac, + ma_decoding_backend_init_file__flac, + ma_decoding_backend_init_file_w__flac, + ma_decoding_backend_init_memory__flac, + ma_decoding_backend_uninit__flac +}; + +static ma_result ma_decoder_init_flac__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_vtable__internal(&g_ma_decoding_backend_vtable_flac, NULL, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_flac_from_file__internal(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_file__internal(&g_ma_decoding_backend_vtable_flac, NULL, pFilePath, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_flac_from_file_w__internal(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_file_w__internal(&g_ma_decoding_backend_vtable_flac, NULL, pFilePath, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_flac_from_memory__internal(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_memory__internal(&g_ma_decoding_backend_vtable_flac, NULL, pData, dataSize, pConfig, pDecoder); +} +#endif /* ma_dr_flac_h */ + +/* MP3 */ +#ifdef ma_dr_mp3_h +#define MA_HAS_MP3 + +typedef struct +{ + ma_data_source_base ds; + ma_read_proc onRead; + ma_seek_proc onSeek; + ma_tell_proc onTell; + void* pReadSeekTellUserData; + ma_format format; /* Can be f32 or s16. */ +#if !defined(MA_NO_MP3) + ma_dr_mp3 dr; + ma_uint32 seekPointCount; + ma_dr_mp3_seek_point* pSeekPoints; /* Only used if seek table generation is used. 
*/ +#endif +} ma_mp3; + +MA_API ma_result ma_mp3_init(ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_mp3* pMP3); +MA_API ma_result ma_mp3_init_file(const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_mp3* pMP3); +MA_API ma_result ma_mp3_init_file_w(const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_mp3* pMP3); +MA_API ma_result ma_mp3_init_memory(const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_mp3* pMP3); +MA_API void ma_mp3_uninit(ma_mp3* pMP3, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_mp3_read_pcm_frames(ma_mp3* pMP3, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); +MA_API ma_result ma_mp3_seek_to_pcm_frame(ma_mp3* pMP3, ma_uint64 frameIndex); +MA_API ma_result ma_mp3_get_data_format(ma_mp3* pMP3, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); +MA_API ma_result ma_mp3_get_cursor_in_pcm_frames(ma_mp3* pMP3, ma_uint64* pCursor); +MA_API ma_result ma_mp3_get_length_in_pcm_frames(ma_mp3* pMP3, ma_uint64* pLength); + + +static ma_result ma_mp3_ds_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_mp3_read_pcm_frames((ma_mp3*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_mp3_ds_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_mp3_seek_to_pcm_frame((ma_mp3*)pDataSource, frameIndex); +} + +static ma_result ma_mp3_ds_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + return ma_mp3_get_data_format((ma_mp3*)pDataSource, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); +} + +static ma_result ma_mp3_ds_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + return ma_mp3_get_cursor_in_pcm_frames((ma_mp3*)pDataSource, pCursor); +} + +static ma_result ma_mp3_ds_get_length(ma_data_source* pDataSource, ma_uint64* pLength) +{ + return ma_mp3_get_length_in_pcm_frames((ma_mp3*)pDataSource, pLength); +} + +static ma_data_source_vtable g_ma_mp3_ds_vtable = +{ + ma_mp3_ds_read, + ma_mp3_ds_seek, + ma_mp3_ds_get_data_format, + ma_mp3_ds_get_cursor, + ma_mp3_ds_get_length, + NULL, /* onSetLooping */ + 0 +}; + + +#if !defined(MA_NO_MP3) +static size_t ma_mp3_dr_callback__read(void* pUserData, void* pBufferOut, size_t bytesToRead) +{ + ma_mp3* pMP3 = (ma_mp3*)pUserData; + ma_result result; + size_t bytesRead; + + MA_ASSERT(pMP3 != NULL); + + result = pMP3->onRead(pMP3->pReadSeekTellUserData, pBufferOut, bytesToRead, &bytesRead); + (void)result; + + return bytesRead; +} + +static ma_bool32 ma_mp3_dr_callback__seek(void* pUserData, int offset, ma_dr_mp3_seek_origin origin) +{ + ma_mp3* pMP3 = (ma_mp3*)pUserData; + ma_result result; + ma_seek_origin maSeekOrigin; + + MA_ASSERT(pMP3 != NULL); + + maSeekOrigin = ma_seek_origin_start; + if (origin == ma_dr_mp3_seek_origin_current) { + maSeekOrigin = ma_seek_origin_current; + } + + result = pMP3->onSeek(pMP3->pReadSeekTellUserData, offset, maSeekOrigin); + if (result != MA_SUCCESS) { + return MA_FALSE; + } + + return MA_TRUE; +} +#endif + 
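+/*
+Illustrative sketch (not part of the upstream miniaudio source): of the decoding backend config,
+this MP3 backend is shown below to read two fields - ma_mp3_init_internal() honours
+pConfig->preferredFormat (f32 or s16) and ma_mp3_generate_seek_table() honours
+pConfig->seekPointCount. A caller wanting s16 output plus a seek table could therefore do
+something along these lines; the file name, buffer size and seek point count are placeholders,
+and the full ma_decoding_backend_config definition lives elsewhere in this file.
+
+    ma_decoding_backend_config cfg;
+    ma_mp3 mp3;
+    ma_int16 pcm[2 * 1024];                // room for 1024 stereo s16 frames
+    ma_uint64 framesRead;
+    ma_uint32 channels;
+
+    MA_ZERO_OBJECT(&cfg);
+    cfg.preferredFormat = ma_format_s16;   // decode straight to s16
+    cfg.seekPointCount  = 16;              // request a 16-point seek table
+
+    if (ma_mp3_init_file("music.mp3", &cfg, NULL, &mp3) == MA_SUCCESS) {
+        ma_mp3_get_data_format(&mp3, NULL, &channels, NULL, NULL, 0);
+        ma_mp3_read_pcm_frames(&mp3, pcm, ma_countof(pcm) / channels, &framesRead);
+        ma_mp3_seek_to_pcm_frame(&mp3, 0);   // seeking can make use of the seek table
+        ma_mp3_uninit(&mp3, NULL);
+    }
+*/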
+static ma_result ma_mp3_init_internal(const ma_decoding_backend_config* pConfig, ma_mp3* pMP3) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + + if (pMP3 == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pMP3); + pMP3->format = ma_format_f32; /* f32 by default. */ + + if (pConfig != NULL && (pConfig->preferredFormat == ma_format_f32 || pConfig->preferredFormat == ma_format_s16)) { + pMP3->format = pConfig->preferredFormat; + } else { + /* Getting here means something other than f32 and s16 was specified. Just leave this unset to use the default format. */ + } + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_mp3_ds_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pMP3->ds); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the base data source. */ + } + + return MA_SUCCESS; +} + +static ma_result ma_mp3_generate_seek_table(ma_mp3* pMP3, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_bool32 mp3Result; + ma_uint32 seekPointCount = 0; + ma_dr_mp3_seek_point* pSeekPoints = NULL; + + MA_ASSERT(pMP3 != NULL); + MA_ASSERT(pConfig != NULL); + + seekPointCount = pConfig->seekPointCount; + if (seekPointCount > 0) { + pSeekPoints = (ma_dr_mp3_seek_point*)ma_malloc(sizeof(*pMP3->pSeekPoints) * seekPointCount, pAllocationCallbacks); + if (pSeekPoints == NULL) { + return MA_OUT_OF_MEMORY; + } + } + + mp3Result = ma_dr_mp3_calculate_seek_points(&pMP3->dr, &seekPointCount, pSeekPoints); + if (mp3Result != MA_TRUE) { + ma_free(pSeekPoints, pAllocationCallbacks); + return MA_ERROR; + } + + mp3Result = ma_dr_mp3_bind_seek_table(&pMP3->dr, seekPointCount, pSeekPoints); + if (mp3Result != MA_TRUE) { + ma_free(pSeekPoints, pAllocationCallbacks); + return MA_ERROR; + } + + pMP3->seekPointCount = seekPointCount; + pMP3->pSeekPoints = pSeekPoints; + + return MA_SUCCESS; +} + +static ma_result ma_mp3_post_init(ma_mp3* pMP3, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_result result; + + result = ma_mp3_generate_seek_table(pMP3, pConfig, pAllocationCallbacks); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_mp3_init(ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_mp3* pMP3) +{ + ma_result result; + + result = ma_mp3_init_internal(pConfig, pMP3); + if (result != MA_SUCCESS) { + return result; + } + + if (onRead == NULL || onSeek == NULL) { + return MA_INVALID_ARGS; /* onRead and onSeek are mandatory. */ + } + + pMP3->onRead = onRead; + pMP3->onSeek = onSeek; + pMP3->onTell = onTell; + pMP3->pReadSeekTellUserData = pReadSeekTellUserData; + +#if !defined(MA_NO_MP3) + { + ma_bool32 mp3Result; + + mp3Result = ma_dr_mp3_init(&pMP3->dr, ma_mp3_dr_callback__read, ma_mp3_dr_callback__seek, pMP3, pAllocationCallbacks); + if (mp3Result != MA_TRUE) { + return MA_INVALID_FILE; + } + + ma_mp3_post_init(pMP3, pConfig, pAllocationCallbacks); + + return MA_SUCCESS; + } +#else + { + /* mp3 is disabled. 
*/ + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_mp3_init_file(const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_mp3* pMP3) +{ + ma_result result; + + result = ma_mp3_init_internal(pConfig, pMP3); + if (result != MA_SUCCESS) { + return result; + } + +#if !defined(MA_NO_MP3) + { + ma_bool32 mp3Result; + + mp3Result = ma_dr_mp3_init_file(&pMP3->dr, pFilePath, pAllocationCallbacks); + if (mp3Result != MA_TRUE) { + return MA_INVALID_FILE; + } + + ma_mp3_post_init(pMP3, pConfig, pAllocationCallbacks); + + return MA_SUCCESS; + } +#else + { + /* mp3 is disabled. */ + (void)pFilePath; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_mp3_init_file_w(const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_mp3* pMP3) +{ + ma_result result; + + result = ma_mp3_init_internal(pConfig, pMP3); + if (result != MA_SUCCESS) { + return result; + } + +#if !defined(MA_NO_MP3) + { + ma_bool32 mp3Result; + + mp3Result = ma_dr_mp3_init_file_w(&pMP3->dr, pFilePath, pAllocationCallbacks); + if (mp3Result != MA_TRUE) { + return MA_INVALID_FILE; + } + + ma_mp3_post_init(pMP3, pConfig, pAllocationCallbacks); + + return MA_SUCCESS; + } +#else + { + /* mp3 is disabled. */ + (void)pFilePath; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_mp3_init_memory(const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_mp3* pMP3) +{ + ma_result result; + + result = ma_mp3_init_internal(pConfig, pMP3); + if (result != MA_SUCCESS) { + return result; + } + +#if !defined(MA_NO_MP3) + { + ma_bool32 mp3Result; + + mp3Result = ma_dr_mp3_init_memory(&pMP3->dr, pData, dataSize, pAllocationCallbacks); + if (mp3Result != MA_TRUE) { + return MA_INVALID_FILE; + } + + ma_mp3_post_init(pMP3, pConfig, pAllocationCallbacks); + + return MA_SUCCESS; + } +#else + { + /* mp3 is disabled. */ + (void)pData; + (void)dataSize; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API void ma_mp3_uninit(ma_mp3* pMP3, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pMP3 == NULL) { + return; + } + +#if !defined(MA_NO_MP3) + { + ma_dr_mp3_uninit(&pMP3->dr); + } +#else + { + /* mp3 is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + } +#endif + + /* Seek points need to be freed after the MP3 decoder has been uninitialized to ensure they're no longer being referenced. */ + ma_free(pMP3->pSeekPoints, pAllocationCallbacks); + + ma_data_source_uninit(&pMP3->ds); +} + +MA_API ma_result ma_mp3_read_pcm_frames(ma_mp3* pMP3, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if (pMP3 == NULL) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_NO_MP3) + { + /* We always use floating point format. */ + ma_result result = MA_SUCCESS; /* Must be initialized to MA_SUCCESS. 
*/ + ma_uint64 totalFramesRead = 0; + ma_format format; + + ma_mp3_get_data_format(pMP3, &format, NULL, NULL, NULL, 0); + + switch (format) + { + case ma_format_f32: + { + totalFramesRead = ma_dr_mp3_read_pcm_frames_f32(&pMP3->dr, frameCount, (float*)pFramesOut); + } break; + + case ma_format_s16: + { + totalFramesRead = ma_dr_mp3_read_pcm_frames_s16(&pMP3->dr, frameCount, (ma_int16*)pFramesOut); + } break; + + case ma_format_u8: + case ma_format_s24: + case ma_format_s32: + case ma_format_unknown: + default: + { + return MA_INVALID_OPERATION; + }; + } + + /* In the future we'll update ma_dr_mp3 to return MA_AT_END for us. */ + if (totalFramesRead == 0) { + result = MA_AT_END; + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesRead; + } + + return result; + } +#else + { + /* mp3 is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + + (void)pFramesOut; + (void)frameCount; + (void)pFramesRead; + + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_mp3_seek_to_pcm_frame(ma_mp3* pMP3, ma_uint64 frameIndex) +{ + if (pMP3 == NULL) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_NO_MP3) + { + ma_bool32 mp3Result; + + mp3Result = ma_dr_mp3_seek_to_pcm_frame(&pMP3->dr, frameIndex); + if (mp3Result != MA_TRUE) { + return MA_ERROR; + } + + return MA_SUCCESS; + } +#else + { + /* mp3 is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + + (void)frameIndex; + + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_mp3_get_data_format(ma_mp3* pMP3, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + /* Defaults for safety. */ + if (pFormat != NULL) { + *pFormat = ma_format_unknown; + } + if (pChannels != NULL) { + *pChannels = 0; + } + if (pSampleRate != NULL) { + *pSampleRate = 0; + } + if (pChannelMap != NULL) { + MA_ZERO_MEMORY(pChannelMap, sizeof(*pChannelMap) * channelMapCap); + } + + if (pMP3 == NULL) { + return MA_INVALID_OPERATION; + } + + if (pFormat != NULL) { + *pFormat = pMP3->format; + } + +#if !defined(MA_NO_MP3) + { + if (pChannels != NULL) { + *pChannels = pMP3->dr.channels; + } + + if (pSampleRate != NULL) { + *pSampleRate = pMP3->dr.sampleRate; + } + + if (pChannelMap != NULL) { + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pMP3->dr.channels); + } + + return MA_SUCCESS; + } +#else + { + /* mp3 is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_mp3_get_cursor_in_pcm_frames(ma_mp3* pMP3, ma_uint64* pCursor) +{ + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; /* Safety. */ + + if (pMP3 == NULL) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_NO_MP3) + { + *pCursor = pMP3->dr.currentPCMFrame; + + return MA_SUCCESS; + } +#else + { + /* mp3 is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_mp3_get_length_in_pcm_frames(ma_mp3* pMP3, ma_uint64* pLength) +{ + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; /* Safety. */ + + if (pMP3 == NULL) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_NO_MP3) + { + *pLength = ma_dr_mp3_get_pcm_frame_count(&pMP3->dr); + + return MA_SUCCESS; + } +#else + { + /* mp3 is disabled. 
Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } +#endif +} + + +static ma_result ma_decoding_backend_init__mp3(void* pUserData, ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_mp3* pMP3; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pMP3 = (ma_mp3*)ma_malloc(sizeof(*pMP3), pAllocationCallbacks); + if (pMP3 == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_mp3_init(onRead, onSeek, onTell, pReadSeekTellUserData, pConfig, pAllocationCallbacks, pMP3); + if (result != MA_SUCCESS) { + ma_free(pMP3, pAllocationCallbacks); + return result; + } + + *ppBackend = pMP3; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_file__mp3(void* pUserData, const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_mp3* pMP3; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pMP3 = (ma_mp3*)ma_malloc(sizeof(*pMP3), pAllocationCallbacks); + if (pMP3 == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_mp3_init_file(pFilePath, pConfig, pAllocationCallbacks, pMP3); + if (result != MA_SUCCESS) { + ma_free(pMP3, pAllocationCallbacks); + return result; + } + + *ppBackend = pMP3; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_file_w__mp3(void* pUserData, const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_mp3* pMP3; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pMP3 = (ma_mp3*)ma_malloc(sizeof(*pMP3), pAllocationCallbacks); + if (pMP3 == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_mp3_init_file_w(pFilePath, pConfig, pAllocationCallbacks, pMP3); + if (result != MA_SUCCESS) { + ma_free(pMP3, pAllocationCallbacks); + return result; + } + + *ppBackend = pMP3; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_memory__mp3(void* pUserData, const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_mp3* pMP3; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. 
*/ + pMP3 = (ma_mp3*)ma_malloc(sizeof(*pMP3), pAllocationCallbacks); + if (pMP3 == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_mp3_init_memory(pData, dataSize, pConfig, pAllocationCallbacks, pMP3); + if (result != MA_SUCCESS) { + ma_free(pMP3, pAllocationCallbacks); + return result; + } + + *ppBackend = pMP3; + + return MA_SUCCESS; +} + +static void ma_decoding_backend_uninit__mp3(void* pUserData, ma_data_source* pBackend, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_mp3* pMP3 = (ma_mp3*)pBackend; + + (void)pUserData; + + ma_mp3_uninit(pMP3, pAllocationCallbacks); + ma_free(pMP3, pAllocationCallbacks); +} + +static ma_decoding_backend_vtable g_ma_decoding_backend_vtable_mp3 = +{ + ma_decoding_backend_init__mp3, + ma_decoding_backend_init_file__mp3, + ma_decoding_backend_init_file_w__mp3, + ma_decoding_backend_init_memory__mp3, + ma_decoding_backend_uninit__mp3 +}; + +static ma_result ma_decoder_init_mp3__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_vtable__internal(&g_ma_decoding_backend_vtable_mp3, NULL, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_mp3_from_file__internal(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_file__internal(&g_ma_decoding_backend_vtable_mp3, NULL, pFilePath, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_mp3_from_file_w__internal(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_file_w__internal(&g_ma_decoding_backend_vtable_mp3, NULL, pFilePath, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_mp3_from_memory__internal(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_memory__internal(&g_ma_decoding_backend_vtable_mp3, NULL, pData, dataSize, pConfig, pDecoder); +} +#endif /* ma_dr_mp3_h */ + +/* Vorbis */ +#ifdef STB_VORBIS_INCLUDE_STB_VORBIS_H +#define MA_HAS_VORBIS + +/* The size in bytes of each chunk of data to read from the Vorbis stream. */ +#define MA_VORBIS_DATA_CHUNK_SIZE 4096 + +typedef struct +{ + ma_data_source_base ds; + ma_read_proc onRead; + ma_seek_proc onSeek; + ma_tell_proc onTell; + void* pReadSeekTellUserData; + ma_allocation_callbacks allocationCallbacks; /* Store the allocation callbacks within the structure because we may need to dynamically expand a buffer in ma_stbvorbis_read_pcm_frames() when using push mode. */ + ma_format format; /* Only f32 is allowed with stb_vorbis. */ + ma_uint32 channels; + ma_uint32 sampleRate; + ma_uint64 cursor; +#if !defined(MA_NO_VORBIS) + stb_vorbis* stb; + ma_bool32 usingPushMode; + struct + { + ma_uint8* pData; + size_t dataSize; + size_t dataCapacity; + size_t audioStartOffsetInBytes; + ma_uint32 framesConsumed; /* The number of frames consumed in ppPacketData. */ + ma_uint32 framesRemaining; /* The number of frames remaining in ppPacketData. 
*/ + float** ppPacketData; + } push; +#endif +} ma_stbvorbis; + +MA_API ma_result ma_stbvorbis_init(ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_stbvorbis* pVorbis); +MA_API ma_result ma_stbvorbis_init_file(const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_stbvorbis* pVorbis); +MA_API ma_result ma_stbvorbis_init_memory(const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_stbvorbis* pVorbis); +MA_API void ma_stbvorbis_uninit(ma_stbvorbis* pVorbis, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_stbvorbis_read_pcm_frames(ma_stbvorbis* pVorbis, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); +MA_API ma_result ma_stbvorbis_seek_to_pcm_frame(ma_stbvorbis* pVorbis, ma_uint64 frameIndex); +MA_API ma_result ma_stbvorbis_get_data_format(ma_stbvorbis* pVorbis, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); +MA_API ma_result ma_stbvorbis_get_cursor_in_pcm_frames(ma_stbvorbis* pVorbis, ma_uint64* pCursor); +MA_API ma_result ma_stbvorbis_get_length_in_pcm_frames(ma_stbvorbis* pVorbis, ma_uint64* pLength); + + +static ma_result ma_stbvorbis_ds_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_stbvorbis_read_pcm_frames((ma_stbvorbis*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_stbvorbis_ds_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_stbvorbis_seek_to_pcm_frame((ma_stbvorbis*)pDataSource, frameIndex); +} + +static ma_result ma_stbvorbis_ds_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + return ma_stbvorbis_get_data_format((ma_stbvorbis*)pDataSource, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); +} + +static ma_result ma_stbvorbis_ds_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + return ma_stbvorbis_get_cursor_in_pcm_frames((ma_stbvorbis*)pDataSource, pCursor); +} + +static ma_result ma_stbvorbis_ds_get_length(ma_data_source* pDataSource, ma_uint64* pLength) +{ + return ma_stbvorbis_get_length_in_pcm_frames((ma_stbvorbis*)pDataSource, pLength); +} + +static ma_data_source_vtable g_ma_stbvorbis_ds_vtable = +{ + ma_stbvorbis_ds_read, + ma_stbvorbis_ds_seek, + ma_stbvorbis_ds_get_data_format, + ma_stbvorbis_ds_get_cursor, + ma_stbvorbis_ds_get_length, + NULL, /* onSetLooping */ + 0 +}; + + +static ma_result ma_stbvorbis_init_internal(const ma_decoding_backend_config* pConfig, ma_stbvorbis* pVorbis) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + + (void)pConfig; + + if (pVorbis == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pVorbis); + pVorbis->format = ma_format_f32; /* Only supporting f32. */ + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_stbvorbis_ds_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pVorbis->ds); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the base data source. 
*/
+ }
+
+ return MA_SUCCESS;
+}
+
+#if !defined(MA_NO_VORBIS)
+static ma_result ma_stbvorbis_post_init(ma_stbvorbis* pVorbis)
+{
+ stb_vorbis_info info;
+
+ MA_ASSERT(pVorbis != NULL);
+
+ info = stb_vorbis_get_info(pVorbis->stb);
+
+ pVorbis->channels = info.channels;
+ pVorbis->sampleRate = info.sample_rate;
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_stbvorbis_init_internal_decoder_push(ma_stbvorbis* pVorbis)
+{
+ ma_result result;
+ stb_vorbis* stb;
+ size_t dataSize = 0;
+ size_t dataCapacity = 0;
+ ma_uint8* pData = NULL; /* <-- Must be initialized to NULL. */
+
+ for (;;) {
+ int vorbisError;
+ int consumedDataSize; /* <-- Filled in by stb_vorbis_open_pushdata(). */
+ size_t bytesRead;
+ ma_uint8* pNewData;
+
+ /* Allocate memory for the new chunk. */
+ dataCapacity += MA_VORBIS_DATA_CHUNK_SIZE;
+ pNewData = (ma_uint8*)ma_realloc(pData, dataCapacity, &pVorbis->allocationCallbacks);
+ if (pNewData == NULL) {
+ ma_free(pData, &pVorbis->allocationCallbacks);
+ return MA_OUT_OF_MEMORY;
+ }
+
+ pData = pNewData;
+
+ /* Read in the next chunk. */
+ result = pVorbis->onRead(pVorbis->pReadSeekTellUserData, ma_offset_ptr(pData, dataSize), (dataCapacity - dataSize), &bytesRead);
+ dataSize += bytesRead;
+
+ if (result != MA_SUCCESS) {
+ ma_free(pData, &pVorbis->allocationCallbacks);
+ return result;
+ }
+
+ /* We have a maximum of 31 bits with stb_vorbis. */
+ if (dataSize > INT_MAX) {
+ ma_free(pData, &pVorbis->allocationCallbacks);
+ return MA_TOO_BIG;
+ }
+
+ stb = stb_vorbis_open_pushdata(pData, (int)dataSize, &consumedDataSize, &vorbisError, NULL);
+ if (stb != NULL) {
+ /*
+ Successfully opened the Vorbis decoder. We might have some leftover unprocessed
+ data so we'll need to move that down to the front.
+ */
+ dataSize -= (size_t)consumedDataSize; /* Consume the data. */
+ MA_MOVE_MEMORY(pData, ma_offset_ptr(pData, consumedDataSize), dataSize);
+
+ /*
+ We need to track the start point so we can seek back to the start of the audio
+ data when seeking.
+ */
+ pVorbis->push.audioStartOffsetInBytes = consumedDataSize;
+
+ break;
+ } else {
+ /* Failed to open the decoder. */
+ if (vorbisError == VORBIS_need_more_data) {
+ continue;
+ } else {
+ ma_free(pData, &pVorbis->allocationCallbacks);
+ return MA_ERROR; /* Failed to open the stb_vorbis decoder. */
+ }
+ }
+ }
+
+ MA_ASSERT(stb != NULL);
+ pVorbis->stb = stb;
+ pVorbis->push.pData = pData;
+ pVorbis->push.dataSize = dataSize;
+ pVorbis->push.dataCapacity = dataCapacity;
+
+ return MA_SUCCESS;
+}
+#endif
+
+MA_API ma_result ma_stbvorbis_init(ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_stbvorbis* pVorbis)
+{
+ ma_result result;
+
+ result = ma_stbvorbis_init_internal(pConfig, pVorbis);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ if (onRead == NULL || onSeek == NULL) {
+ return MA_INVALID_ARGS; /* onRead and onSeek are mandatory. */
+ }
+
+ pVorbis->onRead = onRead;
+ pVorbis->onSeek = onSeek;
+ pVorbis->onTell = onTell;
+ pVorbis->pReadSeekTellUserData = pReadSeekTellUserData;
+ ma_allocation_callbacks_init_copy(&pVorbis->allocationCallbacks, pAllocationCallbacks);
+
+#if !defined(MA_NO_VORBIS)
+ {
+ /*
+ stb_vorbis lacks a callback based API for its pulling API which means we're stuck with the
+ pushing API. In order for us to be able to successfully initialize the decoder we need to
+ supply it with enough data. We need to keep loading data until we have enough.
+ */ + result = ma_stbvorbis_init_internal_decoder_push(pVorbis); + if (result != MA_SUCCESS) { + return result; + } + + pVorbis->usingPushMode = MA_TRUE; + + result = ma_stbvorbis_post_init(pVorbis); + if (result != MA_SUCCESS) { + stb_vorbis_close(pVorbis->stb); + ma_free(pVorbis->push.pData, pAllocationCallbacks); + return result; + } + + return MA_SUCCESS; + } +#else + { + /* vorbis is disabled. */ + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_stbvorbis_init_file(const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_stbvorbis* pVorbis) +{ + ma_result result; + + result = ma_stbvorbis_init_internal(pConfig, pVorbis); + if (result != MA_SUCCESS) { + return result; + } + +#if !defined(MA_NO_VORBIS) + { + (void)pAllocationCallbacks; /* Don't know how to make use of this with stb_vorbis. */ + + /* We can use stb_vorbis' pull mode for file based streams. */ + pVorbis->stb = stb_vorbis_open_filename(pFilePath, NULL, NULL); + if (pVorbis->stb == NULL) { + return MA_INVALID_FILE; + } + + pVorbis->usingPushMode = MA_FALSE; + + result = ma_stbvorbis_post_init(pVorbis); + if (result != MA_SUCCESS) { + stb_vorbis_close(pVorbis->stb); + return result; + } + + return MA_SUCCESS; + } +#else + { + /* vorbis is disabled. */ + (void)pFilePath; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_stbvorbis_init_memory(const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_stbvorbis* pVorbis) +{ + ma_result result; + + result = ma_stbvorbis_init_internal(pConfig, pVorbis); + if (result != MA_SUCCESS) { + return result; + } + +#if !defined(MA_NO_VORBIS) + { + (void)pAllocationCallbacks; + + /* stb_vorbis uses an int as it's size specifier, restricting it to 32-bit even on 64-bit systems. *sigh*. */ + if (dataSize > INT_MAX) { + return MA_TOO_BIG; + } + + pVorbis->stb = stb_vorbis_open_memory((const unsigned char*)pData, (int)dataSize, NULL, NULL); + if (pVorbis->stb == NULL) { + return MA_INVALID_FILE; + } + + pVorbis->usingPushMode = MA_FALSE; + + result = ma_stbvorbis_post_init(pVorbis); + if (result != MA_SUCCESS) { + stb_vorbis_close(pVorbis->stb); + return result; + } + + return MA_SUCCESS; + } +#else + { + /* vorbis is disabled. */ + (void)pData; + (void)dataSize; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API void ma_stbvorbis_uninit(ma_stbvorbis* pVorbis, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pVorbis == NULL) { + return; + } + +#if !defined(MA_NO_VORBIS) + { + stb_vorbis_close(pVorbis->stb); + + /* We'll have to clear some memory if we're using push mode. */ + if (pVorbis->usingPushMode) { + ma_free(pVorbis->push.pData, pAllocationCallbacks); + } + } +#else + { + /* vorbis is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + } +#endif + + ma_data_source_uninit(&pVorbis->ds); +} + +MA_API ma_result ma_stbvorbis_read_pcm_frames(ma_stbvorbis* pVorbis, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if (pVorbis == NULL) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_NO_VORBIS) + { + /* We always use floating point format. */ + ma_result result = MA_SUCCESS; /* Must be initialized to MA_SUCCESS. 
*/
+ ma_uint64 totalFramesRead = 0;
+ ma_format format;
+ ma_uint32 channels;
+
+ ma_stbvorbis_get_data_format(pVorbis, &format, &channels, NULL, NULL, 0);
+
+ if (format == ma_format_f32) {
+ /* We read differently depending on whether or not we're using push mode. */
+ if (pVorbis->usingPushMode) {
+ /* Push mode. This is the complex case. */
+ float* pFramesOutF32 = (float*)pFramesOut;
+
+ while (totalFramesRead < frameCount) {
+ /* The first thing to do is read from any already-cached frames. */
+ ma_uint32 framesToReadFromCache = (ma_uint32)ma_min(pVorbis->push.framesRemaining, (frameCount - totalFramesRead)); /* Safe cast because pVorbis->framesRemaining is 32-bit. */
+
+ /* The output pointer can be null in which case we just treat it as a seek. */
+ if (pFramesOut != NULL) {
+ ma_uint64 iFrame;
+ for (iFrame = 0; iFrame < framesToReadFromCache; iFrame += 1) {
+ ma_uint32 iChannel;
+ for (iChannel = 0; iChannel < pVorbis->channels; iChannel += 1) {
+ pFramesOutF32[iChannel] = pVorbis->push.ppPacketData[iChannel][pVorbis->push.framesConsumed + iFrame];
+ }
+
+ pFramesOutF32 += pVorbis->channels;
+ }
+ }
+
+ /* Update pointers and counters. */
+ pVorbis->push.framesConsumed += framesToReadFromCache;
+ pVorbis->push.framesRemaining -= framesToReadFromCache;
+ totalFramesRead += framesToReadFromCache;
+
+ /* Don't bother reading any more frames right now if we've just finished loading. */
+ if (totalFramesRead == frameCount) {
+ break;
+ }
+
+ MA_ASSERT(pVorbis->push.framesRemaining == 0);
+
+ /* Getting here means we've run out of cached frames. We'll need to load some more. */
+ for (;;) {
+ int samplesRead = 0;
+ int consumedDataSize;
+
+ /* We need to cast dataSize to an int, so make sure we can do it safely. */
+ if (pVorbis->push.dataSize > INT_MAX) {
+ break; /* Too big. */
+ }
+
+ consumedDataSize = stb_vorbis_decode_frame_pushdata(pVorbis->stb, pVorbis->push.pData, (int)pVorbis->push.dataSize, NULL, &pVorbis->push.ppPacketData, &samplesRead);
+ if (consumedDataSize != 0) {
+ /* Successfully decoded a Vorbis frame. Consume the data. */
+ pVorbis->push.dataSize -= (size_t)consumedDataSize;
+ MA_MOVE_MEMORY(pVorbis->push.pData, ma_offset_ptr(pVorbis->push.pData, consumedDataSize), pVorbis->push.dataSize);
+
+ pVorbis->push.framesConsumed = 0;
+ pVorbis->push.framesRemaining = samplesRead;
+
+ break;
+ } else {
+ /* Not enough data. Read more. */
+ size_t bytesRead;
+
+ /* Expand the data buffer if necessary. */
+ if (pVorbis->push.dataCapacity == pVorbis->push.dataSize) {
+ size_t newCap = pVorbis->push.dataCapacity + MA_VORBIS_DATA_CHUNK_SIZE;
+ ma_uint8* pNewData;
+
+ pNewData = (ma_uint8*)ma_realloc(pVorbis->push.pData, newCap, &pVorbis->allocationCallbacks);
+ if (pNewData == NULL) {
+ result = MA_OUT_OF_MEMORY;
+ break;
+ }
+
+ pVorbis->push.pData = pNewData;
+ pVorbis->push.dataCapacity = newCap;
+ }
+
+ /* We should have enough room to load some data. */
+ result = pVorbis->onRead(pVorbis->pReadSeekTellUserData, ma_offset_ptr(pVorbis->push.pData, pVorbis->push.dataSize), (pVorbis->push.dataCapacity - pVorbis->push.dataSize), &bytesRead);
+ pVorbis->push.dataSize += bytesRead;
+
+ if (result != MA_SUCCESS) {
+ break; /* Failed to read any data. Get out. */
+ }
+ }
+ }
+
+ /* If we don't have a success code at this point it means we've encountered an error or the end of the file has been reached (probably the latter). */
+ if (result != MA_SUCCESS) {
+ break;
+ }
+ }
+ } else {
+ /* Pull mode. This is the simple case, but we still need to run in a loop because stb_vorbis loves using 32-bit instead of 64-bit. */
+ while (totalFramesRead < frameCount) {
+ ma_uint64 framesRemaining = (frameCount - totalFramesRead);
+ int framesRead;
+
+ if (framesRemaining > INT_MAX) {
+ framesRemaining = INT_MAX;
+ }
+
+ framesRead = stb_vorbis_get_samples_float_interleaved(pVorbis->stb, channels, (float*)ma_offset_pcm_frames_ptr(pFramesOut, totalFramesRead, format, channels), (int)framesRemaining * channels); /* Safe cast. */
+ totalFramesRead += framesRead;
+
+ if (framesRead < (int)framesRemaining) {
+ break; /* Nothing left to read. Get out. */
+ }
+ }
+ }
+ } else {
+ result = MA_INVALID_ARGS;
+ }
+
+ pVorbis->cursor += totalFramesRead;
+
+ if (totalFramesRead == 0) {
+ result = MA_AT_END;
+ }
+
+ if (pFramesRead != NULL) {
+ *pFramesRead = totalFramesRead;
+ }
+
+ if (result == MA_SUCCESS && totalFramesRead == 0) {
+ result = MA_AT_END;
+ }
+
+ return result;
+ }
+#else
+ {
+ /* vorbis is disabled. Should never hit this since initialization would have failed. */
+ MA_ASSERT(MA_FALSE);
+
+ (void)pFramesOut;
+ (void)frameCount;
+ (void)pFramesRead;
+
+ return MA_NOT_IMPLEMENTED;
+ }
+#endif
+}
+
+MA_API ma_result ma_stbvorbis_seek_to_pcm_frame(ma_stbvorbis* pVorbis, ma_uint64 frameIndex)
+{
+ if (pVorbis == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+#if !defined(MA_NO_VORBIS)
+ {
+ /* Different seeking methods depending on whether or not we're using push mode. */
+ if (pVorbis->usingPushMode) {
+ /* Push mode. This is the complex case. */
+ ma_result result;
+ float buffer[4096];
+
+ /* If we're seeking backwards, we need to seek back to the start and then brute-force forward. */
+ if (frameIndex < pVorbis->cursor) {
+ if (frameIndex > 0x7FFFFFFF) {
+ return MA_INVALID_ARGS; /* Trying to seek beyond the 32-bit maximum of stb_vorbis. */
+ }
+
+ /*
+ This is wildly inefficient due to me having trouble getting sample exact seeking working
+ robustly with stb_vorbis_flush_pushdata(). The only way I can think to make this work
+ perfectly is to reinitialize the decoder. Note that we only enter this path when seeking
+ backwards. This will hopefully be removed once we get our own Vorbis decoder implemented.
+ */
+ stb_vorbis_close(pVorbis->stb);
+ ma_free(pVorbis->push.pData, &pVorbis->allocationCallbacks);
+
+ MA_ZERO_OBJECT(&pVorbis->push);
+
+ /* Seek to the start of the file. */
+ result = pVorbis->onSeek(pVorbis->pReadSeekTellUserData, 0, ma_seek_origin_start);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ result = ma_stbvorbis_init_internal_decoder_push(pVorbis);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ /* At this point we should be sitting on the first frame. */
+ pVorbis->cursor = 0;
+ }
+
+ /* We're just brute-forcing this for now. */
+ while (pVorbis->cursor < frameIndex) {
+ ma_uint64 framesRead;
+ ma_uint64 framesToRead = ma_countof(buffer)/pVorbis->channels;
+ if (framesToRead > (frameIndex - pVorbis->cursor)) {
+ framesToRead = (frameIndex - pVorbis->cursor);
+ }
+
+ result = ma_stbvorbis_read_pcm_frames(pVorbis, buffer, framesToRead, &framesRead);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ }
+ } else {
+ /* Pull mode. This is the simple case. */
+ int vorbisResult;
+
+ if (frameIndex > UINT_MAX) {
+ return MA_INVALID_ARGS; /* Trying to seek beyond the 32-bit maximum of stb_vorbis. */
+ }
+
+ vorbisResult = stb_vorbis_seek(pVorbis->stb, (unsigned int)frameIndex); /* Safe cast. */
+ if (vorbisResult == 0) {
+ return MA_ERROR; /* Seek failed.
*/ + } + + pVorbis->cursor = frameIndex; + } + + return MA_SUCCESS; + } +#else + { + /* vorbis is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + + (void)frameIndex; + + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_stbvorbis_get_data_format(ma_stbvorbis* pVorbis, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + /* Defaults for safety. */ + if (pFormat != NULL) { + *pFormat = ma_format_unknown; + } + if (pChannels != NULL) { + *pChannels = 0; + } + if (pSampleRate != NULL) { + *pSampleRate = 0; + } + if (pChannelMap != NULL) { + MA_ZERO_MEMORY(pChannelMap, sizeof(*pChannelMap) * channelMapCap); + } + + if (pVorbis == NULL) { + return MA_INVALID_OPERATION; + } + + if (pFormat != NULL) { + *pFormat = pVorbis->format; + } + +#if !defined(MA_NO_VORBIS) + { + if (pChannels != NULL) { + *pChannels = pVorbis->channels; + } + + if (pSampleRate != NULL) { + *pSampleRate = pVorbis->sampleRate; + } + + if (pChannelMap != NULL) { + ma_channel_map_init_standard(ma_standard_channel_map_vorbis, pChannelMap, channelMapCap, pVorbis->channels); + } + + return MA_SUCCESS; + } +#else + { + /* vorbis is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_stbvorbis_get_cursor_in_pcm_frames(ma_stbvorbis* pVorbis, ma_uint64* pCursor) +{ + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; /* Safety. */ + + if (pVorbis == NULL) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_NO_VORBIS) + { + *pCursor = pVorbis->cursor; + + return MA_SUCCESS; + } +#else + { + /* vorbis is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } +#endif +} + +MA_API ma_result ma_stbvorbis_get_length_in_pcm_frames(ma_stbvorbis* pVorbis, ma_uint64* pLength) +{ + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; /* Safety. */ + + if (pVorbis == NULL) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_NO_VORBIS) + { + if (pVorbis->usingPushMode) { + *pLength = 0; /* I don't know of a good way to determine this reliably with stb_vorbis and push mode. */ + } else { + *pLength = stb_vorbis_stream_length_in_samples(pVorbis->stb); + } + + return MA_SUCCESS; + } +#else + { + /* vorbis is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } +#endif +} + + +static ma_result ma_decoding_backend_init__stbvorbis(void* pUserData, ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_stbvorbis* pVorbis; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. 
*/ + pVorbis = (ma_stbvorbis*)ma_malloc(sizeof(*pVorbis), pAllocationCallbacks); + if (pVorbis == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_stbvorbis_init(onRead, onSeek, onTell, pReadSeekTellUserData, pConfig, pAllocationCallbacks, pVorbis); + if (result != MA_SUCCESS) { + ma_free(pVorbis, pAllocationCallbacks); + return result; + } + + *ppBackend = pVorbis; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_file__stbvorbis(void* pUserData, const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_stbvorbis* pVorbis; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pVorbis = (ma_stbvorbis*)ma_malloc(sizeof(*pVorbis), pAllocationCallbacks); + if (pVorbis == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_stbvorbis_init_file(pFilePath, pConfig, pAllocationCallbacks, pVorbis); + if (result != MA_SUCCESS) { + ma_free(pVorbis, pAllocationCallbacks); + return result; + } + + *ppBackend = pVorbis; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_memory__stbvorbis(void* pUserData, const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_stbvorbis* pVorbis; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. 
*/ + pVorbis = (ma_stbvorbis*)ma_malloc(sizeof(*pVorbis), pAllocationCallbacks); + if (pVorbis == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_stbvorbis_init_memory(pData, dataSize, pConfig, pAllocationCallbacks, pVorbis); + if (result != MA_SUCCESS) { + ma_free(pVorbis, pAllocationCallbacks); + return result; + } + + *ppBackend = pVorbis; + + return MA_SUCCESS; +} + +static void ma_decoding_backend_uninit__stbvorbis(void* pUserData, ma_data_source* pBackend, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_stbvorbis* pVorbis = (ma_stbvorbis*)pBackend; + + (void)pUserData; + + ma_stbvorbis_uninit(pVorbis, pAllocationCallbacks); + ma_free(pVorbis, pAllocationCallbacks); +} + +static ma_decoding_backend_vtable g_ma_decoding_backend_vtable_stbvorbis = +{ + ma_decoding_backend_init__stbvorbis, + ma_decoding_backend_init_file__stbvorbis, + NULL, /* onInitFileW() */ + ma_decoding_backend_init_memory__stbvorbis, + ma_decoding_backend_uninit__stbvorbis +}; + +static ma_result ma_decoder_init_vorbis__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_vtable__internal(&g_ma_decoding_backend_vtable_stbvorbis, NULL, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_vorbis_from_file__internal(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_file__internal(&g_ma_decoding_backend_vtable_stbvorbis, NULL, pFilePath, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_vorbis_from_file_w__internal(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_file_w__internal(&g_ma_decoding_backend_vtable_stbvorbis, NULL, pFilePath, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_vorbis_from_memory__internal(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_memory__internal(&g_ma_decoding_backend_vtable_stbvorbis, NULL, pData, dataSize, pConfig, pDecoder); +} +#endif /* STB_VORBIS_INCLUDE_STB_VORBIS_H */ + + + +static ma_result ma_decoder__init_allocation_callbacks(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + MA_ASSERT(pDecoder != NULL); + + if (pConfig != NULL) { + return ma_allocation_callbacks_init_copy(&pDecoder->allocationCallbacks, &pConfig->allocationCallbacks); + } else { + pDecoder->allocationCallbacks = ma_allocation_callbacks_init_default(); + return MA_SUCCESS; + } +} + +static ma_result ma_decoder__data_source_on_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_decoder_read_pcm_frames((ma_decoder*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_decoder__data_source_on_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_decoder_seek_to_pcm_frame((ma_decoder*)pDataSource, frameIndex); +} + +static ma_result ma_decoder__data_source_on_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + return ma_decoder_get_data_format((ma_decoder*)pDataSource, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); +} + +static ma_result ma_decoder__data_source_on_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + return ma_decoder_get_cursor_in_pcm_frames((ma_decoder*)pDataSource, pCursor); +} + +static ma_result ma_decoder__data_source_on_get_length(ma_data_source* pDataSource, 
ma_uint64* pLength) +{ + return ma_decoder_get_length_in_pcm_frames((ma_decoder*)pDataSource, pLength); +} + +static ma_data_source_vtable g_ma_decoder_data_source_vtable = +{ + ma_decoder__data_source_on_read, + ma_decoder__data_source_on_seek, + ma_decoder__data_source_on_get_data_format, + ma_decoder__data_source_on_get_cursor, + ma_decoder__data_source_on_get_length, + NULL, /* onSetLooping */ + 0 +}; + +static ma_result ma_decoder__preinit(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, ma_decoder_tell_proc onTell, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + + MA_ASSERT(pConfig != NULL); + + if (pDecoder == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pDecoder); + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_decoder_data_source_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pDecoder->ds); + if (result != MA_SUCCESS) { + return result; + } + + pDecoder->onRead = onRead; + pDecoder->onSeek = onSeek; + pDecoder->onTell = onTell; + pDecoder->pUserData = pUserData; + + result = ma_decoder__init_allocation_callbacks(pConfig, pDecoder); + if (result != MA_SUCCESS) { + ma_data_source_uninit(&pDecoder->ds); + return result; + } + + return MA_SUCCESS; +} + +static ma_result ma_decoder__postinit(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + + result = ma_decoder__init_data_converter(pDecoder, pConfig); + + /* If we failed post initialization we need to uninitialize the decoder before returning to prevent a memory leak. */ + if (result != MA_SUCCESS) { + ma_decoder_uninit(pDecoder); + return result; + } + + return result; +} + + +static ma_result ma_decoder_init__internal(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = MA_NO_BACKEND; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + /* Silence some warnings in the case that we don't have any decoder backends enabled. */ + (void)onRead; + (void)onSeek; + (void)pUserData; + + + /* If we've specified a specific encoding type, try that first. */ + if (pConfig->encodingFormat != ma_encoding_format_unknown) { +#ifdef MA_HAS_WAV + if (pConfig->encodingFormat == ma_encoding_format_wav) { + result = ma_decoder_init_wav__internal(pConfig, pDecoder); + } +#endif +#ifdef MA_HAS_FLAC + if (pConfig->encodingFormat == ma_encoding_format_flac) { + result = ma_decoder_init_flac__internal(pConfig, pDecoder); + } +#endif +#ifdef MA_HAS_MP3 + if (pConfig->encodingFormat == ma_encoding_format_mp3) { + result = ma_decoder_init_mp3__internal(pConfig, pDecoder); + } +#endif +#ifdef MA_HAS_VORBIS + if (pConfig->encodingFormat == ma_encoding_format_vorbis) { + result = ma_decoder_init_vorbis__internal(pConfig, pDecoder); + } +#endif + + /* If we weren't able to initialize the decoder, seek back to the start to give the next attempts a clean start. */ + if (result != MA_SUCCESS) { + onSeek(pDecoder, 0, ma_seek_origin_start); + } + } + + if (result != MA_SUCCESS) { + /* Getting here means we couldn't load a specific decoding backend based on the encoding format. */ + + /* + We use trial and error to open a decoder. We prioritize custom decoders so that if they + implement the same encoding format they take priority over the built-in decoders. 
+ */ + if (result != MA_SUCCESS) { + result = ma_decoder_init_custom__internal(pConfig, pDecoder); + if (result != MA_SUCCESS) { + onSeek(pDecoder, 0, ma_seek_origin_start); + } + } + + /* + If we get to this point and we still haven't found a decoder, and the caller has requested a + specific encoding format, there's no hope for it. Abort. + */ + if (pConfig->encodingFormat != ma_encoding_format_unknown) { + return MA_NO_BACKEND; + } + +#ifdef MA_HAS_WAV + if (result != MA_SUCCESS) { + result = ma_decoder_init_wav__internal(pConfig, pDecoder); + if (result != MA_SUCCESS) { + onSeek(pDecoder, 0, ma_seek_origin_start); + } + } +#endif +#ifdef MA_HAS_FLAC + if (result != MA_SUCCESS) { + result = ma_decoder_init_flac__internal(pConfig, pDecoder); + if (result != MA_SUCCESS) { + onSeek(pDecoder, 0, ma_seek_origin_start); + } + } +#endif +#ifdef MA_HAS_MP3 + if (result != MA_SUCCESS) { + result = ma_decoder_init_mp3__internal(pConfig, pDecoder); + if (result != MA_SUCCESS) { + onSeek(pDecoder, 0, ma_seek_origin_start); + } + } +#endif +#ifdef MA_HAS_VORBIS + if (result != MA_SUCCESS) { + result = ma_decoder_init_vorbis__internal(pConfig, pDecoder); + if (result != MA_SUCCESS) { + onSeek(pDecoder, 0, ma_seek_origin_start); + } + } +#endif + } + + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(pConfig, pDecoder); +} + +MA_API ma_result ma_decoder_init(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_decoder_config config; + ma_result result; + + config = ma_decoder_config_init_copy(pConfig); + + result = ma_decoder__preinit(onRead, onSeek, NULL, pUserData, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder_init__internal(onRead, onSeek, pUserData, &config, pDecoder); +} + + +static ma_result ma_decoder__on_read_memory(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead, size_t* pBytesRead) +{ + size_t bytesRemaining; + + MA_ASSERT(pDecoder->data.memory.dataSize >= pDecoder->data.memory.currentReadPos); + + if (pBytesRead != NULL) { + *pBytesRead = 0; + } + + bytesRemaining = pDecoder->data.memory.dataSize - pDecoder->data.memory.currentReadPos; + if (bytesToRead > bytesRemaining) { + bytesToRead = bytesRemaining; + } + + if (bytesRemaining == 0) { + return MA_AT_END; + } + + if (bytesToRead > 0) { + MA_COPY_MEMORY(pBufferOut, pDecoder->data.memory.pData + pDecoder->data.memory.currentReadPos, bytesToRead); + pDecoder->data.memory.currentReadPos += bytesToRead; + } + + if (pBytesRead != NULL) { + *pBytesRead = bytesToRead; + } + + return MA_SUCCESS; +} + +static ma_result ma_decoder__on_seek_memory(ma_decoder* pDecoder, ma_int64 byteOffset, ma_seek_origin origin) +{ + if (byteOffset > 0 && (ma_uint64)byteOffset > MA_SIZE_MAX) { + return MA_BAD_SEEK; + } + + if (origin == ma_seek_origin_current) { + if (byteOffset > 0) { + if (pDecoder->data.memory.currentReadPos + byteOffset > pDecoder->data.memory.dataSize) { + byteOffset = (ma_int64)(pDecoder->data.memory.dataSize - pDecoder->data.memory.currentReadPos); /* Trying to seek too far forward. */ + } + + pDecoder->data.memory.currentReadPos += (size_t)byteOffset; + } else { + if (pDecoder->data.memory.currentReadPos < (size_t)-byteOffset) { + byteOffset = -(ma_int64)pDecoder->data.memory.currentReadPos; /* Trying to seek too far backwards. 
*/ + } + + pDecoder->data.memory.currentReadPos -= (size_t)-byteOffset; + } + } else { + if (origin == ma_seek_origin_end) { + if (byteOffset < 0) { + byteOffset = -byteOffset; + } + + if (byteOffset > (ma_int64)pDecoder->data.memory.dataSize) { + pDecoder->data.memory.currentReadPos = 0; /* Trying to seek too far back. */ + } else { + pDecoder->data.memory.currentReadPos = pDecoder->data.memory.dataSize - (size_t)byteOffset; + } + } else { + if ((size_t)byteOffset <= pDecoder->data.memory.dataSize) { + pDecoder->data.memory.currentReadPos = (size_t)byteOffset; + } else { + pDecoder->data.memory.currentReadPos = pDecoder->data.memory.dataSize; /* Trying to seek too far forward. */ + } + } + } + + return MA_SUCCESS; +} + +static ma_result ma_decoder__on_tell_memory(ma_decoder* pDecoder, ma_int64* pCursor) +{ + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pCursor != NULL); + + *pCursor = (ma_int64)pDecoder->data.memory.currentReadPos; + + return MA_SUCCESS; +} + +static ma_result ma_decoder__preinit_memory_wrapper(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = ma_decoder__preinit(ma_decoder__on_read_memory, ma_decoder__on_seek_memory, ma_decoder__on_tell_memory, NULL, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + if (pData == NULL || dataSize == 0) { + return MA_INVALID_ARGS; + } + + pDecoder->data.memory.pData = (const ma_uint8*)pData; + pDecoder->data.memory.dataSize = dataSize; + pDecoder->data.memory.currentReadPos = 0; + + (void)pConfig; + return MA_SUCCESS; +} + +MA_API ma_result ma_decoder_init_memory(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_decoder_config config; + + config = ma_decoder_config_init_copy(pConfig); + + result = ma_decoder__preinit(NULL, NULL, NULL, NULL, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + if (pData == NULL || dataSize == 0) { + return MA_INVALID_ARGS; + } + + /* If the backend has support for loading from a file path we'll want to use that. If that all fails we'll fall back to the VFS path. */ + result = MA_NO_BACKEND; + + if (config.encodingFormat != ma_encoding_format_unknown) { +#ifdef MA_HAS_WAV + if (config.encodingFormat == ma_encoding_format_wav) { + result = ma_decoder_init_wav_from_memory__internal(pData, dataSize, &config, pDecoder); + } +#endif +#ifdef MA_HAS_FLAC + if (config.encodingFormat == ma_encoding_format_flac) { + result = ma_decoder_init_flac_from_memory__internal(pData, dataSize, &config, pDecoder); + } +#endif +#ifdef MA_HAS_MP3 + if (config.encodingFormat == ma_encoding_format_mp3) { + result = ma_decoder_init_mp3_from_memory__internal(pData, dataSize, &config, pDecoder); + } +#endif +#ifdef MA_HAS_VORBIS + if (config.encodingFormat == ma_encoding_format_vorbis) { + result = ma_decoder_init_vorbis_from_memory__internal(pData, dataSize, &config, pDecoder); + } +#endif + } + + if (result != MA_SUCCESS) { + /* Getting here means we weren't able to initialize a decoder of a specific encoding format. */ + + /* + We use trial and error to open a decoder. We prioritize custom decoders so that if they + implement the same encoding format they take priority over the built-in decoders. + */ + result = ma_decoder_init_custom_from_memory__internal(pData, dataSize, &config, pDecoder); + + /* + If we get to this point and we still haven't found a decoder, and the caller has requested a + specific encoding format, there's no hope for it. Abort. 
+ */ + if (result != MA_SUCCESS && config.encodingFormat != ma_encoding_format_unknown) { + return MA_NO_BACKEND; + } + + /* Use trial and error for stock decoders. */ + if (result != MA_SUCCESS) { +#ifdef MA_HAS_WAV + if (result != MA_SUCCESS) { + result = ma_decoder_init_wav_from_memory__internal(pData, dataSize, &config, pDecoder); + } +#endif +#ifdef MA_HAS_FLAC + if (result != MA_SUCCESS) { + result = ma_decoder_init_flac_from_memory__internal(pData, dataSize, &config, pDecoder); + } +#endif +#ifdef MA_HAS_MP3 + if (result != MA_SUCCESS) { + result = ma_decoder_init_mp3_from_memory__internal(pData, dataSize, &config, pDecoder); + } +#endif +#ifdef MA_HAS_VORBIS + if (result != MA_SUCCESS) { + result = ma_decoder_init_vorbis_from_memory__internal(pData, dataSize, &config, pDecoder); + } +#endif + } + } + + /* + If at this point we still haven't successfully initialized the decoder it most likely means + the backend doesn't have an implementation for loading from a file path. We'll try using + miniaudio's built-in file IO for loading file. + */ + if (result == MA_SUCCESS) { + /* Initialization was successful. Finish up. */ + result = ma_decoder__postinit(&config, pDecoder); + if (result != MA_SUCCESS) { + /* + The backend was initialized successfully, but for some reason post-initialization failed. This is most likely + due to an out of memory error. We're going to abort with an error here and not try to recover. + */ + if (pDecoder->pBackendVTable != NULL && pDecoder->pBackendVTable->onUninit != NULL) { + pDecoder->pBackendVTable->onUninit(pDecoder->pBackendUserData, &pDecoder->pBackend, &pDecoder->allocationCallbacks); + } + + return result; + } + } else { + /* Probably no implementation for loading from a block of memory. Use miniaudio's abstraction instead. */ + result = ma_decoder__preinit_memory_wrapper(pData, dataSize, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_decoder_init__internal(ma_decoder__on_read_memory, ma_decoder__on_seek_memory, NULL, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + + +#if defined(MA_HAS_WAV) || \ + defined(MA_HAS_MP3) || \ + defined(MA_HAS_FLAC) || \ + defined(MA_HAS_VORBIS) || \ + defined(MA_HAS_OPUS) +#define MA_HAS_PATH_API +#endif + +#if defined(MA_HAS_PATH_API) +static const char* ma_path_file_name(const char* path) +{ + const char* fileName; + + if (path == NULL) { + return NULL; + } + + fileName = path; + + /* We just loop through the path until we find the last slash. */ + while (path[0] != '\0') { + if (path[0] == '/' || path[0] == '\\') { + fileName = path; + } + + path += 1; + } + + /* At this point the file name is sitting on a slash, so just move forward. */ + while (fileName[0] != '\0' && (fileName[0] == '/' || fileName[0] == '\\')) { + fileName += 1; + } + + return fileName; +} + +static const wchar_t* ma_path_file_name_w(const wchar_t* path) +{ + const wchar_t* fileName; + + if (path == NULL) { + return NULL; + } + + fileName = path; + + /* We just loop through the path until we find the last slash. */ + while (path[0] != '\0') { + if (path[0] == '/' || path[0] == '\\') { + fileName = path; + } + + path += 1; + } + + /* At this point the file name is sitting on a slash, so just move forward. 
*/ + while (fileName[0] != '\0' && (fileName[0] == '/' || fileName[0] == '\\')) { + fileName += 1; + } + + return fileName; +} + + +static const char* ma_path_extension(const char* path) +{ + const char* extension; + const char* lastOccurance; + + if (path == NULL) { + path = ""; + } + + extension = ma_path_file_name(path); + lastOccurance = NULL; + + /* Just find the last '.' and return. */ + while (extension[0] != '\0') { + if (extension[0] == '.') { + extension += 1; + lastOccurance = extension; + } + + extension += 1; + } + + return (lastOccurance != NULL) ? lastOccurance : extension; +} + +static const wchar_t* ma_path_extension_w(const wchar_t* path) +{ + const wchar_t* extension; + const wchar_t* lastOccurance; + + if (path == NULL) { + path = L""; + } + + extension = ma_path_file_name_w(path); + lastOccurance = NULL; + + /* Just find the last '.' and return. */ + while (extension[0] != '\0') { + if (extension[0] == '.') { + extension += 1; + lastOccurance = extension; + } + + extension += 1; + } + + return (lastOccurance != NULL) ? lastOccurance : extension; +} + + +static ma_bool32 ma_path_extension_equal(const char* path, const char* extension) +{ + const char* ext1; + const char* ext2; + + if (path == NULL || extension == NULL) { + return MA_FALSE; + } + + ext1 = extension; + ext2 = ma_path_extension(path); + +#if defined(_MSC_VER) || defined(__DMC__) + return _stricmp(ext1, ext2) == 0; +#else + return strcasecmp(ext1, ext2) == 0; +#endif +} + +static ma_bool32 ma_path_extension_equal_w(const wchar_t* path, const wchar_t* extension) +{ + const wchar_t* ext1; + const wchar_t* ext2; + + if (path == NULL || extension == NULL) { + return MA_FALSE; + } + + ext1 = extension; + ext2 = ma_path_extension_w(path); + +#if defined(_MSC_VER) || defined(__WATCOMC__) || defined(__DMC__) + return _wcsicmp(ext1, ext2) == 0; +#else + /* + I'm not aware of a wide character version of strcasecmp(). I'm therefore converting the extensions to multibyte strings and comparing those. This + isn't the most efficient way to do it, but it should work OK. 
+ */ + { + char ext1MB[4096]; + char ext2MB[4096]; + const wchar_t* pext1 = ext1; + const wchar_t* pext2 = ext2; + mbstate_t mbs1; + mbstate_t mbs2; + + MA_ZERO_OBJECT(&mbs1); + MA_ZERO_OBJECT(&mbs2); + + if (wcsrtombs(ext1MB, &pext1, sizeof(ext1MB), &mbs1) == (size_t)-1) { + return MA_FALSE; + } + if (wcsrtombs(ext2MB, &pext2, sizeof(ext2MB), &mbs2) == (size_t)-1) { + return MA_FALSE; + } + + return strcasecmp(ext1MB, ext2MB) == 0; + } +#endif +} +#endif /* MA_HAS_PATH_API */ + + + +static ma_result ma_decoder__on_read_vfs(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead, size_t* pBytesRead) +{ + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pBufferOut != NULL); + + return ma_vfs_or_default_read(pDecoder->data.vfs.pVFS, pDecoder->data.vfs.file, pBufferOut, bytesToRead, pBytesRead); +} + +static ma_result ma_decoder__on_seek_vfs(ma_decoder* pDecoder, ma_int64 offset, ma_seek_origin origin) +{ + MA_ASSERT(pDecoder != NULL); + + return ma_vfs_or_default_seek(pDecoder->data.vfs.pVFS, pDecoder->data.vfs.file, offset, origin); +} + +static ma_result ma_decoder__on_tell_vfs(ma_decoder* pDecoder, ma_int64* pCursor) +{ + MA_ASSERT(pDecoder != NULL); + + return ma_vfs_or_default_tell(pDecoder->data.vfs.pVFS, pDecoder->data.vfs.file, pCursor); +} + +static ma_result ma_decoder__preinit_vfs(ma_vfs* pVFS, const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_vfs_file file; + + result = ma_decoder__preinit(ma_decoder__on_read_vfs, ma_decoder__on_seek_vfs, ma_decoder__on_tell_vfs, NULL, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + if (pFilePath == NULL || pFilePath[0] == '\0') { + return MA_INVALID_ARGS; + } + + result = ma_vfs_or_default_open(pVFS, pFilePath, MA_OPEN_MODE_READ, &file); + if (result != MA_SUCCESS) { + return result; + } + + pDecoder->data.vfs.pVFS = pVFS; + pDecoder->data.vfs.file = file; + + return MA_SUCCESS; +} + +MA_API ma_result ma_decoder_init_vfs(ma_vfs* pVFS, const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_decoder_config config; + + config = ma_decoder_config_init_copy(pConfig); + result = ma_decoder__preinit_vfs(pVFS, pFilePath, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + result = MA_NO_BACKEND; + + if (config.encodingFormat != ma_encoding_format_unknown) { +#ifdef MA_HAS_WAV + if (config.encodingFormat == ma_encoding_format_wav) { + result = ma_decoder_init_wav__internal(&config, pDecoder); + } +#endif +#ifdef MA_HAS_FLAC + if (config.encodingFormat == ma_encoding_format_flac) { + result = ma_decoder_init_flac__internal(&config, pDecoder); + } +#endif +#ifdef MA_HAS_MP3 + if (config.encodingFormat == ma_encoding_format_mp3) { + result = ma_decoder_init_mp3__internal(&config, pDecoder); + } +#endif +#ifdef MA_HAS_VORBIS + if (config.encodingFormat == ma_encoding_format_vorbis) { + result = ma_decoder_init_vorbis__internal(&config, pDecoder); + } +#endif + + /* Make sure we seek back to the start if we didn't initialize a decoder successfully so the next attempts have a fresh start. */ + if (result != MA_SUCCESS) { + ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start); + } + } + + if (result != MA_SUCCESS) { + /* Getting here means we weren't able to initialize a decoder of a specific encoding format. */ + + /* + We use trial and error to open a decoder. We prioritize custom decoders so that if they + implement the same encoding format they take priority over the built-in decoders. 
+ */ + if (result != MA_SUCCESS) { + result = ma_decoder_init_custom__internal(&config, pDecoder); + if (result != MA_SUCCESS) { + ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start); + } + } + + /* + If we get to this point and we still haven't found a decoder, and the caller has requested a + specific encoding format, there's no hope for it. Abort. + */ + if (config.encodingFormat != ma_encoding_format_unknown) { + return MA_NO_BACKEND; + } + +#ifdef MA_HAS_WAV + if (result != MA_SUCCESS && ma_path_extension_equal(pFilePath, "wav")) { + result = ma_decoder_init_wav__internal(&config, pDecoder); + if (result != MA_SUCCESS) { + ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start); + } + } +#endif +#ifdef MA_HAS_FLAC + if (result != MA_SUCCESS && ma_path_extension_equal(pFilePath, "flac")) { + result = ma_decoder_init_flac__internal(&config, pDecoder); + if (result != MA_SUCCESS) { + ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start); + } + } +#endif +#ifdef MA_HAS_MP3 + if (result != MA_SUCCESS && ma_path_extension_equal(pFilePath, "mp3")) { + result = ma_decoder_init_mp3__internal(&config, pDecoder); + if (result != MA_SUCCESS) { + ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start); + } + } +#endif + } + + /* If we still haven't got a result just use trial and error. Otherwise we can finish up. */ + if (result != MA_SUCCESS) { + result = ma_decoder_init__internal(ma_decoder__on_read_vfs, ma_decoder__on_seek_vfs, NULL, &config, pDecoder); + } else { + result = ma_decoder__postinit(&config, pDecoder); + } + + if (result != MA_SUCCESS) { + if (pDecoder->data.vfs.file != NULL) { /* <-- Will be reset to NULL if ma_decoder_uninit() is called in one of the steps above which allows us to avoid a double close of the file. */ + ma_vfs_or_default_close(pVFS, pDecoder->data.vfs.file); + } + + return result; + } + + return MA_SUCCESS; +} + + +static ma_result ma_decoder__preinit_vfs_w(ma_vfs* pVFS, const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_vfs_file file; + + result = ma_decoder__preinit(ma_decoder__on_read_vfs, ma_decoder__on_seek_vfs, ma_decoder__on_tell_vfs, NULL, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + if (pFilePath == NULL || pFilePath[0] == '\0') { + return MA_INVALID_ARGS; + } + + result = ma_vfs_or_default_open_w(pVFS, pFilePath, MA_OPEN_MODE_READ, &file); + if (result != MA_SUCCESS) { + return result; + } + + pDecoder->data.vfs.pVFS = pVFS; + pDecoder->data.vfs.file = file; + + return MA_SUCCESS; +} + +MA_API ma_result ma_decoder_init_vfs_w(ma_vfs* pVFS, const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_decoder_config config; + + config = ma_decoder_config_init_copy(pConfig); + result = ma_decoder__preinit_vfs_w(pVFS, pFilePath, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + result = MA_NO_BACKEND; + + if (config.encodingFormat != ma_encoding_format_unknown) { +#ifdef MA_HAS_WAV + if (config.encodingFormat == ma_encoding_format_wav) { + result = ma_decoder_init_wav__internal(&config, pDecoder); + } +#endif +#ifdef MA_HAS_FLAC + if (config.encodingFormat == ma_encoding_format_flac) { + result = ma_decoder_init_flac__internal(&config, pDecoder); + } +#endif +#ifdef MA_HAS_MP3 + if (config.encodingFormat == ma_encoding_format_mp3) { + result = ma_decoder_init_mp3__internal(&config, pDecoder); + } +#endif +#ifdef MA_HAS_VORBIS + if (config.encodingFormat == 
ma_encoding_format_vorbis) { + result = ma_decoder_init_vorbis__internal(&config, pDecoder); + } +#endif + + /* Make sure we seek back to the start if we didn't initialize a decoder successfully so the next attempts have a fresh start. */ + if (result != MA_SUCCESS) { + ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start); + } + } + + if (result != MA_SUCCESS) { + /* Getting here means we weren't able to initialize a decoder of a specific encoding format. */ + + /* + We use trial and error to open a decoder. We prioritize custom decoders so that if they + implement the same encoding format they take priority over the built-in decoders. + */ + if (result != MA_SUCCESS) { + result = ma_decoder_init_custom__internal(&config, pDecoder); + if (result != MA_SUCCESS) { + ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start); + } + } + + /* + If we get to this point and we still haven't found a decoder, and the caller has requested a + specific encoding format, there's no hope for it. Abort. + */ + if (config.encodingFormat != ma_encoding_format_unknown) { + return MA_NO_BACKEND; + } + +#ifdef MA_HAS_WAV + if (result != MA_SUCCESS && ma_path_extension_equal_w(pFilePath, L"wav")) { + result = ma_decoder_init_wav__internal(&config, pDecoder); + if (result != MA_SUCCESS) { + ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start); + } + } +#endif +#ifdef MA_HAS_FLAC + if (result != MA_SUCCESS && ma_path_extension_equal_w(pFilePath, L"flac")) { + result = ma_decoder_init_flac__internal(&config, pDecoder); + if (result != MA_SUCCESS) { + ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start); + } + } +#endif +#ifdef MA_HAS_MP3 + if (result != MA_SUCCESS && ma_path_extension_equal_w(pFilePath, L"mp3")) { + result = ma_decoder_init_mp3__internal(&config, pDecoder); + if (result != MA_SUCCESS) { + ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start); + } + } +#endif + } + + /* If we still haven't got a result just use trial and error. Otherwise we can finish up. */ + if (result != MA_SUCCESS) { + result = ma_decoder_init__internal(ma_decoder__on_read_vfs, ma_decoder__on_seek_vfs, NULL, &config, pDecoder); + } else { + result = ma_decoder__postinit(&config, pDecoder); + } + + if (result != MA_SUCCESS) { + ma_vfs_or_default_close(pVFS, pDecoder->data.vfs.file); + return result; + } + + return MA_SUCCESS; +} + + +static ma_result ma_decoder__preinit_file(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + + result = ma_decoder__preinit(NULL, NULL, NULL, NULL, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + if (pFilePath == NULL || pFilePath[0] == '\0') { + return MA_INVALID_ARGS; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_decoder_init_file(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_decoder_config config; + + config = ma_decoder_config_init_copy(pConfig); + result = ma_decoder__preinit_file(pFilePath, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + /* If the backend has support for loading from a file path we'll want to use that. If that all fails we'll fall back to the VFS path. 
*/ + result = MA_NO_BACKEND; + + if (config.encodingFormat != ma_encoding_format_unknown) { +#ifdef MA_HAS_WAV + if (config.encodingFormat == ma_encoding_format_wav) { + result = ma_decoder_init_wav_from_file__internal(pFilePath, &config, pDecoder); + } +#endif +#ifdef MA_HAS_FLAC + if (config.encodingFormat == ma_encoding_format_flac) { + result = ma_decoder_init_flac_from_file__internal(pFilePath, &config, pDecoder); + } +#endif +#ifdef MA_HAS_MP3 + if (config.encodingFormat == ma_encoding_format_mp3) { + result = ma_decoder_init_mp3_from_file__internal(pFilePath, &config, pDecoder); + } +#endif +#ifdef MA_HAS_VORBIS + if (config.encodingFormat == ma_encoding_format_vorbis) { + result = ma_decoder_init_vorbis_from_file__internal(pFilePath, &config, pDecoder); + } +#endif + } + + if (result != MA_SUCCESS) { + /* Getting here means we weren't able to initialize a decoder of a specific encoding format. */ + + /* + We use trial and error to open a decoder. We prioritize custom decoders so that if they + implement the same encoding format they take priority over the built-in decoders. + */ + result = ma_decoder_init_custom_from_file__internal(pFilePath, &config, pDecoder); + + /* + If we get to this point and we still haven't found a decoder, and the caller has requested a + specific encoding format, there's no hope for it. Abort. + */ + if (result != MA_SUCCESS && config.encodingFormat != ma_encoding_format_unknown) { + return MA_NO_BACKEND; + } + + /* First try loading based on the file extension so we don't waste time opening and closing files. */ +#ifdef MA_HAS_WAV + if (result != MA_SUCCESS && ma_path_extension_equal(pFilePath, "wav")) { + result = ma_decoder_init_wav_from_file__internal(pFilePath, &config, pDecoder); + } +#endif +#ifdef MA_HAS_FLAC + if (result != MA_SUCCESS && ma_path_extension_equal(pFilePath, "flac")) { + result = ma_decoder_init_flac_from_file__internal(pFilePath, &config, pDecoder); + } +#endif +#ifdef MA_HAS_MP3 + if (result != MA_SUCCESS && ma_path_extension_equal(pFilePath, "mp3")) { + result = ma_decoder_init_mp3_from_file__internal(pFilePath, &config, pDecoder); + } +#endif +#ifdef MA_HAS_VORBIS + if (result != MA_SUCCESS && ma_path_extension_equal(pFilePath, "ogg")) { + result = ma_decoder_init_vorbis_from_file__internal(pFilePath, &config, pDecoder); + } +#endif + + /* + If we still haven't got a result just use trial and error. Custom decoders have already been attempted, so here we + need only iterate over our stock decoders. + */ + if (result != MA_SUCCESS) { +#ifdef MA_HAS_WAV + if (result != MA_SUCCESS) { + result = ma_decoder_init_wav_from_file__internal(pFilePath, &config, pDecoder); + } +#endif +#ifdef MA_HAS_FLAC + if (result != MA_SUCCESS) { + result = ma_decoder_init_flac_from_file__internal(pFilePath, &config, pDecoder); + } +#endif +#ifdef MA_HAS_MP3 + if (result != MA_SUCCESS) { + result = ma_decoder_init_mp3_from_file__internal(pFilePath, &config, pDecoder); + } +#endif +#ifdef MA_HAS_VORBIS + if (result != MA_SUCCESS) { + result = ma_decoder_init_vorbis_from_file__internal(pFilePath, &config, pDecoder); + } +#endif + } + } + + /* + If at this point we still haven't successfully initialized the decoder it most likely means + the backend doesn't have an implementation for loading from a file path. We'll try using + miniaudio's built-in file IO for loading file. + */ + if (result == MA_SUCCESS) { + /* Initialization was successful. Finish up. 
*/
+ result = ma_decoder__postinit(&config, pDecoder);
+ if (result != MA_SUCCESS) {
+ /*
+ The backend was initialized successfully, but for some reason post-initialization failed. This is most likely
+ due to an out of memory error. We're going to abort with an error here and not try to recover.
+ */
+ if (pDecoder->pBackendVTable != NULL && pDecoder->pBackendVTable->onUninit != NULL) {
+ pDecoder->pBackendVTable->onUninit(pDecoder->pBackendUserData, &pDecoder->pBackend, &pDecoder->allocationCallbacks);
+ }
+
+ return result;
+ }
+ } else {
+ /* Probably no implementation for loading from a file path. Use miniaudio's file IO instead. */
+ result = ma_decoder_init_vfs(NULL, pFilePath, pConfig, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ }
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_decoder__preinit_file_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+{
+ ma_result result;
+
+ result = ma_decoder__preinit(NULL, NULL, NULL, NULL, pConfig, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ if (pFilePath == NULL || pFilePath[0] == '\0') {
+ return MA_INVALID_ARGS;
+ }
+
+ return MA_SUCCESS;
+}
+
+MA_API ma_result ma_decoder_init_file_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+{
+ ma_result result;
+ ma_decoder_config config;
+
+ config = ma_decoder_config_init_copy(pConfig);
+ result = ma_decoder__preinit_file_w(pFilePath, &config, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ /* If the backend has support for loading from a file path we'll want to use that. If that all fails we'll fall back to the VFS path. */
+ result = MA_NO_BACKEND;
+
+ if (config.encodingFormat != ma_encoding_format_unknown) {
+#ifdef MA_HAS_WAV
+ if (config.encodingFormat == ma_encoding_format_wav) {
+ result = ma_decoder_init_wav_from_file_w__internal(pFilePath, &config, pDecoder);
+ }
+#endif
+#ifdef MA_HAS_FLAC
+ if (config.encodingFormat == ma_encoding_format_flac) {
+ result = ma_decoder_init_flac_from_file_w__internal(pFilePath, &config, pDecoder);
+ }
+#endif
+#ifdef MA_HAS_MP3
+ if (config.encodingFormat == ma_encoding_format_mp3) {
+ result = ma_decoder_init_mp3_from_file_w__internal(pFilePath, &config, pDecoder);
+ }
+#endif
+#ifdef MA_HAS_VORBIS
+ if (config.encodingFormat == ma_encoding_format_vorbis) {
+ result = ma_decoder_init_vorbis_from_file_w__internal(pFilePath, &config, pDecoder);
+ }
+#endif
+ }
+
+ if (result != MA_SUCCESS) {
+ /* Getting here means we weren't able to initialize a decoder of a specific encoding format. */
+
+ /*
+ We use trial and error to open a decoder. We prioritize custom decoders so that if they
+ implement the same encoding format they take priority over the built-in decoders.
+ */
+ result = ma_decoder_init_custom_from_file_w__internal(pFilePath, &config, pDecoder);
+
+ /*
+ If we get to this point and we still haven't found a decoder, and the caller has requested a
+ specific encoding format, there's no hope for it. Abort.
+ */
+ if (result != MA_SUCCESS && config.encodingFormat != ma_encoding_format_unknown) {
+ return MA_NO_BACKEND;
+ }
+
+ /* First try loading based on the file extension so we don't waste time opening and closing files. 
*/ +#ifdef MA_HAS_WAV + if (result != MA_SUCCESS && ma_path_extension_equal_w(pFilePath, L"wav")) { + result = ma_decoder_init_wav_from_file_w__internal(pFilePath, &config, pDecoder); + } +#endif +#ifdef MA_HAS_FLAC + if (result != MA_SUCCESS && ma_path_extension_equal_w(pFilePath, L"flac")) { + result = ma_decoder_init_flac_from_file_w__internal(pFilePath, &config, pDecoder); + } +#endif +#ifdef MA_HAS_MP3 + if (result != MA_SUCCESS && ma_path_extension_equal_w(pFilePath, L"mp3")) { + result = ma_decoder_init_mp3_from_file_w__internal(pFilePath, &config, pDecoder); + } +#endif +#ifdef MA_HAS_VORBIS + if (result != MA_SUCCESS && ma_path_extension_equal_w(pFilePath, L"ogg")) { + result = ma_decoder_init_vorbis_from_file_w__internal(pFilePath, &config, pDecoder); + } +#endif + + /* + If we still haven't got a result just use trial and error. Custom decoders have already been attempted, so here we + need only iterate over our stock decoders. + */ + if (result != MA_SUCCESS) { +#ifdef MA_HAS_WAV + if (result != MA_SUCCESS) { + result = ma_decoder_init_wav_from_file_w__internal(pFilePath, &config, pDecoder); + } +#endif +#ifdef MA_HAS_FLAC + if (result != MA_SUCCESS) { + result = ma_decoder_init_flac_from_file_w__internal(pFilePath, &config, pDecoder); + } +#endif +#ifdef MA_HAS_MP3 + if (result != MA_SUCCESS) { + result = ma_decoder_init_mp3_from_file_w__internal(pFilePath, &config, pDecoder); + } +#endif +#ifdef MA_HAS_VORBIS + if (result != MA_SUCCESS) { + result = ma_decoder_init_vorbis_from_file_w__internal(pFilePath, &config, pDecoder); + } +#endif + } + } + + /* + If at this point we still haven't successfully initialized the decoder it most likely means + the backend doesn't have an implementation for loading from a file path. We'll try using + miniaudio's built-in file IO for loading file. + */ + if (result == MA_SUCCESS) { + /* Initialization was successful. Finish up. */ + result = ma_decoder__postinit(&config, pDecoder); + if (result != MA_SUCCESS) { + /* + The backend was initialized successfully, but for some reason post-initialization failed. This is most likely + due to an out of memory error. We're going to abort with an error here and not try to recover. + */ + if (pDecoder->pBackendVTable != NULL && pDecoder->pBackendVTable->onUninit != NULL) { + pDecoder->pBackendVTable->onUninit(pDecoder->pBackendUserData, &pDecoder->pBackend, &pDecoder->allocationCallbacks); + } + + return result; + } + } else { + /* Probably no implementation for loading from a file path. Use miniaudio's file IO instead. 
*/
+ result = ma_decoder_init_vfs_w(NULL, pFilePath, pConfig, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ }
+
+ return MA_SUCCESS;
+}
+
+MA_API ma_result ma_decoder_uninit(ma_decoder* pDecoder)
+{
+ if (pDecoder == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ if (pDecoder->pBackend != NULL) {
+ if (pDecoder->pBackendVTable != NULL && pDecoder->pBackendVTable->onUninit != NULL) {
+ pDecoder->pBackendVTable->onUninit(pDecoder->pBackendUserData, pDecoder->pBackend, &pDecoder->allocationCallbacks);
+ }
+ }
+
+ if (pDecoder->onRead == ma_decoder__on_read_vfs) {
+ ma_vfs_or_default_close(pDecoder->data.vfs.pVFS, pDecoder->data.vfs.file);
+ pDecoder->data.vfs.file = NULL;
+ }
+
+ ma_data_converter_uninit(&pDecoder->converter, &pDecoder->allocationCallbacks);
+ ma_data_source_uninit(&pDecoder->ds);
+
+ if (pDecoder->pInputCache != NULL) {
+ ma_free(pDecoder->pInputCache, &pDecoder->allocationCallbacks);
+ }
+
+ return MA_SUCCESS;
+}
+
+MA_API ma_result ma_decoder_read_pcm_frames(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead)
+{
+ ma_result result = MA_SUCCESS;
+ ma_uint64 totalFramesReadOut;
+ void* pRunningFramesOut;
+
+ if (pFramesRead != NULL) {
+ *pFramesRead = 0; /* Safety. */
+ }
+
+ if (frameCount == 0) {
+ return MA_INVALID_ARGS;
+ }
+
+ if (pDecoder == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ if (pDecoder->pBackend == NULL) {
+ return MA_INVALID_OPERATION;
+ }
+
+ /* Fast path. */
+ if (pDecoder->converter.isPassthrough) {
+ result = ma_data_source_read_pcm_frames(pDecoder->pBackend, pFramesOut, frameCount, &totalFramesReadOut);
+ } else {
+ /*
+ Getting here means we need to do data conversion. If we're seeking forward and are _not_ doing resampling we can run this in a fast path. If we're doing resampling we
+ need to run through each sample because we need to ensure its internal cache is updated.
+ */
+ if (pFramesOut == NULL && pDecoder->converter.hasResampler == MA_FALSE) {
+ result = ma_data_source_read_pcm_frames(pDecoder->pBackend, NULL, frameCount, &totalFramesReadOut);
+ } else {
+ /* Slow path. Need to run everything through the data converter. */
+ ma_format internalFormat;
+ ma_uint32 internalChannels;
+
+ totalFramesReadOut = 0;
+ pRunningFramesOut = pFramesOut;
+
+ result = ma_data_source_get_data_format(pDecoder->pBackend, &internalFormat, &internalChannels, NULL, NULL, 0);
+ if (result != MA_SUCCESS) {
+ return result; /* Failed to retrieve the internal format and channel count. */
+ }
+
+ /*
+ We run a different path depending on whether or not we are using a heap-allocated
+ intermediary buffer or not. If the data converter does not support the calculation of
+ the required number of input frames, we'll use the heap-allocated path. Otherwise we'll
+ use the stack-allocated path.
+ */
+ if (pDecoder->pInputCache != NULL) {
+ /* We don't have a way of determining the required number of input frames, so we need to persistently store input data in a cache. */
+ while (totalFramesReadOut < frameCount) {
+ ma_uint64 framesToReadThisIterationIn;
+ ma_uint64 framesToReadThisIterationOut;
+
+ /* If there's any data available in the cache, that needs to get processed first. 
*/
+ if (pDecoder->inputCacheRemaining > 0) {
+ framesToReadThisIterationOut = (frameCount - totalFramesReadOut);
+ framesToReadThisIterationIn = framesToReadThisIterationOut;
+ if (framesToReadThisIterationIn > pDecoder->inputCacheRemaining) {
+ framesToReadThisIterationIn = pDecoder->inputCacheRemaining;
+ }
+
+ result = ma_data_converter_process_pcm_frames(&pDecoder->converter, ma_offset_pcm_frames_ptr(pDecoder->pInputCache, pDecoder->inputCacheConsumed, internalFormat, internalChannels), &framesToReadThisIterationIn, pRunningFramesOut, &framesToReadThisIterationOut);
+ if (result != MA_SUCCESS) {
+ break;
+ }
+
+ pDecoder->inputCacheConsumed += framesToReadThisIterationIn;
+ pDecoder->inputCacheRemaining -= framesToReadThisIterationIn;
+
+ totalFramesReadOut += framesToReadThisIterationOut;
+
+ if (pRunningFramesOut != NULL) {
+ pRunningFramesOut = ma_offset_ptr(pRunningFramesOut, framesToReadThisIterationOut * ma_get_bytes_per_frame(pDecoder->outputFormat, pDecoder->outputChannels));
+ }
+
+ if (framesToReadThisIterationIn == 0 && framesToReadThisIterationOut == 0) {
+ break; /* We're done. */
+ }
+ }
+
+ /* Getting here means there's no data in the cache and we need to fill it up from the data source. */
+ if (pDecoder->inputCacheRemaining == 0) {
+ pDecoder->inputCacheConsumed = 0;
+
+ result = ma_data_source_read_pcm_frames(pDecoder->pBackend, pDecoder->pInputCache, pDecoder->inputCacheCap, &pDecoder->inputCacheRemaining);
+ if (result != MA_SUCCESS) {
+ break;
+ }
+ }
+ }
+ } else {
+ /* We have a way of determining the required number of input frames so just use the stack. */
+ while (totalFramesReadOut < frameCount) {
+ ma_uint8 pIntermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In internal format. */
+ ma_uint64 intermediaryBufferCap = sizeof(pIntermediaryBuffer) / ma_get_bytes_per_frame(internalFormat, internalChannels);
+ ma_uint64 framesToReadThisIterationIn;
+ ma_uint64 framesReadThisIterationIn;
+ ma_uint64 framesToReadThisIterationOut;
+ ma_uint64 framesReadThisIterationOut;
+ ma_uint64 requiredInputFrameCount;
+
+ framesToReadThisIterationOut = (frameCount - totalFramesReadOut);
+ framesToReadThisIterationIn = framesToReadThisIterationOut;
+ if (framesToReadThisIterationIn > intermediaryBufferCap) {
+ framesToReadThisIterationIn = intermediaryBufferCap;
+ }
+
+ ma_data_converter_get_required_input_frame_count(&pDecoder->converter, framesToReadThisIterationOut, &requiredInputFrameCount);
+ if (framesToReadThisIterationIn > requiredInputFrameCount) {
+ framesToReadThisIterationIn = requiredInputFrameCount;
+ }
+
+ if (requiredInputFrameCount > 0) {
+ result = ma_data_source_read_pcm_frames(pDecoder->pBackend, pIntermediaryBuffer, framesToReadThisIterationIn, &framesReadThisIterationIn);
+ } else {
+ framesReadThisIterationIn = 0;
+ }
+
+ /*
+ At this point we have our decoded data in input format and now we need to convert to output format. Note that even if we didn't read any
+ input frames, we still want to try processing frames because there may be some output frames generated from cached input data. 
+ */
+ framesReadThisIterationOut = framesToReadThisIterationOut;
+ result = ma_data_converter_process_pcm_frames(&pDecoder->converter, pIntermediaryBuffer, &framesReadThisIterationIn, pRunningFramesOut, &framesReadThisIterationOut);
+ if (result != MA_SUCCESS) {
+ break;
+ }
+
+ totalFramesReadOut += framesReadThisIterationOut;
+
+ if (pRunningFramesOut != NULL) {
+ pRunningFramesOut = ma_offset_ptr(pRunningFramesOut, framesReadThisIterationOut * ma_get_bytes_per_frame(pDecoder->outputFormat, pDecoder->outputChannels));
+ }
+
+ if (framesReadThisIterationIn == 0 && framesReadThisIterationOut == 0) {
+ break; /* We're done. */
+ }
+ }
+ }
+ }
+ }
+ }
+
+ pDecoder->readPointerInPCMFrames += totalFramesReadOut;
+
+ if (pFramesRead != NULL) {
+ *pFramesRead = totalFramesReadOut;
+ }
+
+ if (result == MA_SUCCESS && totalFramesReadOut == 0) {
+ result = MA_AT_END;
+ }
+
+ return result;
+}
+
+MA_API ma_result ma_decoder_seek_to_pcm_frame(ma_decoder* pDecoder, ma_uint64 frameIndex)
+{
+ if (pDecoder == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ if (pDecoder->pBackend != NULL) {
+ ma_result result;
+ ma_uint64 internalFrameIndex;
+ ma_uint32 internalSampleRate;
+ ma_uint64 currentFrameIndex;
+
+ result = ma_data_source_get_data_format(pDecoder->pBackend, NULL, NULL, &internalSampleRate, NULL, 0);
+ if (result != MA_SUCCESS) {
+ return result; /* Failed to retrieve the internal sample rate. */
+ }
+
+ if (internalSampleRate == pDecoder->outputSampleRate) {
+ internalFrameIndex = frameIndex;
+ } else {
+ internalFrameIndex = ma_calculate_frame_count_after_resampling(internalSampleRate, pDecoder->outputSampleRate, frameIndex);
+ }
+
+ /* Only seek if we're requesting a different frame to what we're currently sitting on. */
+ ma_data_source_get_cursor_in_pcm_frames(pDecoder->pBackend, &currentFrameIndex);
+ if (currentFrameIndex != internalFrameIndex) {
+ result = ma_data_source_seek_to_pcm_frame(pDecoder->pBackend, internalFrameIndex);
+ if (result == MA_SUCCESS) {
+ pDecoder->readPointerInPCMFrames = frameIndex;
+ }
+
+ /* Reset the data converter so that any cached data in the resampler is cleared. */
+ ma_data_converter_reset(&pDecoder->converter);
+ }
+
+ return result;
+ }
+
+ /* Should never get here, but if we do it means onSeekToPCMFrame was not set by the backend. 
*/ + return MA_INVALID_ARGS; +} + +MA_API ma_result ma_decoder_get_data_format(ma_decoder* pDecoder, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + if (pDecoder == NULL) { + return MA_INVALID_ARGS; + } + + if (pFormat != NULL) { + *pFormat = pDecoder->outputFormat; + } + + if (pChannels != NULL) { + *pChannels = pDecoder->outputChannels; + } + + if (pSampleRate != NULL) { + *pSampleRate = pDecoder->outputSampleRate; + } + + if (pChannelMap != NULL) { + ma_data_converter_get_output_channel_map(&pDecoder->converter, pChannelMap, channelMapCap); + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_decoder_get_cursor_in_pcm_frames(ma_decoder* pDecoder, ma_uint64* pCursor) +{ + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; + + if (pDecoder == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = pDecoder->readPointerInPCMFrames; + + return MA_SUCCESS; +} + +MA_API ma_result ma_decoder_get_length_in_pcm_frames(ma_decoder* pDecoder, ma_uint64* pLength) +{ + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; + + if (pDecoder == NULL) { + return MA_INVALID_ARGS; + } + + if (pDecoder->pBackend != NULL) { + ma_result result; + ma_uint64 internalLengthInPCMFrames; + ma_uint32 internalSampleRate; + + result = ma_data_source_get_length_in_pcm_frames(pDecoder->pBackend, &internalLengthInPCMFrames); + if (result != MA_SUCCESS) { + return result; /* Failed to retrieve the internal length. */ + } + + result = ma_data_source_get_data_format(pDecoder->pBackend, NULL, NULL, &internalSampleRate, NULL, 0); + if (result != MA_SUCCESS) { + return result; /* Failed to retrieve the internal sample rate. */ + } + + if (internalSampleRate == pDecoder->outputSampleRate) { + *pLength = internalLengthInPCMFrames; + } else { + *pLength = ma_calculate_frame_count_after_resampling(pDecoder->outputSampleRate, internalSampleRate, internalLengthInPCMFrames); + } + + return MA_SUCCESS; + } else { + return MA_NO_BACKEND; + } +} + +MA_API ma_result ma_decoder_get_available_frames(ma_decoder* pDecoder, ma_uint64* pAvailableFrames) +{ + ma_result result; + ma_uint64 totalFrameCount; + + if (pAvailableFrames == NULL) { + return MA_INVALID_ARGS; + } + + *pAvailableFrames = 0; + + if (pDecoder == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_decoder_get_length_in_pcm_frames(pDecoder, &totalFrameCount); + if (result != MA_SUCCESS) { + return result; + } + + if (totalFrameCount <= pDecoder->readPointerInPCMFrames) { + *pAvailableFrames = 0; + } else { + *pAvailableFrames = totalFrameCount - pDecoder->readPointerInPCMFrames; + } + + return MA_SUCCESS; +} + + +static ma_result ma_decoder__full_decode_and_uninit(ma_decoder* pDecoder, ma_decoder_config* pConfigOut, ma_uint64* pFrameCountOut, void** ppPCMFramesOut) +{ + ma_result result; + ma_uint64 totalFrameCount; + ma_uint64 bpf; + ma_uint64 dataCapInFrames; + void* pPCMFramesOut; + + MA_ASSERT(pDecoder != NULL); + + totalFrameCount = 0; + bpf = ma_get_bytes_per_frame(pDecoder->outputFormat, pDecoder->outputChannels); + + /* The frame count is unknown until we try reading. Thus, we just run in a loop. */ + dataCapInFrames = 0; + pPCMFramesOut = NULL; + for (;;) { + ma_uint64 frameCountToTryReading; + ma_uint64 framesJustRead; + + /* Make room if there's not enough. 
*/ + if (totalFrameCount == dataCapInFrames) { + void* pNewPCMFramesOut; + ma_uint64 newDataCapInFrames = dataCapInFrames*2; + if (newDataCapInFrames == 0) { + newDataCapInFrames = 4096; + } + + if ((newDataCapInFrames * bpf) > MA_SIZE_MAX) { + ma_free(pPCMFramesOut, &pDecoder->allocationCallbacks); + return MA_TOO_BIG; + } + + pNewPCMFramesOut = (void*)ma_realloc(pPCMFramesOut, (size_t)(newDataCapInFrames * bpf), &pDecoder->allocationCallbacks); + if (pNewPCMFramesOut == NULL) { + ma_free(pPCMFramesOut, &pDecoder->allocationCallbacks); + return MA_OUT_OF_MEMORY; + } + + dataCapInFrames = newDataCapInFrames; + pPCMFramesOut = pNewPCMFramesOut; + } + + frameCountToTryReading = dataCapInFrames - totalFrameCount; + MA_ASSERT(frameCountToTryReading > 0); + + result = ma_decoder_read_pcm_frames(pDecoder, (ma_uint8*)pPCMFramesOut + (totalFrameCount * bpf), frameCountToTryReading, &framesJustRead); + totalFrameCount += framesJustRead; + + if (result != MA_SUCCESS) { + break; + } + + if (framesJustRead < frameCountToTryReading) { + break; + } + } + + + if (pConfigOut != NULL) { + pConfigOut->format = pDecoder->outputFormat; + pConfigOut->channels = pDecoder->outputChannels; + pConfigOut->sampleRate = pDecoder->outputSampleRate; + } + + if (ppPCMFramesOut != NULL) { + *ppPCMFramesOut = pPCMFramesOut; + } else { + ma_free(pPCMFramesOut, &pDecoder->allocationCallbacks); + } + + if (pFrameCountOut != NULL) { + *pFrameCountOut = totalFrameCount; + } + + ma_decoder_uninit(pDecoder); + return MA_SUCCESS; +} + +MA_API ma_result ma_decode_from_vfs(ma_vfs* pVFS, const char* pFilePath, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppPCMFramesOut) +{ + ma_result result; + ma_decoder_config config; + ma_decoder decoder; + + if (pFrameCountOut != NULL) { + *pFrameCountOut = 0; + } + if (ppPCMFramesOut != NULL) { + *ppPCMFramesOut = NULL; + } + + config = ma_decoder_config_init_copy(pConfig); + + result = ma_decoder_init_vfs(pVFS, pFilePath, &config, &decoder); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_decoder__full_decode_and_uninit(&decoder, pConfig, pFrameCountOut, ppPCMFramesOut); + + return result; +} + +MA_API ma_result ma_decode_file(const char* pFilePath, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppPCMFramesOut) +{ + return ma_decode_from_vfs(NULL, pFilePath, pConfig, pFrameCountOut, ppPCMFramesOut); +} + +MA_API ma_result ma_decode_memory(const void* pData, size_t dataSize, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppPCMFramesOut) +{ + ma_decoder_config config; + ma_decoder decoder; + ma_result result; + + if (pFrameCountOut != NULL) { + *pFrameCountOut = 0; + } + if (ppPCMFramesOut != NULL) { + *ppPCMFramesOut = NULL; + } + + if (pData == NULL || dataSize == 0) { + return MA_INVALID_ARGS; + } + + config = ma_decoder_config_init_copy(pConfig); + + result = ma_decoder_init_memory(pData, dataSize, &config, &decoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__full_decode_and_uninit(&decoder, pConfig, pFrameCountOut, ppPCMFramesOut); +} +#endif /* MA_NO_DECODING */ + + +#ifndef MA_NO_ENCODING + +#if defined(MA_HAS_WAV) +static size_t ma_encoder__internal_on_write_wav(void* pUserData, const void* pData, size_t bytesToWrite) +{ + ma_encoder* pEncoder = (ma_encoder*)pUserData; + size_t bytesWritten = 0; + + MA_ASSERT(pEncoder != NULL); + + pEncoder->onWrite(pEncoder, pData, bytesToWrite, &bytesWritten); + return bytesWritten; +} + +static ma_bool32 ma_encoder__internal_on_seek_wav(void* pUserData, 
int offset, ma_dr_wav_seek_origin origin) +{ + ma_encoder* pEncoder = (ma_encoder*)pUserData; + ma_result result; + + MA_ASSERT(pEncoder != NULL); + + result = pEncoder->onSeek(pEncoder, offset, (origin == ma_dr_wav_seek_origin_start) ? ma_seek_origin_start : ma_seek_origin_current); + if (result != MA_SUCCESS) { + return MA_FALSE; + } else { + return MA_TRUE; + } +} + +static ma_result ma_encoder__on_init_wav(ma_encoder* pEncoder) +{ + ma_dr_wav_data_format wavFormat; + ma_allocation_callbacks allocationCallbacks; + ma_dr_wav* pWav; + + MA_ASSERT(pEncoder != NULL); + + pWav = (ma_dr_wav*)ma_malloc(sizeof(*pWav), &pEncoder->config.allocationCallbacks); + if (pWav == NULL) { + return MA_OUT_OF_MEMORY; + } + + wavFormat.container = ma_dr_wav_container_riff; + wavFormat.channels = pEncoder->config.channels; + wavFormat.sampleRate = pEncoder->config.sampleRate; + wavFormat.bitsPerSample = ma_get_bytes_per_sample(pEncoder->config.format) * 8; + if (pEncoder->config.format == ma_format_f32) { + wavFormat.format = MA_DR_WAVE_FORMAT_IEEE_FLOAT; + } else { + wavFormat.format = MA_DR_WAVE_FORMAT_PCM; + } + + allocationCallbacks.pUserData = pEncoder->config.allocationCallbacks.pUserData; + allocationCallbacks.onMalloc = pEncoder->config.allocationCallbacks.onMalloc; + allocationCallbacks.onRealloc = pEncoder->config.allocationCallbacks.onRealloc; + allocationCallbacks.onFree = pEncoder->config.allocationCallbacks.onFree; + + if (!ma_dr_wav_init_write(pWav, &wavFormat, ma_encoder__internal_on_write_wav, ma_encoder__internal_on_seek_wav, pEncoder, &allocationCallbacks)) { + return MA_ERROR; + } + + pEncoder->pInternalEncoder = pWav; + + return MA_SUCCESS; +} + +static void ma_encoder__on_uninit_wav(ma_encoder* pEncoder) +{ + ma_dr_wav* pWav; + + MA_ASSERT(pEncoder != NULL); + + pWav = (ma_dr_wav*)pEncoder->pInternalEncoder; + MA_ASSERT(pWav != NULL); + + ma_dr_wav_uninit(pWav); + ma_free(pWav, &pEncoder->config.allocationCallbacks); +} + +static ma_result ma_encoder__on_write_pcm_frames_wav(ma_encoder* pEncoder, const void* pFramesIn, ma_uint64 frameCount, ma_uint64* pFramesWritten) +{ + ma_dr_wav* pWav; + ma_uint64 framesWritten; + + MA_ASSERT(pEncoder != NULL); + + pWav = (ma_dr_wav*)pEncoder->pInternalEncoder; + MA_ASSERT(pWav != NULL); + + framesWritten = ma_dr_wav_write_pcm_frames(pWav, frameCount, pFramesIn); + + if (pFramesWritten != NULL) { + *pFramesWritten = framesWritten; + } + + return MA_SUCCESS; +} +#endif + +MA_API ma_encoder_config ma_encoder_config_init(ma_encoding_format encodingFormat, ma_format format, ma_uint32 channels, ma_uint32 sampleRate) +{ + ma_encoder_config config; + + MA_ZERO_OBJECT(&config); + config.encodingFormat = encodingFormat; + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + + return config; +} + +MA_API ma_result ma_encoder_preinit(const ma_encoder_config* pConfig, ma_encoder* pEncoder) +{ + ma_result result; + + if (pEncoder == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pEncoder); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->format == ma_format_unknown || pConfig->channels == 0 || pConfig->sampleRate == 0) { + return MA_INVALID_ARGS; + } + + pEncoder->config = *pConfig; + + result = ma_allocation_callbacks_init_copy(&pEncoder->config.allocationCallbacks, &pConfig->allocationCallbacks); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_encoder_init__internal(ma_encoder_write_proc onWrite, ma_encoder_seek_proc onSeek, void* 
pUserData, ma_encoder* pEncoder) +{ + ma_result result = MA_SUCCESS; + + /* This assumes ma_encoder_preinit() has been called prior. */ + MA_ASSERT(pEncoder != NULL); + + if (onWrite == NULL || onSeek == NULL) { + return MA_INVALID_ARGS; + } + + pEncoder->onWrite = onWrite; + pEncoder->onSeek = onSeek; + pEncoder->pUserData = pUserData; + + switch (pEncoder->config.encodingFormat) + { + case ma_encoding_format_wav: + { +#if defined(MA_HAS_WAV) + pEncoder->onInit = ma_encoder__on_init_wav; + pEncoder->onUninit = ma_encoder__on_uninit_wav; + pEncoder->onWritePCMFrames = ma_encoder__on_write_pcm_frames_wav; +#else + result = MA_NO_BACKEND; +#endif + } break; + + default: + { + result = MA_INVALID_ARGS; + } break; + } + + /* Getting here means we should have our backend callbacks set up. */ + if (result == MA_SUCCESS) { + result = pEncoder->onInit(pEncoder); + } + + return result; +} + +static ma_result ma_encoder__on_write_vfs(ma_encoder* pEncoder, const void* pBufferIn, size_t bytesToWrite, size_t* pBytesWritten) +{ + return ma_vfs_or_default_write(pEncoder->data.vfs.pVFS, pEncoder->data.vfs.file, pBufferIn, bytesToWrite, pBytesWritten); +} + +static ma_result ma_encoder__on_seek_vfs(ma_encoder* pEncoder, ma_int64 offset, ma_seek_origin origin) +{ + return ma_vfs_or_default_seek(pEncoder->data.vfs.pVFS, pEncoder->data.vfs.file, offset, origin); +} + +MA_API ma_result ma_encoder_init_vfs(ma_vfs* pVFS, const char* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder) +{ + ma_result result; + ma_vfs_file file; + + result = ma_encoder_preinit(pConfig, pEncoder); + if (result != MA_SUCCESS) { + return result; + } + + /* Now open the file. If this fails we don't need to uninitialize the encoder. */ + result = ma_vfs_or_default_open(pVFS, pFilePath, MA_OPEN_MODE_WRITE, &file); + if (result != MA_SUCCESS) { + return result; + } + + pEncoder->data.vfs.pVFS = pVFS; + pEncoder->data.vfs.file = file; + + result = ma_encoder_init__internal(ma_encoder__on_write_vfs, ma_encoder__on_seek_vfs, NULL, pEncoder); + if (result != MA_SUCCESS) { + ma_vfs_or_default_close(pVFS, file); + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_encoder_init_vfs_w(ma_vfs* pVFS, const wchar_t* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder) +{ + ma_result result; + ma_vfs_file file; + + result = ma_encoder_preinit(pConfig, pEncoder); + if (result != MA_SUCCESS) { + return result; + } + + /* Now open the file. If this fails we don't need to uninitialize the encoder. 
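ma_encoder_preinit() only validates and copies the config and allocation callbacks, so there is nothing allocated that would need cleaning up at this point.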
*/ + result = ma_vfs_or_default_open_w(pVFS, pFilePath, MA_OPEN_MODE_WRITE, &file); + if (result != MA_SUCCESS) { + return result; + } + + pEncoder->data.vfs.pVFS = pVFS; + pEncoder->data.vfs.file = file; + + result = ma_encoder_init__internal(ma_encoder__on_write_vfs, ma_encoder__on_seek_vfs, NULL, pEncoder); + if (result != MA_SUCCESS) { + ma_vfs_or_default_close(pVFS, file); + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_encoder_init_file(const char* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder) +{ + return ma_encoder_init_vfs(NULL, pFilePath, pConfig, pEncoder); +} + +MA_API ma_result ma_encoder_init_file_w(const wchar_t* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder) +{ + return ma_encoder_init_vfs_w(NULL, pFilePath, pConfig, pEncoder); +} + +MA_API ma_result ma_encoder_init(ma_encoder_write_proc onWrite, ma_encoder_seek_proc onSeek, void* pUserData, const ma_encoder_config* pConfig, ma_encoder* pEncoder) +{ + ma_result result; + + result = ma_encoder_preinit(pConfig, pEncoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_encoder_init__internal(onWrite, onSeek, pUserData, pEncoder); +} + + +MA_API void ma_encoder_uninit(ma_encoder* pEncoder) +{ + if (pEncoder == NULL) { + return; + } + + if (pEncoder->onUninit) { + pEncoder->onUninit(pEncoder); + } + + /* If we have a file handle, close it. */ + if (pEncoder->onWrite == ma_encoder__on_write_vfs) { + ma_vfs_or_default_close(pEncoder->data.vfs.pVFS, pEncoder->data.vfs.file); + pEncoder->data.vfs.file = NULL; + } +} + + +MA_API ma_result ma_encoder_write_pcm_frames(ma_encoder* pEncoder, const void* pFramesIn, ma_uint64 frameCount, ma_uint64* pFramesWritten) +{ + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + if (pEncoder == NULL || pFramesIn == NULL) { + return MA_INVALID_ARGS; + } + + return pEncoder->onWritePCMFrames(pEncoder, pFramesIn, frameCount, pFramesWritten); +} +#endif /* MA_NO_ENCODING */ + + + +/************************************************************************************************************************************************************** + +Generation + +**************************************************************************************************************************************************************/ +#ifndef MA_NO_GENERATION +MA_API ma_waveform_config ma_waveform_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_waveform_type type, double amplitude, double frequency) +{ + ma_waveform_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.type = type; + config.amplitude = amplitude; + config.frequency = frequency; + + return config; +} + +static ma_result ma_waveform__data_source_on_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_waveform_read_pcm_frames((ma_waveform*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_waveform__data_source_on_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_waveform_seek_to_pcm_frame((ma_waveform*)pDataSource, frameIndex); +} + +static ma_result ma_waveform__data_source_on_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + ma_waveform* pWaveform = (ma_waveform*)pDataSource; + + *pFormat = pWaveform->config.format; + *pChannels = 
pWaveform->config.channels; + *pSampleRate = pWaveform->config.sampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pWaveform->config.channels); + + return MA_SUCCESS; +} + +static ma_result ma_waveform__data_source_on_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + ma_waveform* pWaveform = (ma_waveform*)pDataSource; + + *pCursor = (ma_uint64)(pWaveform->time / pWaveform->advance); + + return MA_SUCCESS; +} + +static double ma_waveform__calculate_advance(ma_uint32 sampleRate, double frequency) +{ + return (1.0 / (sampleRate / frequency)); +} + +static void ma_waveform__update_advance(ma_waveform* pWaveform) +{ + pWaveform->advance = ma_waveform__calculate_advance(pWaveform->config.sampleRate, pWaveform->config.frequency); +} + +static ma_data_source_vtable g_ma_waveform_data_source_vtable = +{ + ma_waveform__data_source_on_read, + ma_waveform__data_source_on_seek, + ma_waveform__data_source_on_get_data_format, + ma_waveform__data_source_on_get_cursor, + NULL, /* onGetLength. There's no notion of a length in waveforms. */ + NULL, /* onSetLooping */ + 0 +}; + +MA_API ma_result ma_waveform_init(const ma_waveform_config* pConfig, ma_waveform* pWaveform) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pWaveform); + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_waveform_data_source_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pWaveform->ds); + if (result != MA_SUCCESS) { + return result; + } + + pWaveform->config = *pConfig; + pWaveform->advance = ma_waveform__calculate_advance(pWaveform->config.sampleRate, pWaveform->config.frequency); + pWaveform->time = 0; + + return MA_SUCCESS; +} + +MA_API void ma_waveform_uninit(ma_waveform* pWaveform) +{ + if (pWaveform == NULL) { + return; + } + + ma_data_source_uninit(&pWaveform->ds); +} + +MA_API ma_result ma_waveform_set_amplitude(ma_waveform* pWaveform, double amplitude) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->config.amplitude = amplitude; + return MA_SUCCESS; +} + +MA_API ma_result ma_waveform_set_frequency(ma_waveform* pWaveform, double frequency) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->config.frequency = frequency; + ma_waveform__update_advance(pWaveform); + + return MA_SUCCESS; +} + +MA_API ma_result ma_waveform_set_type(ma_waveform* pWaveform, ma_waveform_type type) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->config.type = type; + return MA_SUCCESS; +} + +MA_API ma_result ma_waveform_set_sample_rate(ma_waveform* pWaveform, ma_uint32 sampleRate) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->config.sampleRate = sampleRate; + ma_waveform__update_advance(pWaveform); + + return MA_SUCCESS; +} + +static float ma_waveform_sine_f32(double time, double amplitude) +{ + return (float)(ma_sind(MA_TAU_D * time) * amplitude); +} + +static ma_int16 ma_waveform_sine_s16(double time, double amplitude) +{ + return ma_pcm_sample_f32_to_s16(ma_waveform_sine_f32(time, amplitude)); +} + +static float ma_waveform_square_f32(double time, double dutyCycle, double amplitude) +{ + double f = time - (ma_int64)time; + double r; + + if (f < dutyCycle) { + r = amplitude; + } else { + r = -amplitude; + } + + return (float)r; +} + +static ma_int16 ma_waveform_square_s16(double time, double dutyCycle, double amplitude) +{ + return 
ma_pcm_sample_f32_to_s16(ma_waveform_square_f32(time, dutyCycle, amplitude)); +} + +static float ma_waveform_triangle_f32(double time, double amplitude) +{ + double f = time - (ma_int64)time; + double r; + + r = 2 * ma_abs(2 * (f - 0.5)) - 1; + + return (float)(r * amplitude); +} + +static ma_int16 ma_waveform_triangle_s16(double time, double amplitude) +{ + return ma_pcm_sample_f32_to_s16(ma_waveform_triangle_f32(time, amplitude)); +} + +static float ma_waveform_sawtooth_f32(double time, double amplitude) +{ + double f = time - (ma_int64)time; + double r; + + r = 2 * (f - 0.5); + + return (float)(r * amplitude); +} + +static ma_int16 ma_waveform_sawtooth_s16(double time, double amplitude) +{ + return ma_pcm_sample_f32_to_s16(ma_waveform_sawtooth_f32(time, amplitude)); +} + +static void ma_waveform_read_pcm_frames__sine(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint64 iChannel; + ma_uint32 bps = ma_get_bytes_per_sample(pWaveform->config.format); + ma_uint32 bpf = bps * pWaveform->config.channels; + + MA_ASSERT(pWaveform != NULL); + MA_ASSERT(pFramesOut != NULL); + + if (pWaveform->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_sine_f32(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else if (pWaveform->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_waveform_sine_s16(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_sine_f32(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pWaveform->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } +} + +static void ma_waveform_read_pcm_frames__square(ma_waveform* pWaveform, double dutyCycle, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint64 iChannel; + ma_uint32 bps = ma_get_bytes_per_sample(pWaveform->config.format); + ma_uint32 bpf = bps * pWaveform->config.channels; + + MA_ASSERT(pWaveform != NULL); + MA_ASSERT(pFramesOut != NULL); + + if (pWaveform->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_square_f32(pWaveform->time, dutyCycle, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else if (pWaveform->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_waveform_square_s16(pWaveform->time, dutyCycle, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; 
iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_square_f32(pWaveform->time, dutyCycle, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pWaveform->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } +} + +static void ma_waveform_read_pcm_frames__triangle(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint64 iChannel; + ma_uint32 bps = ma_get_bytes_per_sample(pWaveform->config.format); + ma_uint32 bpf = bps * pWaveform->config.channels; + + MA_ASSERT(pWaveform != NULL); + MA_ASSERT(pFramesOut != NULL); + + if (pWaveform->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_triangle_f32(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else if (pWaveform->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_waveform_triangle_s16(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_triangle_f32(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pWaveform->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } +} + +static void ma_waveform_read_pcm_frames__sawtooth(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint64 iChannel; + ma_uint32 bps = ma_get_bytes_per_sample(pWaveform->config.format); + ma_uint32 bpf = bps * pWaveform->config.channels; + + MA_ASSERT(pWaveform != NULL); + MA_ASSERT(pFramesOut != NULL); + + if (pWaveform->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_sawtooth_f32(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else if (pWaveform->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_waveform_sawtooth_s16(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = 
ma_waveform_sawtooth_f32(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pWaveform->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } +} + +MA_API ma_result ma_waveform_read_pcm_frames(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + if (pFramesOut != NULL) { + switch (pWaveform->config.type) + { + case ma_waveform_type_sine: + { + ma_waveform_read_pcm_frames__sine(pWaveform, pFramesOut, frameCount); + } break; + + case ma_waveform_type_square: + { + ma_waveform_read_pcm_frames__square(pWaveform, 0.5, pFramesOut, frameCount); + } break; + + case ma_waveform_type_triangle: + { + ma_waveform_read_pcm_frames__triangle(pWaveform, pFramesOut, frameCount); + } break; + + case ma_waveform_type_sawtooth: + { + ma_waveform_read_pcm_frames__sawtooth(pWaveform, pFramesOut, frameCount); + } break; + + default: return MA_INVALID_OPERATION; /* Unknown waveform type. */ + } + } else { + pWaveform->time += pWaveform->advance * (ma_int64)frameCount; /* Cast to int64 required for VC6. Won't affect anything in practice. */ + } + + if (pFramesRead != NULL) { + *pFramesRead = frameCount; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_waveform_seek_to_pcm_frame(ma_waveform* pWaveform, ma_uint64 frameIndex) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->time = pWaveform->advance * (ma_int64)frameIndex; /* Casting for VC6. Won't be an issue in practice. 
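Since advance is frequency/sampleRate, the time value is the waveform phase in cycles, which makes seeking a single O(1) multiply.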
*/ + + return MA_SUCCESS; +} + +MA_API ma_pulsewave_config ma_pulsewave_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double dutyCycle, double amplitude, double frequency) +{ + ma_pulsewave_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.dutyCycle = dutyCycle; + config.amplitude = amplitude; + config.frequency = frequency; + + return config; +} + +MA_API ma_result ma_pulsewave_init(const ma_pulsewave_config* pConfig, ma_pulsewave* pWaveform) +{ + ma_result result; + ma_waveform_config config; + + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pWaveform); + + config = ma_waveform_config_init( + pConfig->format, + pConfig->channels, + pConfig->sampleRate, + ma_waveform_type_square, + pConfig->amplitude, + pConfig->frequency + ); + + result = ma_waveform_init(&config, &pWaveform->waveform); + ma_pulsewave_set_duty_cycle(pWaveform, pConfig->dutyCycle); + + return result; +} + +MA_API void ma_pulsewave_uninit(ma_pulsewave* pWaveform) +{ + if (pWaveform == NULL) { + return; + } + + ma_waveform_uninit(&pWaveform->waveform); +} + +MA_API ma_result ma_pulsewave_read_pcm_frames(ma_pulsewave* pWaveform, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + if (pFramesOut != NULL) { + ma_waveform_read_pcm_frames__square(&pWaveform->waveform, pWaveform->config.dutyCycle, pFramesOut, frameCount); + } else { + pWaveform->waveform.time += pWaveform->waveform.advance * (ma_int64)frameCount; /* Cast to int64 required for VC6. Won't affect anything in practice. 
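With a NULL output buffer the time cursor still advances, so a NULL read behaves like a relative seek forward.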
*/ + } + + if (pFramesRead != NULL) { + *pFramesRead = frameCount; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_pulsewave_seek_to_pcm_frame(ma_pulsewave* pWaveform, ma_uint64 frameIndex) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + ma_waveform_seek_to_pcm_frame(&pWaveform->waveform, frameIndex); + + return MA_SUCCESS; +} + +MA_API ma_result ma_pulsewave_set_amplitude(ma_pulsewave* pWaveform, double amplitude) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->config.amplitude = amplitude; + ma_waveform_set_amplitude(&pWaveform->waveform, amplitude); + + return MA_SUCCESS; +} + +MA_API ma_result ma_pulsewave_set_frequency(ma_pulsewave* pWaveform, double frequency) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->config.frequency = frequency; + ma_waveform_set_frequency(&pWaveform->waveform, frequency); + + return MA_SUCCESS; +} + +MA_API ma_result ma_pulsewave_set_sample_rate(ma_pulsewave* pWaveform, ma_uint32 sampleRate) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->config.sampleRate = sampleRate; + ma_waveform_set_sample_rate(&pWaveform->waveform, sampleRate); + + return MA_SUCCESS; +} + +MA_API ma_result ma_pulsewave_set_duty_cycle(ma_pulsewave* pWaveform, double dutyCycle) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->config.dutyCycle = dutyCycle; + + return MA_SUCCESS; +} + + + +MA_API ma_noise_config ma_noise_config_init(ma_format format, ma_uint32 channels, ma_noise_type type, ma_int32 seed, double amplitude) +{ + ma_noise_config config; + MA_ZERO_OBJECT(&config); + + config.format = format; + config.channels = channels; + config.type = type; + config.seed = seed; + config.amplitude = amplitude; + + if (config.seed == 0) { + config.seed = MA_DEFAULT_LCG_SEED; + } + + return config; +} + + +static ma_result ma_noise__data_source_on_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_noise_read_pcm_frames((ma_noise*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_noise__data_source_on_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + /* No-op. Just pretend to be successful. */ + (void)pDataSource; + (void)frameIndex; + return MA_SUCCESS; +} + +static ma_result ma_noise__data_source_on_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + ma_noise* pNoise = (ma_noise*)pDataSource; + + *pFormat = pNoise->config.format; + *pChannels = pNoise->config.channels; + *pSampleRate = 0; /* There is no notion of sample rate with noise generation. */ + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pNoise->config.channels); + + return MA_SUCCESS; +} + +static ma_data_source_vtable g_ma_noise_data_source_vtable = +{ + ma_noise__data_source_on_read, + ma_noise__data_source_on_seek, /* No-op for noise. */ + ma_noise__data_source_on_get_data_format, + NULL, /* onGetCursor. No notion of a cursor for noise. */ + NULL, /* onGetLength. No notion of a length for noise. 
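Length queries on a noise data source are therefore reported as unavailable.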
*/ + NULL, /* onSetLooping */ + 0 +}; + + +#ifndef MA_PINK_NOISE_BIN_SIZE +#define MA_PINK_NOISE_BIN_SIZE 16 +#endif + +typedef struct +{ + size_t sizeInBytes; + struct + { + size_t binOffset; + size_t accumulationOffset; + size_t counterOffset; + } pink; + struct + { + size_t accumulationOffset; + } brownian; +} ma_noise_heap_layout; + +static ma_result ma_noise_get_heap_layout(const ma_noise_config* pConfig, ma_noise_heap_layout* pHeapLayout) +{ + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channels == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* Pink. */ + if (pConfig->type == ma_noise_type_pink) { + /* bin */ + pHeapLayout->pink.binOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(double*) * pConfig->channels; + pHeapLayout->sizeInBytes += sizeof(double ) * pConfig->channels * MA_PINK_NOISE_BIN_SIZE; + + /* accumulation */ + pHeapLayout->pink.accumulationOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(double) * pConfig->channels; + + /* counter */ + pHeapLayout->pink.counterOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(ma_uint32) * pConfig->channels; + } + + /* Brownian. */ + if (pConfig->type == ma_noise_type_brownian) { + /* accumulation */ + pHeapLayout->brownian.accumulationOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(double) * pConfig->channels; + } + + /* Make sure allocation size is aligned. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +MA_API ma_result ma_noise_get_heap_size(const ma_noise_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_noise_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_noise_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_noise_init_preallocated(const ma_noise_config* pConfig, void* pHeap, ma_noise* pNoise) +{ + ma_result result; + ma_noise_heap_layout heapLayout; + ma_data_source_config dataSourceConfig; + ma_uint32 iChannel; + + if (pNoise == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNoise); + + result = ma_noise_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pNoise->_pHeap = pHeap; + MA_ZERO_MEMORY(pNoise->_pHeap, heapLayout.sizeInBytes); + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_noise_data_source_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pNoise->ds); + if (result != MA_SUCCESS) { + return result; + } + + pNoise->config = *pConfig; + ma_lcg_seed(&pNoise->lcg, pConfig->seed); + + if (pNoise->config.type == ma_noise_type_pink) { + pNoise->state.pink.bin = (double** )ma_offset_ptr(pHeap, heapLayout.pink.binOffset); + pNoise->state.pink.accumulation = (double* )ma_offset_ptr(pHeap, heapLayout.pink.accumulationOffset); + pNoise->state.pink.counter = (ma_uint32*)ma_offset_ptr(pHeap, heapLayout.pink.counterOffset); + + for (iChannel = 0; iChannel < pConfig->channels; iChannel += 1) { + pNoise->state.pink.bin[iChannel] = (double*)ma_offset_ptr(pHeap, heapLayout.pink.binOffset + (sizeof(double*) * pConfig->channels) + (sizeof(double) * MA_PINK_NOISE_BIN_SIZE * iChannel)); + pNoise->state.pink.accumulation[iChannel] = 0; + 
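/* The per-channel counter drives bin selection in ma_noise_f32_pink() via its trailing zero count (Voss-McCartney style). */ + 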
pNoise->state.pink.counter[iChannel] = 1; + } + } + + if (pNoise->config.type == ma_noise_type_brownian) { + pNoise->state.brownian.accumulation = (double*)ma_offset_ptr(pHeap, heapLayout.brownian.accumulationOffset); + + for (iChannel = 0; iChannel < pConfig->channels; iChannel += 1) { + pNoise->state.brownian.accumulation[iChannel] = 0; + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_noise_init(const ma_noise_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_noise* pNoise) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_noise_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_noise_init_preallocated(pConfig, pHeap, pNoise); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pNoise->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_noise_uninit(ma_noise* pNoise, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pNoise == NULL) { + return; + } + + ma_data_source_uninit(&pNoise->ds); + + if (pNoise->_ownsHeap) { + ma_free(pNoise->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_noise_set_amplitude(ma_noise* pNoise, double amplitude) +{ + if (pNoise == NULL) { + return MA_INVALID_ARGS; + } + + pNoise->config.amplitude = amplitude; + return MA_SUCCESS; +} + +MA_API ma_result ma_noise_set_seed(ma_noise* pNoise, ma_int32 seed) +{ + if (pNoise == NULL) { + return MA_INVALID_ARGS; + } + + pNoise->lcg.state = seed; + return MA_SUCCESS; +} + + +MA_API ma_result ma_noise_set_type(ma_noise* pNoise, ma_noise_type type) +{ + if (pNoise == NULL) { + return MA_INVALID_ARGS; + } + + /* + This function should never have been implemented in the first place. Changing the type dynamically is not + supported. Instead you need to uninitialize and reinitialize a fresh `ma_noise` object. This function + will be removed in version 0.12. 
+ */ + MA_ASSERT(MA_FALSE); + (void)type; + + return MA_INVALID_OPERATION; +} + +static MA_INLINE float ma_noise_f32_white(ma_noise* pNoise) +{ + return (float)(ma_lcg_rand_f64(&pNoise->lcg) * pNoise->config.amplitude); +} + +static MA_INLINE ma_int16 ma_noise_s16_white(ma_noise* pNoise) +{ + return ma_pcm_sample_f32_to_s16(ma_noise_f32_white(pNoise)); +} + +static MA_INLINE ma_uint64 ma_noise_read_pcm_frames__white(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint32 iChannel; + const ma_uint32 channels = pNoise->config.channels; + MA_ASSUME(channels > 0); + + if (pNoise->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_white(pNoise); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutF32[iFrame*channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutF32[iFrame*channels + iChannel] = ma_noise_f32_white(pNoise); + } + } + } + } else if (pNoise->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_noise_s16_white(pNoise); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutS16[iFrame*channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutS16[iFrame*channels + iChannel] = ma_noise_s16_white(pNoise); + } + } + } + } else { + const ma_uint32 bps = ma_get_bytes_per_sample(pNoise->config.format); + const ma_uint32 bpf = bps * channels; + + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_white(pNoise); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + float s = ma_noise_f32_white(pNoise); + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } + } + + return frameCount; +} + + +static MA_INLINE unsigned int ma_tzcnt32(unsigned int x) +{ + unsigned int n; + + /* Special case for odd numbers since they should happen about half the time. */ + if (x & 0x1) { + return 0; + } + + if (x == 0) { + return sizeof(x) << 3; + } + + n = 1; + if ((x & 0x0000FFFF) == 0) { x >>= 16; n += 16; } + if ((x & 0x000000FF) == 0) { x >>= 8; n += 8; } + if ((x & 0x0000000F) == 0) { x >>= 4; n += 4; } + if ((x & 0x00000003) == 0) { x >>= 2; n += 2; } + n -= x & 0x00000001; + + return n; +} + +/* +Pink noise generation based on Tonic (public domain) with modifications. 
https://github.com/TonicAudio/Tonic/blob/master/src/Tonic/Noise.h + +This is basically _the_ reference for pink noise from what I've found: http://www.firstpr.com.au/dsp/pink-noise/ +*/ +static MA_INLINE float ma_noise_f32_pink(ma_noise* pNoise, ma_uint32 iChannel) +{ + double result; + double binPrev; + double binNext; + unsigned int ibin; + + ibin = ma_tzcnt32(pNoise->state.pink.counter[iChannel]) & (MA_PINK_NOISE_BIN_SIZE - 1); + + binPrev = pNoise->state.pink.bin[iChannel][ibin]; + binNext = ma_lcg_rand_f64(&pNoise->lcg); + pNoise->state.pink.bin[iChannel][ibin] = binNext; + + pNoise->state.pink.accumulation[iChannel] += (binNext - binPrev); + pNoise->state.pink.counter[iChannel] += 1; + + result = (ma_lcg_rand_f64(&pNoise->lcg) + pNoise->state.pink.accumulation[iChannel]); + result /= 10; + + return (float)(result * pNoise->config.amplitude); +} + +static MA_INLINE ma_int16 ma_noise_s16_pink(ma_noise* pNoise, ma_uint32 iChannel) +{ + return ma_pcm_sample_f32_to_s16(ma_noise_f32_pink(pNoise, iChannel)); +} + +static MA_INLINE ma_uint64 ma_noise_read_pcm_frames__pink(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint32 iChannel; + const ma_uint32 channels = pNoise->config.channels; + MA_ASSUME(channels > 0); + + if (pNoise->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_pink(pNoise, 0); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutF32[iFrame*channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutF32[iFrame*channels + iChannel] = ma_noise_f32_pink(pNoise, iChannel); + } + } + } + } else if (pNoise->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_noise_s16_pink(pNoise, 0); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutS16[iFrame*channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutS16[iFrame*channels + iChannel] = ma_noise_s16_pink(pNoise, iChannel); + } + } + } + } else { + const ma_uint32 bps = ma_get_bytes_per_sample(pNoise->config.format); + const ma_uint32 bpf = bps * channels; + + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_pink(pNoise, 0); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + float s = ma_noise_f32_pink(pNoise, iChannel); + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } + } + + return frameCount; +} + + +static MA_INLINE float ma_noise_f32_brownian(ma_noise* pNoise, ma_uint32 iChannel) +{ + double result; + + result = (ma_lcg_rand_f64(&pNoise->lcg) + pNoise->state.brownian.accumulation[iChannel]); + result /= 1.005; /* Don't escape the -1..1 range on average. 
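The accumulator acts as a leaky integrator over the white noise source, and the division by 1.005 is the leak that keeps the random walk bounded.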
*/ + + pNoise->state.brownian.accumulation[iChannel] = result; + result /= 20; + + return (float)(result * pNoise->config.amplitude); +} + +static MA_INLINE ma_int16 ma_noise_s16_brownian(ma_noise* pNoise, ma_uint32 iChannel) +{ + return ma_pcm_sample_f32_to_s16(ma_noise_f32_brownian(pNoise, iChannel)); +} + +static MA_INLINE ma_uint64 ma_noise_read_pcm_frames__brownian(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint32 iChannel; + const ma_uint32 channels = pNoise->config.channels; + MA_ASSUME(channels > 0); + + if (pNoise->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_brownian(pNoise, 0); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutF32[iFrame*channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutF32[iFrame*channels + iChannel] = ma_noise_f32_brownian(pNoise, iChannel); + } + } + } + } else if (pNoise->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_noise_s16_brownian(pNoise, 0); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutS16[iFrame*channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutS16[iFrame*channels + iChannel] = ma_noise_s16_brownian(pNoise, iChannel); + } + } + } + } else { + const ma_uint32 bps = ma_get_bytes_per_sample(pNoise->config.format); + const ma_uint32 bpf = bps * channels; + + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_brownian(pNoise, 0); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + float s = ma_noise_f32_brownian(pNoise, iChannel); + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } + } + + return frameCount; +} + +MA_API ma_result ma_noise_read_pcm_frames(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_uint64 framesRead = 0; + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if (pNoise == NULL) { + return MA_INVALID_ARGS; + } + + /* The output buffer is allowed to be NULL. Since we aren't tracking cursors or anything we can just do nothing and pretend to be successful. */ + if (pFramesOut == NULL) { + framesRead = frameCount; + } else { + switch (pNoise->config.type) { + case ma_noise_type_white: framesRead = ma_noise_read_pcm_frames__white (pNoise, pFramesOut, frameCount); break; + case ma_noise_type_pink: framesRead = ma_noise_read_pcm_frames__pink (pNoise, pFramesOut, frameCount); break; + case ma_noise_type_brownian: framesRead = ma_noise_read_pcm_frames__brownian(pNoise, pFramesOut, frameCount); break; + default: return MA_INVALID_OPERATION; /* Unknown noise type. 
*/ + } + } + + if (pFramesRead != NULL) { + *pFramesRead = framesRead; + } + + return MA_SUCCESS; +} +#endif /* MA_NO_GENERATION */ + + + +#ifndef MA_NO_RESOURCE_MANAGER +#ifndef MA_RESOURCE_MANAGER_PAGE_SIZE_IN_MILLISECONDS +#define MA_RESOURCE_MANAGER_PAGE_SIZE_IN_MILLISECONDS 1000 +#endif + +#ifndef MA_JOB_TYPE_RESOURCE_MANAGER_QUEUE_CAPACITY +#define MA_JOB_TYPE_RESOURCE_MANAGER_QUEUE_CAPACITY 1024 +#endif + +MA_API ma_resource_manager_pipeline_notifications ma_resource_manager_pipeline_notifications_init(void) +{ + ma_resource_manager_pipeline_notifications notifications; + + MA_ZERO_OBJECT(&notifications); + + return notifications; +} + +static void ma_resource_manager_pipeline_notifications_signal_all_notifications(const ma_resource_manager_pipeline_notifications* pPipelineNotifications) +{ + if (pPipelineNotifications == NULL) { + return; + } + + if (pPipelineNotifications->init.pNotification) { ma_async_notification_signal(pPipelineNotifications->init.pNotification); } + if (pPipelineNotifications->done.pNotification) { ma_async_notification_signal(pPipelineNotifications->done.pNotification); } +} + +static void ma_resource_manager_pipeline_notifications_acquire_all_fences(const ma_resource_manager_pipeline_notifications* pPipelineNotifications) +{ + if (pPipelineNotifications == NULL) { + return; + } + + if (pPipelineNotifications->init.pFence != NULL) { ma_fence_acquire(pPipelineNotifications->init.pFence); } + if (pPipelineNotifications->done.pFence != NULL) { ma_fence_acquire(pPipelineNotifications->done.pFence); } +} + +static void ma_resource_manager_pipeline_notifications_release_all_fences(const ma_resource_manager_pipeline_notifications* pPipelineNotifications) +{ + if (pPipelineNotifications == NULL) { + return; + } + + if (pPipelineNotifications->init.pFence != NULL) { ma_fence_release(pPipelineNotifications->init.pFence); } + if (pPipelineNotifications->done.pFence != NULL) { ma_fence_release(pPipelineNotifications->done.pFence); } +} + + + +#ifndef MA_DEFAULT_HASH_SEED +#define MA_DEFAULT_HASH_SEED 42 +#endif + +/* MurmurHash3. Based on code from https://github.com/PeterScott/murmur3/blob/master/murmur3.c (public domain). */ +#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) +#pragma GCC diagnostic push +#if __GNUC__ >= 7 +#pragma GCC diagnostic ignored "-Wimplicit-fallthrough" +#endif +#endif + +static MA_INLINE ma_uint32 ma_rotl32(ma_uint32 x, ma_int8 r) +{ + return (x << r) | (x >> (32 - r)); +} + +static MA_INLINE ma_uint32 ma_hash_getblock(const ma_uint32* blocks, int i) +{ + ma_uint32 block; + + /* Try silencing a sanitization warning about unaligned access by doing a memcpy() instead of assignment. 
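A memcpy() from a potentially misaligned address is well defined, whereas dereferencing a misaligned ma_uint32 pointer is undefined behaviour on some targets; compilers will still lower this to a plain load where alignment allows.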
*/ + MA_COPY_MEMORY(&block, ma_offset_ptr(blocks, i * sizeof(block)), sizeof(block)); + + if (ma_is_little_endian()) { + return block; + } else { + return ma_swap_endian_uint32(block); + } +} + +static MA_INLINE ma_uint32 ma_hash_fmix32(ma_uint32 h) +{ + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 0xc2b2ae35; + h ^= h >> 16; + + return h; +} + +static ma_uint32 ma_hash_32(const void* key, int len, ma_uint32 seed) +{ + const ma_uint8* data = (const ma_uint8*)key; + const ma_uint32* blocks; + const ma_uint8* tail; + const int nblocks = len / 4; + ma_uint32 h1 = seed; + ma_uint32 c1 = 0xcc9e2d51; + ma_uint32 c2 = 0x1b873593; + ma_uint32 k1; + int i; + + blocks = (const ma_uint32 *)(data + nblocks*4); + + for(i = -nblocks; i; i++) { + k1 = ma_hash_getblock(blocks,i); + + k1 *= c1; + k1 = ma_rotl32(k1, 15); + k1 *= c2; + + h1 ^= k1; + h1 = ma_rotl32(h1, 13); + h1 = h1*5 + 0xe6546b64; + } + + + tail = (const ma_uint8*)(data + nblocks*4); + + k1 = 0; + switch(len & 3) { + case 3: k1 ^= tail[2] << 16; + case 2: k1 ^= tail[1] << 8; + case 1: k1 ^= tail[0]; + k1 *= c1; k1 = ma_rotl32(k1, 15); k1 *= c2; h1 ^= k1; + }; + + + h1 ^= len; + h1 = ma_hash_fmix32(h1); + + return h1; +} + +#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) +#pragma GCC diagnostic pop +#endif +/* End MurmurHash3 */ + +static ma_uint32 ma_hash_string_32(const char* str) +{ + return ma_hash_32(str, (int)strlen(str), MA_DEFAULT_HASH_SEED); +} + +static ma_uint32 ma_hash_string_w_32(const wchar_t* str) +{ + return ma_hash_32(str, (int)wcslen(str) * sizeof(*str), MA_DEFAULT_HASH_SEED); +} + + + + +/* +Basic BST Functions +*/ +static ma_result ma_resource_manager_data_buffer_node_search(ma_resource_manager* pResourceManager, ma_uint32 hashedName32, ma_resource_manager_data_buffer_node** ppDataBufferNode) +{ + ma_resource_manager_data_buffer_node* pCurrentNode; + + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(ppDataBufferNode != NULL); + + pCurrentNode = pResourceManager->pRootDataBufferNode; + while (pCurrentNode != NULL) { + if (hashedName32 == pCurrentNode->hashedName32) { + break; /* Found. */ + } else if (hashedName32 < pCurrentNode->hashedName32) { + pCurrentNode = pCurrentNode->pChildLo; + } else { + pCurrentNode = pCurrentNode->pChildHi; + } + } + + *ppDataBufferNode = pCurrentNode; + + if (pCurrentNode == NULL) { + return MA_DOES_NOT_EXIST; + } else { + return MA_SUCCESS; + } +} + +static ma_result ma_resource_manager_data_buffer_node_insert_point(ma_resource_manager* pResourceManager, ma_uint32 hashedName32, ma_resource_manager_data_buffer_node** ppInsertPoint) +{ + ma_result result = MA_SUCCESS; + ma_resource_manager_data_buffer_node* pCurrentNode; + + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(ppInsertPoint != NULL); + + *ppInsertPoint = NULL; + + if (pResourceManager->pRootDataBufferNode == NULL) { + return MA_SUCCESS; /* No items. */ + } + + /* We need to find the node that will become the parent of the new node. If a node is found that already has the same hashed name we need to return MA_ALREADY_EXISTS. 
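Note that the tree is keyed on the 32-bit hash of the resource name rather than on the name itself.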
*/ + pCurrentNode = pResourceManager->pRootDataBufferNode; + while (pCurrentNode != NULL) { + if (hashedName32 == pCurrentNode->hashedName32) { + result = MA_ALREADY_EXISTS; + break; + } else { + if (hashedName32 < pCurrentNode->hashedName32) { + if (pCurrentNode->pChildLo == NULL) { + result = MA_SUCCESS; + break; + } else { + pCurrentNode = pCurrentNode->pChildLo; + } + } else { + if (pCurrentNode->pChildHi == NULL) { + result = MA_SUCCESS; + break; + } else { + pCurrentNode = pCurrentNode->pChildHi; + } + } + } + } + + *ppInsertPoint = pCurrentNode; + return result; +} + +static ma_result ma_resource_manager_data_buffer_node_insert_at(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode, ma_resource_manager_data_buffer_node* pInsertPoint) +{ + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pDataBufferNode != NULL); + + /* The key must have been set before calling this function. */ + MA_ASSERT(pDataBufferNode->hashedName32 != 0); + + if (pInsertPoint == NULL) { + /* It's the first node. */ + pResourceManager->pRootDataBufferNode = pDataBufferNode; + } else { + /* It's not the first node. It needs to be inserted. */ + if (pDataBufferNode->hashedName32 < pInsertPoint->hashedName32) { + MA_ASSERT(pInsertPoint->pChildLo == NULL); + pInsertPoint->pChildLo = pDataBufferNode; + } else { + MA_ASSERT(pInsertPoint->pChildHi == NULL); + pInsertPoint->pChildHi = pDataBufferNode; + } + } + + pDataBufferNode->pParent = pInsertPoint; + + return MA_SUCCESS; +} + +#if 0 /* Unused for now. */ +static ma_result ma_resource_manager_data_buffer_node_insert(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + ma_result result; + ma_resource_manager_data_buffer_node* pInsertPoint; + + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pDataBufferNode != NULL); + + result = ma_resource_manager_data_buffer_node_insert_point(pResourceManager, pDataBufferNode->hashedName32, &pInsertPoint); + if (result != MA_SUCCESS) { + return MA_INVALID_ARGS; + } + + return ma_resource_manager_data_buffer_node_insert_at(pResourceManager, pDataBufferNode, pInsertPoint); +} +#endif + +static MA_INLINE ma_resource_manager_data_buffer_node* ma_resource_manager_data_buffer_node_find_min(ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + ma_resource_manager_data_buffer_node* pCurrentNode; + + MA_ASSERT(pDataBufferNode != NULL); + + pCurrentNode = pDataBufferNode; + while (pCurrentNode->pChildLo != NULL) { + pCurrentNode = pCurrentNode->pChildLo; + } + + return pCurrentNode; +} + +static MA_INLINE ma_resource_manager_data_buffer_node* ma_resource_manager_data_buffer_node_find_max(ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + ma_resource_manager_data_buffer_node* pCurrentNode; + + MA_ASSERT(pDataBufferNode != NULL); + + pCurrentNode = pDataBufferNode; + while (pCurrentNode->pChildHi != NULL) { + pCurrentNode = pCurrentNode->pChildHi; + } + + return pCurrentNode; +} + +static MA_INLINE ma_resource_manager_data_buffer_node* ma_resource_manager_data_buffer_node_find_inorder_successor(ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + MA_ASSERT(pDataBufferNode != NULL); + MA_ASSERT(pDataBufferNode->pChildHi != NULL); + + return ma_resource_manager_data_buffer_node_find_min(pDataBufferNode->pChildHi); +} + +static MA_INLINE ma_resource_manager_data_buffer_node* ma_resource_manager_data_buffer_node_find_inorder_predecessor(ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + MA_ASSERT(pDataBufferNode != NULL); + 
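/* An in-order predecessor only exists when the node has a left subtree. */ + 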
MA_ASSERT(pDataBufferNode->pChildLo != NULL); + + return ma_resource_manager_data_buffer_node_find_max(pDataBufferNode->pChildLo); +} + +static ma_result ma_resource_manager_data_buffer_node_remove(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pDataBufferNode != NULL); + + if (pDataBufferNode->pChildLo == NULL) { + if (pDataBufferNode->pChildHi == NULL) { + /* Simple case - deleting a buffer with no children. */ + if (pDataBufferNode->pParent == NULL) { + MA_ASSERT(pResourceManager->pRootDataBufferNode == pDataBufferNode); /* There is only a single buffer in the tree which should be equal to the root node. */ + pResourceManager->pRootDataBufferNode = NULL; + } else { + if (pDataBufferNode->pParent->pChildLo == pDataBufferNode) { + pDataBufferNode->pParent->pChildLo = NULL; + } else { + pDataBufferNode->pParent->pChildHi = NULL; + } + } + } else { + /* Node has one child - pChildHi != NULL. */ + pDataBufferNode->pChildHi->pParent = pDataBufferNode->pParent; + + if (pDataBufferNode->pParent == NULL) { + MA_ASSERT(pResourceManager->pRootDataBufferNode == pDataBufferNode); + pResourceManager->pRootDataBufferNode = pDataBufferNode->pChildHi; + } else { + if (pDataBufferNode->pParent->pChildLo == pDataBufferNode) { + pDataBufferNode->pParent->pChildLo = pDataBufferNode->pChildHi; + } else { + pDataBufferNode->pParent->pChildHi = pDataBufferNode->pChildHi; + } + } + } + } else { + if (pDataBufferNode->pChildHi == NULL) { + /* Node has one child - pChildLo != NULL. */ + pDataBufferNode->pChildLo->pParent = pDataBufferNode->pParent; + + if (pDataBufferNode->pParent == NULL) { + MA_ASSERT(pResourceManager->pRootDataBufferNode == pDataBufferNode); + pResourceManager->pRootDataBufferNode = pDataBufferNode->pChildLo; + } else { + if (pDataBufferNode->pParent->pChildLo == pDataBufferNode) { + pDataBufferNode->pParent->pChildLo = pDataBufferNode->pChildLo; + } else { + pDataBufferNode->pParent->pChildHi = pDataBufferNode->pChildLo; + } + } + } else { + /* Complex case - deleting a node with two children. */ + ma_resource_manager_data_buffer_node* pReplacementDataBufferNode; + + /* For now we are just going to use the in-order successor as the replacement, but we may want to try to keep this balanced by switching between the two. */ + pReplacementDataBufferNode = ma_resource_manager_data_buffer_node_find_inorder_successor(pDataBufferNode); + MA_ASSERT(pReplacementDataBufferNode != NULL); + + /* + Now that we have our replacement node we can make the change. The simple way to do this would be to just exchange the values, and then remove the replacement + node, however we track specific nodes via pointers which means we can't just swap out the values. We need to instead just change the pointers around. The + replacement node should have at most 1 child. Therefore, we can detach it in terms of our simpler cases above. What we're essentially doing is detaching the + replacement node and reinserting it into the same position as the deleted node. + */ + MA_ASSERT(pReplacementDataBufferNode->pParent != NULL); /* The replacement node should never be the root which means it should always have a parent. */ + MA_ASSERT(pReplacementDataBufferNode->pChildLo == NULL); /* Because we used in-order successor. This would be pChildHi == NULL if we used in-order predecessor. 
*/ + + if (pReplacementDataBufferNode->pChildHi == NULL) { + if (pReplacementDataBufferNode->pParent->pChildLo == pReplacementDataBufferNode) { + pReplacementDataBufferNode->pParent->pChildLo = NULL; + } else { + pReplacementDataBufferNode->pParent->pChildHi = NULL; + } + } else { + pReplacementDataBufferNode->pChildHi->pParent = pReplacementDataBufferNode->pParent; + if (pReplacementDataBufferNode->pParent->pChildLo == pReplacementDataBufferNode) { + pReplacementDataBufferNode->pParent->pChildLo = pReplacementDataBufferNode->pChildHi; + } else { + pReplacementDataBufferNode->pParent->pChildHi = pReplacementDataBufferNode->pChildHi; + } + } + + + /* The replacement node has essentially been detached from the binary tree, so now we need to replace the old data buffer with it. The first thing to update is the parent */ + if (pDataBufferNode->pParent != NULL) { + if (pDataBufferNode->pParent->pChildLo == pDataBufferNode) { + pDataBufferNode->pParent->pChildLo = pReplacementDataBufferNode; + } else { + pDataBufferNode->pParent->pChildHi = pReplacementDataBufferNode; + } + } + + /* Now need to update the replacement node's pointers. */ + pReplacementDataBufferNode->pParent = pDataBufferNode->pParent; + pReplacementDataBufferNode->pChildLo = pDataBufferNode->pChildLo; + pReplacementDataBufferNode->pChildHi = pDataBufferNode->pChildHi; + + /* Now the children of the replacement node need to have their parent pointers updated. */ + if (pReplacementDataBufferNode->pChildLo != NULL) { + pReplacementDataBufferNode->pChildLo->pParent = pReplacementDataBufferNode; + } + if (pReplacementDataBufferNode->pChildHi != NULL) { + pReplacementDataBufferNode->pChildHi->pParent = pReplacementDataBufferNode; + } + + /* Now the root node needs to be updated. */ + if (pResourceManager->pRootDataBufferNode == pDataBufferNode) { + pResourceManager->pRootDataBufferNode = pReplacementDataBufferNode; + } + } + } + + return MA_SUCCESS; +} + +#if 0 /* Unused for now. */ +static ma_result ma_resource_manager_data_buffer_node_remove_by_key(ma_resource_manager* pResourceManager, ma_uint32 hashedName32) +{ + ma_result result; + ma_resource_manager_data_buffer_node* pDataBufferNode; + + result = ma_resource_manager_data_buffer_search(pResourceManager, hashedName32, &pDataBufferNode); + if (result != MA_SUCCESS) { + return result; /* Could not find the data buffer. 
*/ + } + + return ma_resource_manager_data_buffer_remove(pResourceManager, pDataBufferNode); +} +#endif + +static ma_resource_manager_data_supply_type ma_resource_manager_data_buffer_node_get_data_supply_type(ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + return (ma_resource_manager_data_supply_type)ma_atomic_load_i32(&pDataBufferNode->data.type); +} + +static void ma_resource_manager_data_buffer_node_set_data_supply_type(ma_resource_manager_data_buffer_node* pDataBufferNode, ma_resource_manager_data_supply_type supplyType) +{ + ma_atomic_exchange_i32(&pDataBufferNode->data.type, supplyType); +} + +static ma_result ma_resource_manager_data_buffer_node_increment_ref(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode, ma_uint32* pNewRefCount) +{ + ma_uint32 refCount; + + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pDataBufferNode != NULL); + + (void)pResourceManager; + + refCount = ma_atomic_fetch_add_32(&pDataBufferNode->refCount, 1) + 1; + + if (pNewRefCount != NULL) { + *pNewRefCount = refCount; + } + + return MA_SUCCESS; +} + +static ma_result ma_resource_manager_data_buffer_node_decrement_ref(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode, ma_uint32* pNewRefCount) +{ + ma_uint32 refCount; + + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pDataBufferNode != NULL); + + (void)pResourceManager; + + refCount = ma_atomic_fetch_sub_32(&pDataBufferNode->refCount, 1) - 1; + + if (pNewRefCount != NULL) { + *pNewRefCount = refCount; + } + + return MA_SUCCESS; +} + +static void ma_resource_manager_data_buffer_node_free(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pDataBufferNode != NULL); + + if (pDataBufferNode->isDataOwnedByResourceManager) { + if (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBufferNode) == ma_resource_manager_data_supply_type_encoded) { + ma_free((void*)pDataBufferNode->data.backend.encoded.pData, &pResourceManager->config.allocationCallbacks); + pDataBufferNode->data.backend.encoded.pData = NULL; + pDataBufferNode->data.backend.encoded.sizeInBytes = 0; + } else if (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBufferNode) == ma_resource_manager_data_supply_type_decoded) { + ma_free((void*)pDataBufferNode->data.backend.decoded.pData, &pResourceManager->config.allocationCallbacks); + pDataBufferNode->data.backend.decoded.pData = NULL; + pDataBufferNode->data.backend.decoded.totalFrameCount = 0; + } else if (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBufferNode) == ma_resource_manager_data_supply_type_decoded_paged) { + ma_paged_audio_buffer_data_uninit(&pDataBufferNode->data.backend.decodedPaged.data, &pResourceManager->config.allocationCallbacks); + } else { + /* Should never hit this if the node was successfully initialized. */ + MA_ASSERT(pDataBufferNode->result != MA_SUCCESS); + } + } + + /* The data buffer itself needs to be freed. */ + ma_free(pDataBufferNode, &pResourceManager->config.allocationCallbacks); +} + +static ma_result ma_resource_manager_data_buffer_node_result(const ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + MA_ASSERT(pDataBufferNode != NULL); + + return (ma_result)ma_atomic_load_i32((ma_result*)&pDataBufferNode->result); /* Need a naughty const-cast here. 
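ma_atomic_load_i32() takes a non-const pointer even though it only reads, so the const has to be cast away.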
*/ +} + + +static ma_bool32 ma_resource_manager_is_threading_enabled(const ma_resource_manager* pResourceManager) +{ + MA_ASSERT(pResourceManager != NULL); + + return (pResourceManager->config.flags & MA_RESOURCE_MANAGER_FLAG_NO_THREADING) == 0; +} + + +typedef struct +{ + union + { + ma_async_notification_event e; + ma_async_notification_poll p; + } backend; /* Must be the first member. */ + ma_resource_manager* pResourceManager; +} ma_resource_manager_inline_notification; + +static ma_result ma_resource_manager_inline_notification_init(ma_resource_manager* pResourceManager, ma_resource_manager_inline_notification* pNotification) +{ + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pNotification != NULL); + + pNotification->pResourceManager = pResourceManager; + + if (ma_resource_manager_is_threading_enabled(pResourceManager)) { + return ma_async_notification_event_init(&pNotification->backend.e); + } else { + return ma_async_notification_poll_init(&pNotification->backend.p); + } +} + +static void ma_resource_manager_inline_notification_uninit(ma_resource_manager_inline_notification* pNotification) +{ + MA_ASSERT(pNotification != NULL); + + if (ma_resource_manager_is_threading_enabled(pNotification->pResourceManager)) { + ma_async_notification_event_uninit(&pNotification->backend.e); + } else { + /* No need to uninitialize a polling notification. */ + } +} + +static void ma_resource_manager_inline_notification_wait(ma_resource_manager_inline_notification* pNotification) +{ + MA_ASSERT(pNotification != NULL); + + if (ma_resource_manager_is_threading_enabled(pNotification->pResourceManager)) { + ma_async_notification_event_wait(&pNotification->backend.e); + } else { + while (ma_async_notification_poll_is_signalled(&pNotification->backend.p) == MA_FALSE) { + ma_result result = ma_resource_manager_process_next_job(pNotification->pResourceManager); + if (result == MA_NO_DATA_AVAILABLE || result == MA_CANCELLED) { + break; + } + } + } +} + +static void ma_resource_manager_inline_notification_wait_and_uninit(ma_resource_manager_inline_notification* pNotification) +{ + ma_resource_manager_inline_notification_wait(pNotification); + ma_resource_manager_inline_notification_uninit(pNotification); +} + + +static void ma_resource_manager_data_buffer_bst_lock(ma_resource_manager* pResourceManager) +{ + MA_ASSERT(pResourceManager != NULL); + + if (ma_resource_manager_is_threading_enabled(pResourceManager)) { +#ifndef MA_NO_THREADING + { + ma_mutex_lock(&pResourceManager->dataBufferBSTLock); + } +#else + { + MA_ASSERT(MA_FALSE); /* Should never hit this. */ + } +#endif + } else { + /* Threading not enabled. Do nothing. */ + } +} + +static void ma_resource_manager_data_buffer_bst_unlock(ma_resource_manager* pResourceManager) +{ + MA_ASSERT(pResourceManager != NULL); + + if (ma_resource_manager_is_threading_enabled(pResourceManager)) { +#ifndef MA_NO_THREADING + { + ma_mutex_unlock(&pResourceManager->dataBufferBSTLock); + } +#else + { + MA_ASSERT(MA_FALSE); /* Should never hit this. */ + } +#endif + } else { + /* Threading not enabled. Do nothing. */ + } +} + +#ifndef MA_NO_THREADING +static ma_thread_result MA_THREADCALL ma_resource_manager_job_thread(void* pUserData) +{ + ma_resource_manager* pResourceManager = (ma_resource_manager*)pUserData; + MA_ASSERT(pResourceManager != NULL); + + for (;;) { + ma_result result; + ma_job job; + + result = ma_resource_manager_next_job(pResourceManager, &job); + if (result != MA_SUCCESS) { + break; + } + + /* Terminate if we got a quit message. 
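The quit job is deliberately never removed from the queue (see the note in ma_resource_manager_uninit() below), so every job thread will eventually see it and shut itself down.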
*/ + if (job.toc.breakup.code == MA_JOB_TYPE_QUIT) { + break; + } + + ma_job_process(&job); + } + + return (ma_thread_result)0; +} +#endif + +MA_API ma_resource_manager_config ma_resource_manager_config_init(void) +{ + ma_resource_manager_config config; + + MA_ZERO_OBJECT(&config); + config.decodedFormat = ma_format_unknown; + config.decodedChannels = 0; + config.decodedSampleRate = 0; + config.jobThreadCount = 1; /* A single miniaudio-managed job thread by default. */ + config.jobQueueCapacity = MA_JOB_TYPE_RESOURCE_MANAGER_QUEUE_CAPACITY; + + /* Flags. */ + config.flags = 0; +#ifdef MA_NO_THREADING + { + /* Threading is disabled at compile time so disable threading at runtime as well by default. */ + config.flags |= MA_RESOURCE_MANAGER_FLAG_NO_THREADING; + config.jobThreadCount = 0; + } +#endif + + return config; +} + + +MA_API ma_result ma_resource_manager_init(const ma_resource_manager_config* pConfig, ma_resource_manager* pResourceManager) +{ + ma_result result; + ma_job_queue_config jobQueueConfig; + + if (pResourceManager == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pResourceManager); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + +#ifndef MA_NO_THREADING + { + if (pConfig->jobThreadCount > ma_countof(pResourceManager->jobThreads)) { + return MA_INVALID_ARGS; /* Requesting too many job threads. */ + } + } +#endif + + pResourceManager->config = *pConfig; + ma_allocation_callbacks_init_copy(&pResourceManager->config.allocationCallbacks, &pConfig->allocationCallbacks); + + /* Get the log set up early so we can start using it as soon as possible. */ + if (pResourceManager->config.pLog == NULL) { + result = ma_log_init(&pResourceManager->config.allocationCallbacks, &pResourceManager->log); + if (result == MA_SUCCESS) { + pResourceManager->config.pLog = &pResourceManager->log; + } else { + pResourceManager->config.pLog = NULL; /* Logging is unavailable. */ + } + } + + if (pResourceManager->config.pVFS == NULL) { + result = ma_default_vfs_init(&pResourceManager->defaultVFS, &pResourceManager->config.allocationCallbacks); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the default file system. */ + } + + pResourceManager->config.pVFS = &pResourceManager->defaultVFS; + } + + /* If threading has been disabled at compile time, enfore it at run time as well. */ +#ifdef MA_NO_THREADING + { + pResourceManager->config.flags |= MA_RESOURCE_MANAGER_FLAG_NO_THREADING; + } +#endif + + /* We need to force MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING if MA_RESOURCE_MANAGER_FLAG_NO_THREADING is set. */ + if ((pResourceManager->config.flags & MA_RESOURCE_MANAGER_FLAG_NO_THREADING) != 0) { + pResourceManager->config.flags |= MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING; + + /* We cannot allow job threads when MA_RESOURCE_MANAGER_FLAG_NO_THREADING has been set. This is an invalid use case. */ + if (pResourceManager->config.jobThreadCount > 0) { + return MA_INVALID_ARGS; + } + } + + /* Job queue. */ + jobQueueConfig.capacity = pResourceManager->config.jobQueueCapacity; + jobQueueConfig.flags = 0; + if ((pResourceManager->config.flags & MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING) != 0) { + if (pResourceManager->config.jobThreadCount > 0) { + return MA_INVALID_ARGS; /* Non-blocking mode is only valid for self-managed job threads. 
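That is, when MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING is set the application is expected to drive the job queue itself, for example by calling ma_resource_manager_process_next_job() periodically, so miniaudio-managed job threads cannot be used at the same time.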
*/
+        }
+
+        jobQueueConfig.flags |= MA_JOB_QUEUE_FLAG_NON_BLOCKING;
+    }
+
+    result = ma_job_queue_init(&jobQueueConfig, &pResourceManager->config.allocationCallbacks, &pResourceManager->jobQueue);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+
+    /* Custom decoding backends. */
+    if (pConfig->ppCustomDecodingBackendVTables != NULL && pConfig->customDecodingBackendCount > 0) {
+        size_t sizeInBytes = sizeof(*pResourceManager->config.ppCustomDecodingBackendVTables) * pConfig->customDecodingBackendCount;
+
+        pResourceManager->config.ppCustomDecodingBackendVTables = (ma_decoding_backend_vtable**)ma_malloc(sizeInBytes, &pResourceManager->config.allocationCallbacks);
+        if (pResourceManager->config.ppCustomDecodingBackendVTables == NULL) {
+            ma_job_queue_uninit(&pResourceManager->jobQueue, &pResourceManager->config.allocationCallbacks);
+            return MA_OUT_OF_MEMORY;
+        }
+
+        MA_COPY_MEMORY(pResourceManager->config.ppCustomDecodingBackendVTables, pConfig->ppCustomDecodingBackendVTables, sizeInBytes);
+
+        pResourceManager->config.customDecodingBackendCount = pConfig->customDecodingBackendCount;
+        pResourceManager->config.pCustomDecodingBackendUserData = pConfig->pCustomDecodingBackendUserData;
+    }
+
+
+
+    /* Here is where we initialize our threading stuff. We don't do this if we don't support threading. */
+    if (ma_resource_manager_is_threading_enabled(pResourceManager)) {
+#ifndef MA_NO_THREADING
+        {
+            ma_uint32 iJobThread;
+
+            /* Data buffer lock. */
+            result = ma_mutex_init(&pResourceManager->dataBufferBSTLock);
+            if (result != MA_SUCCESS) {
+                ma_job_queue_uninit(&pResourceManager->jobQueue, &pResourceManager->config.allocationCallbacks);
+                return result;
+            }
+
+            /* Create the job threads last to ensure the threads have access to valid data. */
+            for (iJobThread = 0; iJobThread < pResourceManager->config.jobThreadCount; iJobThread += 1) {
+                result = ma_thread_create(&pResourceManager->jobThreads[iJobThread], ma_thread_priority_normal, pResourceManager->config.jobThreadStackSize, ma_resource_manager_job_thread, pResourceManager, &pResourceManager->config.allocationCallbacks);
+                if (result != MA_SUCCESS) {
+                    ma_mutex_uninit(&pResourceManager->dataBufferBSTLock);
+                    ma_job_queue_uninit(&pResourceManager->jobQueue, &pResourceManager->config.allocationCallbacks);
+                    return result;
+                }
+            }
+        }
+#else
+        {
+            /* Threading is disabled at compile time. We should never get here because validation checks should have already been performed. */
+            MA_ASSERT(MA_FALSE);
+        }
+#endif
+    }
+
+    return MA_SUCCESS;
+}
+
+
+static void ma_resource_manager_delete_all_data_buffer_nodes(ma_resource_manager* pResourceManager)
+{
+    MA_ASSERT(pResourceManager);
+
+    /* If everything was done properly, there shouldn't be any active data buffers. */
+    while (pResourceManager->pRootDataBufferNode != NULL) {
+        ma_resource_manager_data_buffer_node* pDataBufferNode = pResourceManager->pRootDataBufferNode;
+        ma_resource_manager_data_buffer_node_remove(pResourceManager, pDataBufferNode);
+
+        /* The data buffer has been removed from the BST, so now we need to free its data. */
+        ma_resource_manager_data_buffer_node_free(pResourceManager, pDataBufferNode);
+    }
+}
+
+MA_API void ma_resource_manager_uninit(ma_resource_manager* pResourceManager)
+{
+    if (pResourceManager == NULL) {
+        return;
+    }
+
+    /*
+    Job threads need to be killed first. To do this we need to post a quit message to the message queue and then wait for the thread. 
The quit message will never be removed from the
+    queue, which means it will keep being returned after it is first encountered, which means all threads will eventually receive it.
+    */
+    ma_resource_manager_post_job_quit(pResourceManager);
+
+    /* Wait for every job to finish before continuing to ensure nothing is still trying to access any of our objects below. */
+    if (ma_resource_manager_is_threading_enabled(pResourceManager)) {
+#ifndef MA_NO_THREADING
+        {
+            ma_uint32 iJobThread;
+
+            for (iJobThread = 0; iJobThread < pResourceManager->config.jobThreadCount; iJobThread += 1) {
+                ma_thread_wait(&pResourceManager->jobThreads[iJobThread]);
+            }
+        }
+#else
+        {
+            MA_ASSERT(MA_FALSE); /* Should never hit this. */
+        }
+#endif
+    }
+
+    /* At this point the thread should have returned and no other thread should be accessing our data. We can now delete all data buffers. */
+    ma_resource_manager_delete_all_data_buffer_nodes(pResourceManager);
+
+    /* The job queue is no longer needed. */
+    ma_job_queue_uninit(&pResourceManager->jobQueue, &pResourceManager->config.allocationCallbacks);
+
+    /* We're no longer doing anything with data buffers so the lock can now be uninitialized. */
+    if (ma_resource_manager_is_threading_enabled(pResourceManager)) {
+#ifndef MA_NO_THREADING
+        {
+            ma_mutex_uninit(&pResourceManager->dataBufferBSTLock);
+        }
+#else
+        {
+            MA_ASSERT(MA_FALSE); /* Should never hit this. */
+        }
+#endif
+    }
+
+    ma_free(pResourceManager->config.ppCustomDecodingBackendVTables, &pResourceManager->config.allocationCallbacks);
+
+    if (pResourceManager->config.pLog == &pResourceManager->log) {
+        ma_log_uninit(&pResourceManager->log);
+    }
+}
+
+MA_API ma_log* ma_resource_manager_get_log(ma_resource_manager* pResourceManager)
+{
+    if (pResourceManager == NULL) {
+        return NULL;
+    }
+
+    return pResourceManager->config.pLog;
+}
+
+
+
+MA_API ma_resource_manager_data_source_config ma_resource_manager_data_source_config_init(void)
+{
+    ma_resource_manager_data_source_config config;
+
+    MA_ZERO_OBJECT(&config);
+    config.rangeBegInPCMFrames = MA_DATA_SOURCE_DEFAULT_RANGE_BEG;
+    config.rangeEndInPCMFrames = MA_DATA_SOURCE_DEFAULT_RANGE_END;
+    config.loopPointBegInPCMFrames = MA_DATA_SOURCE_DEFAULT_LOOP_POINT_BEG;
+    config.loopPointEndInPCMFrames = MA_DATA_SOURCE_DEFAULT_LOOP_POINT_END;
+    config.isLooping = MA_FALSE;
+
+    return config;
+}
+
+
+static ma_decoder_config ma_resource_manager__init_decoder_config(ma_resource_manager* pResourceManager)
+{
+    ma_decoder_config config;
+
+    config = ma_decoder_config_init(pResourceManager->config.decodedFormat, pResourceManager->config.decodedChannels, pResourceManager->config.decodedSampleRate);
+    config.allocationCallbacks = pResourceManager->config.allocationCallbacks;
+    config.ppCustomBackendVTables = pResourceManager->config.ppCustomDecodingBackendVTables;
+    config.customBackendCount = pResourceManager->config.customDecodingBackendCount;
+    config.pCustomBackendUserData = pResourceManager->config.pCustomDecodingBackendUserData;
+
+    return config;
+}
+
+static ma_result ma_resource_manager__init_decoder(ma_resource_manager* pResourceManager, const char* pFilePath, const wchar_t* pFilePathW, ma_decoder* pDecoder)
+{
+    ma_result result;
+    ma_decoder_config config;
+
+    MA_ASSERT(pResourceManager != NULL);
+    MA_ASSERT(pFilePath != NULL || pFilePathW != NULL);
+    MA_ASSERT(pDecoder != NULL);
+
+    config = ma_resource_manager__init_decoder_config(pResourceManager);
+
+    if (pFilePath != NULL) {
+        result = ma_decoder_init_vfs(pResourceManager->config.pVFS, pFilePath, 
&config, pDecoder); + if (result != MA_SUCCESS) { + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_WARNING, "Failed to load file \"%s\". %s.\n", pFilePath, ma_result_description(result)); + return result; + } + } else { + result = ma_decoder_init_vfs_w(pResourceManager->config.pVFS, pFilePathW, &config, pDecoder); + if (result != MA_SUCCESS) { +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || defined(_MSC_VER) + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_WARNING, "Failed to load file \"%ls\". %s.\n", pFilePathW, ma_result_description(result)); +#endif + return result; + } + } + + return MA_SUCCESS; +} + +static ma_bool32 ma_resource_manager_data_buffer_has_connector(ma_resource_manager_data_buffer* pDataBuffer) +{ + return ma_atomic_bool32_get(&pDataBuffer->isConnectorInitialized); +} + +static ma_data_source* ma_resource_manager_data_buffer_get_connector(ma_resource_manager_data_buffer* pDataBuffer) +{ + if (ma_resource_manager_data_buffer_has_connector(pDataBuffer) == MA_FALSE) { + return NULL; /* Connector not yet initialized. */ + } + + switch (pDataBuffer->pNode->data.type) + { + case ma_resource_manager_data_supply_type_encoded: return &pDataBuffer->connector.decoder; + case ma_resource_manager_data_supply_type_decoded: return &pDataBuffer->connector.buffer; + case ma_resource_manager_data_supply_type_decoded_paged: return &pDataBuffer->connector.pagedBuffer; + + case ma_resource_manager_data_supply_type_unknown: + default: + { + ma_log_postf(ma_resource_manager_get_log(pDataBuffer->pResourceManager), MA_LOG_LEVEL_ERROR, "Failed to retrieve data buffer connector. Unknown data supply type.\n"); + return NULL; + }; + }; +} + +static ma_result ma_resource_manager_data_buffer_init_connector(ma_resource_manager_data_buffer* pDataBuffer, const ma_resource_manager_data_source_config* pConfig, ma_async_notification* pInitNotification, ma_fence* pInitFence) +{ + ma_result result; + + MA_ASSERT(pDataBuffer != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(ma_resource_manager_data_buffer_has_connector(pDataBuffer) == MA_FALSE); + + /* The underlying data buffer must be initialized before we'll be able to know how to initialize the backend. */ + result = ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode); + if (result != MA_SUCCESS && result != MA_BUSY) { + return result; /* The data buffer is in an erroneous state. */ + } + + /* + We need to initialize either a ma_decoder or an ma_audio_buffer depending on whether or not the backing data is encoded or decoded. These act as the + "instance" to the data and are used to form the connection between underlying data buffer and the data source. If the data buffer is decoded, we can use + an ma_audio_buffer. This enables us to use memory mapping when mixing which saves us a bit of data movement overhead. + */ + switch (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode)) + { + case ma_resource_manager_data_supply_type_encoded: /* Connector is a decoder. */ + { + ma_decoder_config config; + config = ma_resource_manager__init_decoder_config(pDataBuffer->pResourceManager); + result = ma_decoder_init_memory(pDataBuffer->pNode->data.backend.encoded.pData, pDataBuffer->pNode->data.backend.encoded.sizeInBytes, &config, &pDataBuffer->connector.decoder); + } break; + + case ma_resource_manager_data_supply_type_decoded: /* Connector is an audio buffer. 
*/ + { + ma_audio_buffer_config config; + config = ma_audio_buffer_config_init(pDataBuffer->pNode->data.backend.decoded.format, pDataBuffer->pNode->data.backend.decoded.channels, pDataBuffer->pNode->data.backend.decoded.totalFrameCount, pDataBuffer->pNode->data.backend.decoded.pData, NULL); + result = ma_audio_buffer_init(&config, &pDataBuffer->connector.buffer); + } break; + + case ma_resource_manager_data_supply_type_decoded_paged: /* Connector is a paged audio buffer. */ + { + ma_paged_audio_buffer_config config; + config = ma_paged_audio_buffer_config_init(&pDataBuffer->pNode->data.backend.decodedPaged.data); + result = ma_paged_audio_buffer_init(&config, &pDataBuffer->connector.pagedBuffer); + } break; + + case ma_resource_manager_data_supply_type_unknown: + default: + { + /* Unknown data supply type. Should never happen. Need to post an error here. */ + return MA_INVALID_ARGS; + }; + } + + /* + Initialization of the connector is when we can fire the init notification. This will give the application access to + the format/channels/rate of the data source. + */ + if (result == MA_SUCCESS) { + /* + The resource manager supports the ability to set the range and loop settings via a config at + initialization time. This results in an case where the ranges could be set explicitly via + ma_data_source_set_*() before we get to this point here. If this happens, we'll end up + hitting a case where we just override those settings which results in what feels like a bug. + + To address this we only change the relevant properties if they're not equal to defaults. If + they're equal to defaults there's no need to change them anyway. If they're *not* set to the + default values, we can assume the user has set the range and loop settings via the config. If + they're doing their own calls to ma_data_source_set_*() in addition to setting them via the + config, that's entirely on the caller and any synchronization issue becomes their problem. + */ + if (pConfig->rangeBegInPCMFrames != MA_DATA_SOURCE_DEFAULT_RANGE_BEG || pConfig->rangeEndInPCMFrames != MA_DATA_SOURCE_DEFAULT_RANGE_END) { + ma_data_source_set_range_in_pcm_frames(pDataBuffer, pConfig->rangeBegInPCMFrames, pConfig->rangeEndInPCMFrames); + } + + if (pConfig->loopPointBegInPCMFrames != MA_DATA_SOURCE_DEFAULT_LOOP_POINT_BEG || pConfig->loopPointEndInPCMFrames != MA_DATA_SOURCE_DEFAULT_LOOP_POINT_END) { + ma_data_source_set_loop_point_in_pcm_frames(pDataBuffer, pConfig->loopPointBegInPCMFrames, pConfig->loopPointEndInPCMFrames); + } + + if (pConfig->isLooping != MA_FALSE) { + ma_data_source_set_looping(pDataBuffer, pConfig->isLooping); + } + + ma_atomic_bool32_set(&pDataBuffer->isConnectorInitialized, MA_TRUE); + + if (pInitNotification != NULL) { + ma_async_notification_signal(pInitNotification); + } + + if (pInitFence != NULL) { + ma_fence_release(pInitFence); + } + } + + /* At this point the backend should be initialized. We do *not* want to set pDataSource->result here - that needs to be done at a higher level to ensure it's done as the last step. */ + return result; +} + +static ma_result ma_resource_manager_data_buffer_uninit_connector(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer* pDataBuffer) +{ + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pDataBuffer != NULL); + + (void)pResourceManager; + + switch (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode)) + { + case ma_resource_manager_data_supply_type_encoded: /* Connector is a decoder. 
*/ + { + ma_decoder_uninit(&pDataBuffer->connector.decoder); + } break; + + case ma_resource_manager_data_supply_type_decoded: /* Connector is an audio buffer. */ + { + ma_audio_buffer_uninit(&pDataBuffer->connector.buffer); + } break; + + case ma_resource_manager_data_supply_type_decoded_paged: /* Connector is a paged audio buffer. */ + { + ma_paged_audio_buffer_uninit(&pDataBuffer->connector.pagedBuffer); + } break; + + case ma_resource_manager_data_supply_type_unknown: + default: + { + /* Unknown data supply type. Should never happen. Need to post an error here. */ + return MA_INVALID_ARGS; + }; + } + + return MA_SUCCESS; +} + +static ma_uint32 ma_resource_manager_data_buffer_node_next_execution_order(ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + MA_ASSERT(pDataBufferNode != NULL); + return ma_atomic_fetch_add_32(&pDataBufferNode->executionCounter, 1); +} + +static ma_result ma_resource_manager_data_buffer_node_init_supply_encoded(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode, const char* pFilePath, const wchar_t* pFilePathW) +{ + ma_result result; + size_t dataSizeInBytes; + void* pData; + + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pDataBufferNode != NULL); + MA_ASSERT(pFilePath != NULL || pFilePathW != NULL); + + result = ma_vfs_open_and_read_file_ex(pResourceManager->config.pVFS, pFilePath, pFilePathW, &pData, &dataSizeInBytes, &pResourceManager->config.allocationCallbacks); + if (result != MA_SUCCESS) { + if (pFilePath != NULL) { + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_WARNING, "Failed to load file \"%s\". %s.\n", pFilePath, ma_result_description(result)); + } else { +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || defined(_MSC_VER) + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_WARNING, "Failed to load file \"%ls\". %s.\n", pFilePathW, ma_result_description(result)); +#endif + } + + return result; + } + + pDataBufferNode->data.backend.encoded.pData = pData; + pDataBufferNode->data.backend.encoded.sizeInBytes = dataSizeInBytes; + ma_resource_manager_data_buffer_node_set_data_supply_type(pDataBufferNode, ma_resource_manager_data_supply_type_encoded); /* <-- Must be set last. */ + + return MA_SUCCESS; +} + +static ma_result ma_resource_manager_data_buffer_node_init_supply_decoded(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode, const char* pFilePath, const wchar_t* pFilePathW, ma_uint32 flags, ma_decoder** ppDecoder) +{ + ma_result result = MA_SUCCESS; + ma_decoder* pDecoder; + ma_uint64 totalFrameCount; + + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pDataBufferNode != NULL); + MA_ASSERT(ppDecoder != NULL); + MA_ASSERT(pFilePath != NULL || pFilePathW != NULL); + + *ppDecoder = NULL; /* For safety. */ + + pDecoder = (ma_decoder*)ma_malloc(sizeof(*pDecoder), &pResourceManager->config.allocationCallbacks); + if (pDecoder == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_resource_manager__init_decoder(pResourceManager, pFilePath, pFilePathW, pDecoder); + if (result != MA_SUCCESS) { + ma_free(pDecoder, &pResourceManager->config.allocationCallbacks); + return result; + } + + /* + At this point we have the decoder and we now need to initialize the data supply. This will + be either a decoded buffer, or a decoded paged buffer. A regular buffer is just one big heap + allocated buffer, whereas a paged buffer is a linked list of paged-sized buffers. 
The latter + is used when the length of a sound is unknown until a full decode has been performed. + */ + if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_UNKNOWN_LENGTH) == 0) { + result = ma_decoder_get_length_in_pcm_frames(pDecoder, &totalFrameCount); + if (result != MA_SUCCESS) { + return result; + } + } else { + totalFrameCount = 0; + } + + if (totalFrameCount > 0) { + /* It's a known length. The data supply is a regular decoded buffer. */ + ma_uint64 dataSizeInBytes; + void* pData; + + dataSizeInBytes = totalFrameCount * ma_get_bytes_per_frame(pDecoder->outputFormat, pDecoder->outputChannels); + if (dataSizeInBytes > MA_SIZE_MAX) { + ma_decoder_uninit(pDecoder); + ma_free(pDecoder, &pResourceManager->config.allocationCallbacks); + return MA_TOO_BIG; + } + + pData = ma_malloc((size_t)dataSizeInBytes, &pResourceManager->config.allocationCallbacks); + if (pData == NULL) { + ma_decoder_uninit(pDecoder); + ma_free(pDecoder, &pResourceManager->config.allocationCallbacks); + return MA_OUT_OF_MEMORY; + } + + /* The buffer needs to be initialized to silence in case the caller reads from it. */ + ma_silence_pcm_frames(pData, totalFrameCount, pDecoder->outputFormat, pDecoder->outputChannels); + + /* Data has been allocated and the data supply can now be initialized. */ + pDataBufferNode->data.backend.decoded.pData = pData; + pDataBufferNode->data.backend.decoded.totalFrameCount = totalFrameCount; + pDataBufferNode->data.backend.decoded.format = pDecoder->outputFormat; + pDataBufferNode->data.backend.decoded.channels = pDecoder->outputChannels; + pDataBufferNode->data.backend.decoded.sampleRate = pDecoder->outputSampleRate; + pDataBufferNode->data.backend.decoded.decodedFrameCount = 0; + ma_resource_manager_data_buffer_node_set_data_supply_type(pDataBufferNode, ma_resource_manager_data_supply_type_decoded); /* <-- Must be set last. */ + } else { + /* + It's an unknown length. The data supply is a paged decoded buffer. Setting this up is + actually easier than the non-paged decoded buffer because we just need to initialize + a ma_paged_audio_buffer object. + */ + result = ma_paged_audio_buffer_data_init(pDecoder->outputFormat, pDecoder->outputChannels, &pDataBufferNode->data.backend.decodedPaged.data); + if (result != MA_SUCCESS) { + ma_decoder_uninit(pDecoder); + ma_free(pDecoder, &pResourceManager->config.allocationCallbacks); + return result; + } + + pDataBufferNode->data.backend.decodedPaged.sampleRate = pDecoder->outputSampleRate; + pDataBufferNode->data.backend.decodedPaged.decodedFrameCount = 0; + ma_resource_manager_data_buffer_node_set_data_supply_type(pDataBufferNode, ma_resource_manager_data_supply_type_decoded_paged); /* <-- Must be set last. */ + } + + *ppDecoder = pDecoder; + + return MA_SUCCESS; +} + +static ma_result ma_resource_manager_data_buffer_node_decode_next_page(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode, ma_decoder* pDecoder) +{ + ma_result result = MA_SUCCESS; + ma_uint64 pageSizeInFrames; + ma_uint64 framesToTryReading; + ma_uint64 framesRead; + + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pDataBufferNode != NULL); + MA_ASSERT(pDecoder != NULL); + + /* We need to know the size of a page in frames to know how many frames to decode. */ + pageSizeInFrames = MA_RESOURCE_MANAGER_PAGE_SIZE_IN_MILLISECONDS * (pDecoder->outputSampleRate/1000); + framesToTryReading = pageSizeInFrames; + + /* + Here is where we do the decoding of the next page. 
We'll run a slightly different path depending + on whether or not we're using a flat or paged buffer because the allocation of the page differs + between the two. For a flat buffer it's an offset to an already-allocated buffer. For a paged + buffer, we need to allocate a new page and attach it to the linked list. + */ + switch (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBufferNode)) + { + case ma_resource_manager_data_supply_type_decoded: + { + /* The destination buffer is an offset to the existing buffer. Don't read more than we originally retrieved when we first initialized the decoder. */ + void* pDst; + ma_uint64 framesRemaining = pDataBufferNode->data.backend.decoded.totalFrameCount - pDataBufferNode->data.backend.decoded.decodedFrameCount; + if (framesToTryReading > framesRemaining) { + framesToTryReading = framesRemaining; + } + + if (framesToTryReading > 0) { + pDst = ma_offset_ptr( + pDataBufferNode->data.backend.decoded.pData, + pDataBufferNode->data.backend.decoded.decodedFrameCount * ma_get_bytes_per_frame(pDataBufferNode->data.backend.decoded.format, pDataBufferNode->data.backend.decoded.channels) + ); + MA_ASSERT(pDst != NULL); + + result = ma_decoder_read_pcm_frames(pDecoder, pDst, framesToTryReading, &framesRead); + if (framesRead > 0) { + pDataBufferNode->data.backend.decoded.decodedFrameCount += framesRead; + } + } else { + framesRead = 0; + } + } break; + + case ma_resource_manager_data_supply_type_decoded_paged: + { + /* The destination buffer is a freshly allocated page. */ + ma_paged_audio_buffer_page* pPage; + + result = ma_paged_audio_buffer_data_allocate_page(&pDataBufferNode->data.backend.decodedPaged.data, framesToTryReading, NULL, &pResourceManager->config.allocationCallbacks, &pPage); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_decoder_read_pcm_frames(pDecoder, pPage->pAudioData, framesToTryReading, &framesRead); + if (framesRead > 0) { + pPage->sizeInFrames = framesRead; + + result = ma_paged_audio_buffer_data_append_page(&pDataBufferNode->data.backend.decodedPaged.data, pPage); + if (result == MA_SUCCESS) { + pDataBufferNode->data.backend.decodedPaged.decodedFrameCount += framesRead; + } else { + /* Failed to append the page. Just abort and set the status to MA_AT_END. */ + ma_paged_audio_buffer_data_free_page(&pDataBufferNode->data.backend.decodedPaged.data, pPage, &pResourceManager->config.allocationCallbacks); + result = MA_AT_END; + } + } else { + /* No frames were read. Free the page and just set the status to MA_AT_END. */ + ma_paged_audio_buffer_data_free_page(&pDataBufferNode->data.backend.decodedPaged.data, pPage, &pResourceManager->config.allocationCallbacks); + result = MA_AT_END; + } + } break; + + case ma_resource_manager_data_supply_type_encoded: + case ma_resource_manager_data_supply_type_unknown: + default: + { + /* Unexpected data supply type. 
*/ + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_ERROR, "Unexpected data supply type (%d) when decoding page.", ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBufferNode)); + return MA_ERROR; + }; + } + + if (result == MA_SUCCESS && framesRead == 0) { + result = MA_AT_END; + } + + return result; +} + +static ma_result ma_resource_manager_data_buffer_node_acquire_critical_section(ma_resource_manager* pResourceManager, const char* pFilePath, const wchar_t* pFilePathW, ma_uint32 hashedName32, ma_uint32 flags, const ma_resource_manager_data_supply* pExistingData, ma_fence* pInitFence, ma_fence* pDoneFence, ma_resource_manager_inline_notification* pInitNotification, ma_resource_manager_data_buffer_node** ppDataBufferNode) +{ + ma_result result = MA_SUCCESS; + ma_resource_manager_data_buffer_node* pDataBufferNode = NULL; + ma_resource_manager_data_buffer_node* pInsertPoint; + + if (ppDataBufferNode != NULL) { + *ppDataBufferNode = NULL; + } + + result = ma_resource_manager_data_buffer_node_insert_point(pResourceManager, hashedName32, &pInsertPoint); + if (result == MA_ALREADY_EXISTS) { + /* The node already exists. We just need to increment the reference count. */ + pDataBufferNode = pInsertPoint; + + result = ma_resource_manager_data_buffer_node_increment_ref(pResourceManager, pDataBufferNode, NULL); + if (result != MA_SUCCESS) { + return result; /* Should never happen. Failed to increment the reference count. */ + } + + result = MA_ALREADY_EXISTS; + goto done; + } else { + /* + The node does not already exist. We need to post a LOAD_DATA_BUFFER_NODE job here. This + needs to be done inside the critical section to ensure an uninitialization of the node + does not occur before initialization on another thread. + */ + pDataBufferNode = (ma_resource_manager_data_buffer_node*)ma_malloc(sizeof(*pDataBufferNode), &pResourceManager->config.allocationCallbacks); + if (pDataBufferNode == NULL) { + return MA_OUT_OF_MEMORY; + } + + MA_ZERO_OBJECT(pDataBufferNode); + pDataBufferNode->hashedName32 = hashedName32; + pDataBufferNode->refCount = 1; /* Always set to 1 by default (this is our first reference). */ + + if (pExistingData == NULL) { + pDataBufferNode->data.type = ma_resource_manager_data_supply_type_unknown; /* <-- We won't know this until we start decoding. */ + pDataBufferNode->result = MA_BUSY; /* Must be set to MA_BUSY before we leave the critical section, so might as well do it now. */ + pDataBufferNode->isDataOwnedByResourceManager = MA_TRUE; + } else { + pDataBufferNode->data = *pExistingData; + pDataBufferNode->result = MA_SUCCESS; /* Not loading asynchronously, so just set the status */ + pDataBufferNode->isDataOwnedByResourceManager = MA_FALSE; + } + + result = ma_resource_manager_data_buffer_node_insert_at(pResourceManager, pDataBufferNode, pInsertPoint); + if (result != MA_SUCCESS) { + ma_free(pDataBufferNode, &pResourceManager->config.allocationCallbacks); + return result; /* Should never happen. Failed to insert the data buffer into the BST. */ + } + + /* + Here is where we'll post the job, but only if we're loading asynchronously. If we're + loading synchronously we'll defer loading to a later stage, outside of the critical + section. + */ + if (pDataBufferNode->isDataOwnedByResourceManager && (flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC) != 0) { + /* Loading asynchronously. Post the job. */ + ma_job job; + char* pFilePathCopy = NULL; + wchar_t* pFilePathWCopy = NULL; + + /* We need a copy of the file path. 
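This is because the job may not run until after this function has returned, at which point the caller's string may no longer be valid.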
We should probably make this more efficient, but for now we'll do a transient memory allocation. */ + if (pFilePath != NULL) { + pFilePathCopy = ma_copy_string(pFilePath, &pResourceManager->config.allocationCallbacks); + } else { + pFilePathWCopy = ma_copy_string_w(pFilePathW, &pResourceManager->config.allocationCallbacks); + } + + if (pFilePathCopy == NULL && pFilePathWCopy == NULL) { + ma_resource_manager_data_buffer_node_remove(pResourceManager, pDataBufferNode); + ma_free(pDataBufferNode, &pResourceManager->config.allocationCallbacks); + return MA_OUT_OF_MEMORY; + } + + if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) { + ma_resource_manager_inline_notification_init(pResourceManager, pInitNotification); + } + + /* Acquire init and done fences before posting the job. These will be unacquired by the job thread. */ + if (pInitFence != NULL) { ma_fence_acquire(pInitFence); } + if (pDoneFence != NULL) { ma_fence_acquire(pDoneFence); } + + /* We now have everything we need to post the job to the job thread. */ + job = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER_NODE); + job.order = ma_resource_manager_data_buffer_node_next_execution_order(pDataBufferNode); + job.data.resourceManager.loadDataBufferNode.pResourceManager = pResourceManager; + job.data.resourceManager.loadDataBufferNode.pDataBufferNode = pDataBufferNode; + job.data.resourceManager.loadDataBufferNode.pFilePath = pFilePathCopy; + job.data.resourceManager.loadDataBufferNode.pFilePathW = pFilePathWCopy; + job.data.resourceManager.loadDataBufferNode.flags = flags; + job.data.resourceManager.loadDataBufferNode.pInitNotification = ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) ? pInitNotification : NULL; + job.data.resourceManager.loadDataBufferNode.pDoneNotification = NULL; + job.data.resourceManager.loadDataBufferNode.pInitFence = pInitFence; + job.data.resourceManager.loadDataBufferNode.pDoneFence = pDoneFence; + + if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) { + result = ma_job_process(&job); + } else { + result = ma_resource_manager_post_job(pResourceManager, &job); + } + + if (result != MA_SUCCESS) { + /* Failed to post job. Probably ran out of memory. */ + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_ERROR, "Failed to post MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER_NODE job. %s.\n", ma_result_description(result)); + + /* + Fences were acquired before posting the job, but since the job was not able to + be posted, we need to make sure we release them so nothing gets stuck waiting. + */ + if (pInitFence != NULL) { ma_fence_release(pInitFence); } + if (pDoneFence != NULL) { ma_fence_release(pDoneFence); } + + if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) { + ma_resource_manager_inline_notification_uninit(pInitNotification); + } else { + /* These will have been freed by the job thread, but with WAIT_INIT they will already have happend sinced the job has already been handled. 
*/ + ma_free(pFilePathCopy, &pResourceManager->config.allocationCallbacks); + ma_free(pFilePathWCopy, &pResourceManager->config.allocationCallbacks); + } + + ma_resource_manager_data_buffer_node_remove(pResourceManager, pDataBufferNode); + ma_free(pDataBufferNode, &pResourceManager->config.allocationCallbacks); + + return result; + } + } + } + +done: + if (ppDataBufferNode != NULL) { + *ppDataBufferNode = pDataBufferNode; + } + + return result; +} + +static ma_result ma_resource_manager_data_buffer_node_acquire(ma_resource_manager* pResourceManager, const char* pFilePath, const wchar_t* pFilePathW, ma_uint32 hashedName32, ma_uint32 flags, const ma_resource_manager_data_supply* pExistingData, ma_fence* pInitFence, ma_fence* pDoneFence, ma_resource_manager_data_buffer_node** ppDataBufferNode) +{ + ma_result result = MA_SUCCESS; + ma_bool32 nodeAlreadyExists = MA_FALSE; + ma_resource_manager_data_buffer_node* pDataBufferNode = NULL; + ma_resource_manager_inline_notification initNotification; /* Used when the WAIT_INIT flag is set. */ + + if (ppDataBufferNode != NULL) { + *ppDataBufferNode = NULL; /* Safety. */ + } + + if (pResourceManager == NULL || (pFilePath == NULL && pFilePathW == NULL && hashedName32 == 0)) { + return MA_INVALID_ARGS; + } + + /* If we're specifying existing data, it must be valid. */ + if (pExistingData != NULL && pExistingData->type == ma_resource_manager_data_supply_type_unknown) { + return MA_INVALID_ARGS; + } + + /* If we don't support threading, remove the ASYNC flag to make the rest of this a bit simpler. */ + if (ma_resource_manager_is_threading_enabled(pResourceManager) == MA_FALSE) { + flags &= ~MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC; + } + + if (hashedName32 == 0) { + if (pFilePath != NULL) { + hashedName32 = ma_hash_string_32(pFilePath); + } else { + hashedName32 = ma_hash_string_w_32(pFilePathW); + } + } + + /* + Here is where we either increment the node's reference count or allocate a new one and add it + to the BST. When allocating a new node, we need to make sure the LOAD_DATA_BUFFER_NODE job is + posted inside the critical section just in case the caller immediately uninitializes the node + as this will ensure the FREE_DATA_BUFFER_NODE job is given an execution order such that the + node is not uninitialized before initialization. + */ + ma_resource_manager_data_buffer_bst_lock(pResourceManager); + { + result = ma_resource_manager_data_buffer_node_acquire_critical_section(pResourceManager, pFilePath, pFilePathW, hashedName32, flags, pExistingData, pInitFence, pDoneFence, &initNotification, &pDataBufferNode); + } + ma_resource_manager_data_buffer_bst_unlock(pResourceManager); + + if (result == MA_ALREADY_EXISTS) { + nodeAlreadyExists = MA_TRUE; + result = MA_SUCCESS; + } else { + if (result != MA_SUCCESS) { + return result; + } + } + + /* + If we're loading synchronously, we'll need to load everything now. When loading asynchronously, + a job will have been posted inside the BST critical section so that an uninitialization can be + allocated an appropriate execution order thereby preventing it from being uninitialized before + the node is initialized by the decoding thread(s). + */ + if (nodeAlreadyExists == MA_FALSE) { /* Don't need to try loading anything if the node already exists. */ + if (pFilePath == NULL && pFilePathW == NULL) { + /* + If this path is hit, it means a buffer is being copied (i.e. initialized from only the + hashed name), but that node has been freed in the meantime, probably from some other + thread. 
This is an invalid operation. + */ + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_WARNING, "Cloning data buffer node failed because the source node was released. The source node must remain valid until the cloning has completed.\n"); + result = MA_INVALID_OPERATION; + goto done; + } + + if (pDataBufferNode->isDataOwnedByResourceManager) { + if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC) == 0) { + /* Loading synchronously. Load the sound in it's entirety here. */ + if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE) == 0) { + /* No decoding. This is the simple case - just store the file contents in memory. */ + result = ma_resource_manager_data_buffer_node_init_supply_encoded(pResourceManager, pDataBufferNode, pFilePath, pFilePathW); + if (result != MA_SUCCESS) { + goto done; + } + } else { + /* Decoding. We do this the same way as we do when loading asynchronously. */ + ma_decoder* pDecoder; + result = ma_resource_manager_data_buffer_node_init_supply_decoded(pResourceManager, pDataBufferNode, pFilePath, pFilePathW, flags, &pDecoder); + if (result != MA_SUCCESS) { + goto done; + } + + /* We have the decoder, now decode page by page just like we do when loading asynchronously. */ + for (;;) { + /* Decode next page. */ + result = ma_resource_manager_data_buffer_node_decode_next_page(pResourceManager, pDataBufferNode, pDecoder); + if (result != MA_SUCCESS) { + break; /* Will return MA_AT_END when the last page has been decoded. */ + } + } + + /* Reaching the end needs to be considered successful. */ + if (result == MA_AT_END) { + result = MA_SUCCESS; + } + + /* + At this point the data buffer is either fully decoded or some error occurred. Either + way, the decoder is no longer necessary. + */ + ma_decoder_uninit(pDecoder); + ma_free(pDecoder, &pResourceManager->config.allocationCallbacks); + } + + /* Getting here means we were successful. Make sure the status of the node is updated accordingly. */ + ma_atomic_exchange_i32(&pDataBufferNode->result, result); + } else { + /* Loading asynchronously. We may need to wait for initialization. */ + if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) { + ma_resource_manager_inline_notification_wait(&initNotification); + } + } + } else { + /* The data is not managed by the resource manager so there's nothing else to do. */ + MA_ASSERT(pExistingData != NULL); + } + } + +done: + /* If we failed to initialize the data buffer we need to free it. */ + if (result != MA_SUCCESS) { + if (nodeAlreadyExists == MA_FALSE) { + ma_resource_manager_data_buffer_node_remove(pResourceManager, pDataBufferNode); + ma_free(pDataBufferNode, &pResourceManager->config.allocationCallbacks); + } + } + + /* + The init notification needs to be uninitialized. This will be used if the node does not already + exist, and we've specified ASYNC | WAIT_INIT. 
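The conditions checked below therefore mirror the ones under which the notification was initialized in ma_resource_manager_data_buffer_node_acquire_critical_section().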
+ */ + if (nodeAlreadyExists == MA_FALSE && pDataBufferNode->isDataOwnedByResourceManager && (flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC) != 0) { + if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) { + ma_resource_manager_inline_notification_uninit(&initNotification); + } + } + + if (ppDataBufferNode != NULL) { + *ppDataBufferNode = pDataBufferNode; + } + + return result; +} + +static ma_result ma_resource_manager_data_buffer_node_unacquire(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode, const char* pName, const wchar_t* pNameW) +{ + ma_result result = MA_SUCCESS; + ma_uint32 refCount = 0xFFFFFFFF; /* The new reference count of the node after decrementing. Initialize to non-0 to be safe we don't fall into the freeing path. */ + ma_uint32 hashedName32 = 0; + + if (pResourceManager == NULL) { + return MA_INVALID_ARGS; + } + + if (pDataBufferNode == NULL) { + if (pName == NULL && pNameW == NULL) { + return MA_INVALID_ARGS; + } + + if (pName != NULL) { + hashedName32 = ma_hash_string_32(pName); + } else { + hashedName32 = ma_hash_string_w_32(pNameW); + } + } + + /* + The first thing to do is decrement the reference counter of the node. Then, if the reference + count is zero, we need to free the node. If the node is still in the process of loading, we'll + need to post a job to the job queue to free the node. Otherwise we'll just do it here. + */ + ma_resource_manager_data_buffer_bst_lock(pResourceManager); + { + /* Might need to find the node. Must be done inside the critical section. */ + if (pDataBufferNode == NULL) { + result = ma_resource_manager_data_buffer_node_search(pResourceManager, hashedName32, &pDataBufferNode); + if (result != MA_SUCCESS) { + goto stage2; /* Couldn't find the node. */ + } + } + + result = ma_resource_manager_data_buffer_node_decrement_ref(pResourceManager, pDataBufferNode, &refCount); + if (result != MA_SUCCESS) { + goto stage2; /* Should never happen. */ + } + + if (refCount == 0) { + result = ma_resource_manager_data_buffer_node_remove(pResourceManager, pDataBufferNode); + if (result != MA_SUCCESS) { + goto stage2; /* An error occurred when trying to remove the data buffer. This should never happen. */ + } + } + } + ma_resource_manager_data_buffer_bst_unlock(pResourceManager); + +stage2: + if (result != MA_SUCCESS) { + return result; + } + + /* + Here is where we need to free the node. We don't want to do this inside the critical section + above because we want to keep that as small as possible for multi-threaded efficiency. + */ + if (refCount == 0) { + if (ma_resource_manager_data_buffer_node_result(pDataBufferNode) == MA_BUSY) { + /* The sound is still loading. We need to delay the freeing of the node to a safe time. */ + ma_job job; + + /* We need to mark the node as unavailable for the sake of the resource manager worker threads. */ + ma_atomic_exchange_i32(&pDataBufferNode->result, MA_UNAVAILABLE); + + job = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER_NODE); + job.order = ma_resource_manager_data_buffer_node_next_execution_order(pDataBufferNode); + job.data.resourceManager.freeDataBufferNode.pResourceManager = pResourceManager; + job.data.resourceManager.freeDataBufferNode.pDataBufferNode = pDataBufferNode; + + result = ma_resource_manager_post_job(pResourceManager, &job); + if (result != MA_SUCCESS) { + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_ERROR, "Failed to post MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER_NODE job. 
%s.\n", ma_result_description(result)); + return result; + } + + /* If we don't support threading, process the job queue here. */ + if (ma_resource_manager_is_threading_enabled(pResourceManager) == MA_FALSE) { + while (ma_resource_manager_data_buffer_node_result(pDataBufferNode) == MA_BUSY) { + result = ma_resource_manager_process_next_job(pResourceManager); + if (result == MA_NO_DATA_AVAILABLE || result == MA_CANCELLED) { + result = MA_SUCCESS; + break; + } + } + } else { + /* Threading is enabled. The job queue will deal with the rest of the cleanup from here. */ + } + } else { + /* The sound isn't loading so we can just free the node here. */ + ma_resource_manager_data_buffer_node_free(pResourceManager, pDataBufferNode); + } + } + + return result; +} + + + +static ma_uint32 ma_resource_manager_data_buffer_next_execution_order(ma_resource_manager_data_buffer* pDataBuffer) +{ + MA_ASSERT(pDataBuffer != NULL); + return ma_atomic_fetch_add_32(&pDataBuffer->executionCounter, 1); +} + +static ma_result ma_resource_manager_data_buffer_cb__read_pcm_frames(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_resource_manager_data_buffer_read_pcm_frames((ma_resource_manager_data_buffer*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_resource_manager_data_buffer_cb__seek_to_pcm_frame(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_resource_manager_data_buffer_seek_to_pcm_frame((ma_resource_manager_data_buffer*)pDataSource, frameIndex); +} + +static ma_result ma_resource_manager_data_buffer_cb__get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + return ma_resource_manager_data_buffer_get_data_format((ma_resource_manager_data_buffer*)pDataSource, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); +} + +static ma_result ma_resource_manager_data_buffer_cb__get_cursor_in_pcm_frames(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + return ma_resource_manager_data_buffer_get_cursor_in_pcm_frames((ma_resource_manager_data_buffer*)pDataSource, pCursor); +} + +static ma_result ma_resource_manager_data_buffer_cb__get_length_in_pcm_frames(ma_data_source* pDataSource, ma_uint64* pLength) +{ + return ma_resource_manager_data_buffer_get_length_in_pcm_frames((ma_resource_manager_data_buffer*)pDataSource, pLength); +} + +static ma_result ma_resource_manager_data_buffer_cb__set_looping(ma_data_source* pDataSource, ma_bool32 isLooping) +{ + ma_resource_manager_data_buffer* pDataBuffer = (ma_resource_manager_data_buffer*)pDataSource; + MA_ASSERT(pDataBuffer != NULL); + + ma_atomic_exchange_32(&pDataBuffer->isLooping, isLooping); + + /* The looping state needs to be set on the connector as well or else looping won't work when we read audio data. 
*/
+    ma_data_source_set_looping(ma_resource_manager_data_buffer_get_connector(pDataBuffer), isLooping);
+
+    return MA_SUCCESS;
+}
+
+static ma_data_source_vtable g_ma_resource_manager_data_buffer_vtable =
+{
+    ma_resource_manager_data_buffer_cb__read_pcm_frames,
+    ma_resource_manager_data_buffer_cb__seek_to_pcm_frame,
+    ma_resource_manager_data_buffer_cb__get_data_format,
+    ma_resource_manager_data_buffer_cb__get_cursor_in_pcm_frames,
+    ma_resource_manager_data_buffer_cb__get_length_in_pcm_frames,
+    ma_resource_manager_data_buffer_cb__set_looping,
+    0
+};
+
+static ma_result ma_resource_manager_data_buffer_init_ex_internal(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source_config* pConfig, ma_uint32 hashedName32, ma_resource_manager_data_buffer* pDataBuffer)
+{
+    ma_result result = MA_SUCCESS;
+    ma_resource_manager_data_buffer_node* pDataBufferNode;
+    ma_data_source_config dataSourceConfig;
+    ma_bool32 async;
+    ma_uint32 flags;
+    ma_resource_manager_pipeline_notifications notifications;
+
+    if (pDataBuffer == NULL) {
+        if (pConfig != NULL && pConfig->pNotifications != NULL) {
+            ma_resource_manager_pipeline_notifications_signal_all_notifications(pConfig->pNotifications);
+        }
+
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pDataBuffer);
+
+    if (pConfig == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pConfig->pNotifications != NULL) {
+        notifications = *pConfig->pNotifications; /* From here on out we should be referencing `notifications` instead of `pNotifications`. Set this to NULL to catch errors at testing time. */
+    } else {
+        MA_ZERO_OBJECT(&notifications);
+    }
+
+    /* For safety, always remove the ASYNC flag if threading is disabled on the resource manager. */
+    flags = pConfig->flags;
+    if (ma_resource_manager_is_threading_enabled(pResourceManager) == MA_FALSE) {
+        flags &= ~MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC;
+    }
+
+    async = (flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC) != 0;
+
+    /*
+    Fences need to be acquired before doing anything. These must be acquired and released outside of
+    the node to ensure there are no holes where ma_fence_wait() could prematurely return before the
+    data buffer has completed initialization.
+
+    When loading asynchronously, the node acquisition routine below will acquire the fences on this
+    thread and then release them on the async thread when the operation is complete.
+
+    These fences are always released at the "done" tag at the end of this function. They'll be
+    acquired a second time if loading asynchronously. This double acquisition system is just done to
+    simplify code maintenance.
+    */
+    ma_resource_manager_pipeline_notifications_acquire_all_fences(&notifications);
+    {
+        /* We first need to acquire a node. If ASYNC is not set, this will not return until the entire sound has been loaded. 
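The node is the shareable part: data buffers initialized against the same file (or hashed name) reference a single reference-counted node, whereas the connector initialized further below is per data buffer.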
*/
+        result = ma_resource_manager_data_buffer_node_acquire(pResourceManager, pConfig->pFilePath, pConfig->pFilePathW, hashedName32, flags, NULL, notifications.init.pFence, notifications.done.pFence, &pDataBufferNode);
+        if (result != MA_SUCCESS) {
+            ma_resource_manager_pipeline_notifications_signal_all_notifications(&notifications);
+            goto done;
+        }
+
+        dataSourceConfig = ma_data_source_config_init();
+        dataSourceConfig.vtable = &g_ma_resource_manager_data_buffer_vtable;
+
+        result = ma_data_source_init(&dataSourceConfig, &pDataBuffer->ds);
+        if (result != MA_SUCCESS) {
+            ma_resource_manager_data_buffer_node_unacquire(pResourceManager, pDataBufferNode, NULL, NULL);
+            ma_resource_manager_pipeline_notifications_signal_all_notifications(&notifications);
+            goto done;
+        }
+
+        pDataBuffer->pResourceManager = pResourceManager;
+        pDataBuffer->pNode = pDataBufferNode;
+        pDataBuffer->flags = flags;
+        pDataBuffer->result = MA_BUSY; /* Always default to MA_BUSY for safety. It'll be overwritten when loading completes or an error occurs. */
+
+        /* If we're loading asynchronously we need to post a job to the job queue to initialize the connector. */
+        if (async == MA_FALSE || ma_resource_manager_data_buffer_node_result(pDataBufferNode) == MA_SUCCESS) {
+            /* Loading synchronously or the data has already been fully loaded. We can just initialize the connector from here without a job. */
+            result = ma_resource_manager_data_buffer_init_connector(pDataBuffer, pConfig, NULL, NULL);
+            ma_atomic_exchange_i32(&pDataBuffer->result, result);
+
+            ma_resource_manager_pipeline_notifications_signal_all_notifications(&notifications);
+            goto done;
+        } else {
+            /* The node's data supply isn't initialized yet. The caller has requested that we load asynchronously so we need to post a job to do this. */
+            ma_job job;
+            ma_resource_manager_inline_notification initNotification; /* Used when the WAIT_INIT flag is set. */
+
+            if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) {
+                ma_resource_manager_inline_notification_init(pResourceManager, &initNotification);
+            }
+
+            /*
+            The status of the data buffer needs to be set to MA_BUSY before posting the job so that the
+            worker thread is aware of its busy state. If the LOAD_DATA_BUFFER job sees a status other
+            than MA_BUSY, it'll assume an error and fall through to an early exit.
+            */
+            ma_atomic_exchange_i32(&pDataBuffer->result, MA_BUSY);
+
+            /* Acquire fences a second time. These will be released by the async thread. */
+            ma_resource_manager_pipeline_notifications_acquire_all_fences(&notifications);
+
+            job = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER);
+            job.order = ma_resource_manager_data_buffer_next_execution_order(pDataBuffer);
+            job.data.resourceManager.loadDataBuffer.pDataBuffer = pDataBuffer;
+            job.data.resourceManager.loadDataBuffer.pInitNotification = ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) ? 
&initNotification : notifications.init.pNotification;
+            job.data.resourceManager.loadDataBuffer.pDoneNotification = notifications.done.pNotification;
+            job.data.resourceManager.loadDataBuffer.pInitFence = notifications.init.pFence;
+            job.data.resourceManager.loadDataBuffer.pDoneFence = notifications.done.pFence;
+            job.data.resourceManager.loadDataBuffer.rangeBegInPCMFrames = pConfig->rangeBegInPCMFrames;
+            job.data.resourceManager.loadDataBuffer.rangeEndInPCMFrames = pConfig->rangeEndInPCMFrames;
+            job.data.resourceManager.loadDataBuffer.loopPointBegInPCMFrames = pConfig->loopPointBegInPCMFrames;
+            job.data.resourceManager.loadDataBuffer.loopPointEndInPCMFrames = pConfig->loopPointEndInPCMFrames;
+            job.data.resourceManager.loadDataBuffer.isLooping = pConfig->isLooping;
+
+            /* If we need to wait for initialization to complete we can just process the job in place. */
+            if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) {
+                result = ma_job_process(&job);
+            } else {
+                result = ma_resource_manager_post_job(pResourceManager, &job);
+            }
+
+            if (result != MA_SUCCESS) {
+                /* We failed to post the job. Most likely there isn't enough room in the queue's buffer. */
+                ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_ERROR, "Failed to post MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER job. %s.\n", ma_result_description(result));
+                ma_atomic_exchange_i32(&pDataBuffer->result, result);
+
+                /* Release the fences after the result has been set on the data buffer. */
+                ma_resource_manager_pipeline_notifications_release_all_fences(&notifications);
+            } else {
+                if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) {
+                    ma_resource_manager_inline_notification_wait(&initNotification);
+
+                    if (notifications.init.pNotification != NULL) {
+                        ma_async_notification_signal(notifications.init.pNotification);
+                    }
+
+                    /* NOTE: Do not release the init fence here. It will have been done by the job. */
+
+                    /* Make sure we return an error if initialization failed on the async thread. 
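Note that MA_BUSY is not treated as an error here because it only means pages are still being decoded (WAIT_INIT only guarantees that the connector has been initialized), so it is translated to MA_SUCCESS below.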
*/ + result = ma_resource_manager_data_buffer_result(pDataBuffer); + if (result == MA_BUSY) { + result = MA_SUCCESS; + } + } + } + + if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) { + ma_resource_manager_inline_notification_uninit(&initNotification); + } + } + + if (result != MA_SUCCESS) { + ma_resource_manager_data_buffer_node_unacquire(pResourceManager, pDataBufferNode, NULL, NULL); + goto done; + } + } +done: + if (result == MA_SUCCESS) { + if (pConfig->initialSeekPointInPCMFrames > 0) { + ma_resource_manager_data_buffer_seek_to_pcm_frame(pDataBuffer, pConfig->initialSeekPointInPCMFrames); + } + } + + ma_resource_manager_pipeline_notifications_release_all_fences(¬ifications); + + return result; +} + +MA_API ma_result ma_resource_manager_data_buffer_init_ex(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source_config* pConfig, ma_resource_manager_data_buffer* pDataBuffer) +{ + return ma_resource_manager_data_buffer_init_ex_internal(pResourceManager, pConfig, 0, pDataBuffer); +} + +MA_API ma_result ma_resource_manager_data_buffer_init(ma_resource_manager* pResourceManager, const char* pFilePath, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_buffer* pDataBuffer) +{ + ma_resource_manager_data_source_config config; + + config = ma_resource_manager_data_source_config_init(); + config.pFilePath = pFilePath; + config.flags = flags; + config.pNotifications = pNotifications; + + return ma_resource_manager_data_buffer_init_ex(pResourceManager, &config, pDataBuffer); +} + +MA_API ma_result ma_resource_manager_data_buffer_init_w(ma_resource_manager* pResourceManager, const wchar_t* pFilePath, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_buffer* pDataBuffer) +{ + ma_resource_manager_data_source_config config; + + config = ma_resource_manager_data_source_config_init(); + config.pFilePathW = pFilePath; + config.flags = flags; + config.pNotifications = pNotifications; + + return ma_resource_manager_data_buffer_init_ex(pResourceManager, &config, pDataBuffer); +} + +MA_API ma_result ma_resource_manager_data_buffer_init_copy(ma_resource_manager* pResourceManager, const ma_resource_manager_data_buffer* pExistingDataBuffer, ma_resource_manager_data_buffer* pDataBuffer) +{ + ma_resource_manager_data_source_config config; + + if (pExistingDataBuffer == NULL) { + return MA_INVALID_ARGS; + } + + MA_ASSERT(pExistingDataBuffer->pNode != NULL); /* <-- If you've triggered this, you've passed in an invalid existing data buffer. */ + + config = ma_resource_manager_data_source_config_init(); + config.flags = pExistingDataBuffer->flags; + + return ma_resource_manager_data_buffer_init_ex_internal(pResourceManager, &config, pExistingDataBuffer->pNode->hashedName32, pDataBuffer); +} + +static ma_result ma_resource_manager_data_buffer_uninit_internal(ma_resource_manager_data_buffer* pDataBuffer) +{ + MA_ASSERT(pDataBuffer != NULL); + + /* The connector should be uninitialized first. */ + ma_resource_manager_data_buffer_uninit_connector(pDataBuffer->pResourceManager, pDataBuffer); + + /* With the connector uninitialized we can unacquire the node. */ + ma_resource_manager_data_buffer_node_unacquire(pDataBuffer->pResourceManager, pDataBuffer->pNode, NULL, NULL); + + /* The base data source needs to be uninitialized as well. 
*/ + ma_data_source_uninit(&pDataBuffer->ds); + + return MA_SUCCESS; +} + +MA_API ma_result ma_resource_manager_data_buffer_uninit(ma_resource_manager_data_buffer* pDataBuffer) +{ + ma_result result; + + if (pDataBuffer == NULL) { + return MA_INVALID_ARGS; + } + + if (ma_resource_manager_data_buffer_result(pDataBuffer) == MA_SUCCESS) { + /* The data buffer can be deleted synchronously. */ + return ma_resource_manager_data_buffer_uninit_internal(pDataBuffer); + } else { + /* + The data buffer needs to be deleted asynchronously because it's still loading. With the status set to MA_UNAVAILABLE, no more pages will + be loaded and the uninitialization should happen fairly quickly. Since the caller owns the data buffer, we need to wait for this event + to get processed before returning. + */ + ma_resource_manager_inline_notification notification; + ma_job job; + + /* + We need to mark the node as unavailable so we don't try reading from it anymore, but also to + let the loading thread know that it needs to abort it's loading procedure. + */ + ma_atomic_exchange_i32(&pDataBuffer->result, MA_UNAVAILABLE); + + result = ma_resource_manager_inline_notification_init(pDataBuffer->pResourceManager, ¬ification); + if (result != MA_SUCCESS) { + return result; /* Failed to create the notification. This should rarely, if ever, happen. */ + } + + job = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER); + job.order = ma_resource_manager_data_buffer_next_execution_order(pDataBuffer); + job.data.resourceManager.freeDataBuffer.pDataBuffer = pDataBuffer; + job.data.resourceManager.freeDataBuffer.pDoneNotification = ¬ification; + job.data.resourceManager.freeDataBuffer.pDoneFence = NULL; + + result = ma_resource_manager_post_job(pDataBuffer->pResourceManager, &job); + if (result != MA_SUCCESS) { + ma_resource_manager_inline_notification_uninit(¬ification); + return result; + } + + ma_resource_manager_inline_notification_wait_and_uninit(¬ification); + } + + return result; +} + +MA_API ma_result ma_resource_manager_data_buffer_read_pcm_frames(ma_resource_manager_data_buffer* pDataBuffer, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_result result = MA_SUCCESS; + ma_uint64 framesRead = 0; + ma_bool32 isDecodedBufferBusy = MA_FALSE; + + /* Safety. */ + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + /* + We cannot be using the data buffer after it's been uninitialized. If you trigger this assert it means you're trying to read from the data buffer after + it's been uninitialized or is in the process of uninitializing. + */ + MA_ASSERT(ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode) != MA_UNAVAILABLE); + + /* If the node is not initialized we need to abort with a busy code. */ + if (ma_resource_manager_data_buffer_has_connector(pDataBuffer) == MA_FALSE) { + return MA_BUSY; /* Still loading. */ + } + + /* + If we've got a seek scheduled we'll want to do that before reading. However, for paged buffers, there's + a chance that the sound hasn't yet been decoded up to the seek point will result in the seek failing. If + this happens, we need to keep the seek scheduled and return MA_BUSY. 
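+
+    Purely as an illustration (a caller-side sketch, not part of this implementation; the variable names
+    are hypothetical), MA_BUSY from this function is normally treated as "no data available yet, try again
+    later" rather than as a hard error:
+
+        result = ma_resource_manager_data_buffer_read_pcm_frames(&dataBuffer, pOutput, frameCount, &framesRead);
+        if (result == MA_BUSY) {
+            // Still loading. Output silence for this block and try again on the next read.
+        }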
+ */ + if (pDataBuffer->seekToCursorOnNextRead) { + pDataBuffer->seekToCursorOnNextRead = MA_FALSE; + + result = ma_data_source_seek_to_pcm_frame(ma_resource_manager_data_buffer_get_connector(pDataBuffer), pDataBuffer->seekTargetInPCMFrames); + if (result != MA_SUCCESS) { + if (result == MA_BAD_SEEK && ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode) == ma_resource_manager_data_supply_type_decoded_paged) { + pDataBuffer->seekToCursorOnNextRead = MA_TRUE; /* Keep the seek scheduled. We just haven't loaded enough data yet to do the seek properly. */ + return MA_BUSY; + } + + return result; + } + } + + /* + For decoded buffers (not paged) we need to check beforehand how many frames we have available. We cannot + exceed this amount. We'll read as much as we can, and then return MA_BUSY. + */ + if (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode) == ma_resource_manager_data_supply_type_decoded) { + ma_uint64 availableFrames; + + isDecodedBufferBusy = (ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode) == MA_BUSY); + + if (ma_resource_manager_data_buffer_get_available_frames(pDataBuffer, &availableFrames) == MA_SUCCESS) { + /* Don't try reading more than the available frame count. */ + if (frameCount > availableFrames) { + frameCount = availableFrames; + + /* + If there's no frames available we want to set the status to MA_AT_END. The logic below + will check if the node is busy, and if so, change it to MA_BUSY. The reason we do this + is because we don't want to call `ma_data_source_read_pcm_frames()` if the frame count + is 0 because that'll result in a situation where it's possible MA_AT_END won't get + returned. + */ + if (frameCount == 0) { + result = MA_AT_END; + } + } else { + isDecodedBufferBusy = MA_FALSE; /* We have enough frames available in the buffer to avoid a MA_BUSY status. */ + } + } + } + + /* Don't attempt to read anything if we've got no frames available. */ + if (frameCount > 0) { + result = ma_data_source_read_pcm_frames(ma_resource_manager_data_buffer_get_connector(pDataBuffer), pFramesOut, frameCount, &framesRead); + } + + /* + If we returned MA_AT_END, but the node is still loading, we don't want to return that code or else the caller will interpret the sound + as at the end and terminate decoding. + */ + if (result == MA_AT_END) { + if (ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode) == MA_BUSY) { + result = MA_BUSY; + } + } + + if (isDecodedBufferBusy) { + result = MA_BUSY; + } + + if (pFramesRead != NULL) { + *pFramesRead = framesRead; + } + + if (result == MA_SUCCESS && framesRead == 0) { + result = MA_AT_END; + } + + return result; +} + +MA_API ma_result ma_resource_manager_data_buffer_seek_to_pcm_frame(ma_resource_manager_data_buffer* pDataBuffer, ma_uint64 frameIndex) +{ + ma_result result; + + /* We cannot be using the data source after it's been uninitialized. */ + MA_ASSERT(ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode) != MA_UNAVAILABLE); + + /* If we haven't yet got a connector we need to abort. */ + if (ma_resource_manager_data_buffer_has_connector(pDataBuffer) == MA_FALSE) { + pDataBuffer->seekTargetInPCMFrames = frameIndex; + pDataBuffer->seekToCursorOnNextRead = MA_TRUE; + return MA_BUSY; /* Still loading. 
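+                                   The seek target recorded above is applied on the next call to
+                                   ma_resource_manager_data_buffer_read_pcm_frames() once the connector
+                                   has been initialized.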
*/ + } + + result = ma_data_source_seek_to_pcm_frame(ma_resource_manager_data_buffer_get_connector(pDataBuffer), frameIndex); + if (result != MA_SUCCESS) { + return result; + } + + pDataBuffer->seekTargetInPCMFrames = ~(ma_uint64)0; /* <-- For identification purposes. */ + pDataBuffer->seekToCursorOnNextRead = MA_FALSE; + + return MA_SUCCESS; +} + +MA_API ma_result ma_resource_manager_data_buffer_get_data_format(ma_resource_manager_data_buffer* pDataBuffer, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + /* We cannot be using the data source after it's been uninitialized. */ + MA_ASSERT(ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode) != MA_UNAVAILABLE); + + switch (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode)) + { + case ma_resource_manager_data_supply_type_encoded: + { + return ma_data_source_get_data_format(&pDataBuffer->connector.decoder, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); + }; + + case ma_resource_manager_data_supply_type_decoded: + { + *pFormat = pDataBuffer->pNode->data.backend.decoded.format; + *pChannels = pDataBuffer->pNode->data.backend.decoded.channels; + *pSampleRate = pDataBuffer->pNode->data.backend.decoded.sampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pDataBuffer->pNode->data.backend.decoded.channels); + return MA_SUCCESS; + }; + + case ma_resource_manager_data_supply_type_decoded_paged: + { + *pFormat = pDataBuffer->pNode->data.backend.decodedPaged.data.format; + *pChannels = pDataBuffer->pNode->data.backend.decodedPaged.data.channels; + *pSampleRate = pDataBuffer->pNode->data.backend.decodedPaged.sampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pDataBuffer->pNode->data.backend.decoded.channels); + return MA_SUCCESS; + }; + + case ma_resource_manager_data_supply_type_unknown: + { + return MA_BUSY; /* Still loading. */ + }; + + default: + { + /* Unknown supply type. Should never hit this. */ + return MA_INVALID_ARGS; + } + } +} + +MA_API ma_result ma_resource_manager_data_buffer_get_cursor_in_pcm_frames(ma_resource_manager_data_buffer* pDataBuffer, ma_uint64* pCursor) +{ + /* We cannot be using the data source after it's been uninitialized. */ + MA_ASSERT(ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode) != MA_UNAVAILABLE); + + if (pDataBuffer == NULL || pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; + + switch (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode)) + { + case ma_resource_manager_data_supply_type_encoded: + { + return ma_decoder_get_cursor_in_pcm_frames(&pDataBuffer->connector.decoder, pCursor); + }; + + case ma_resource_manager_data_supply_type_decoded: + { + return ma_audio_buffer_get_cursor_in_pcm_frames(&pDataBuffer->connector.buffer, pCursor); + }; + + case ma_resource_manager_data_supply_type_decoded_paged: + { + return ma_paged_audio_buffer_get_cursor_in_pcm_frames(&pDataBuffer->connector.pagedBuffer, pCursor); + }; + + case ma_resource_manager_data_supply_type_unknown: + { + return MA_BUSY; + }; + + default: + { + return MA_INVALID_ARGS; + } + } +} + +MA_API ma_result ma_resource_manager_data_buffer_get_length_in_pcm_frames(ma_resource_manager_data_buffer* pDataBuffer, ma_uint64* pLength) +{ + /* We cannot be using the data source after it's been uninitialized. 
*/ + MA_ASSERT(ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode) != MA_UNAVAILABLE); + + if (pDataBuffer == NULL || pLength == NULL) { + return MA_INVALID_ARGS; + } + + if (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode) == ma_resource_manager_data_supply_type_unknown) { + return MA_BUSY; /* Still loading. */ + } + + return ma_data_source_get_length_in_pcm_frames(ma_resource_manager_data_buffer_get_connector(pDataBuffer), pLength); +} + +MA_API ma_result ma_resource_manager_data_buffer_result(const ma_resource_manager_data_buffer* pDataBuffer) +{ + if (pDataBuffer == NULL) { + return MA_INVALID_ARGS; + } + + return (ma_result)ma_atomic_load_i32((ma_result*)&pDataBuffer->result); /* Need a naughty const-cast here. */ +} + +MA_API ma_result ma_resource_manager_data_buffer_set_looping(ma_resource_manager_data_buffer* pDataBuffer, ma_bool32 isLooping) +{ + return ma_data_source_set_looping(pDataBuffer, isLooping); +} + +MA_API ma_bool32 ma_resource_manager_data_buffer_is_looping(const ma_resource_manager_data_buffer* pDataBuffer) +{ + return ma_data_source_is_looping(pDataBuffer); +} + +MA_API ma_result ma_resource_manager_data_buffer_get_available_frames(ma_resource_manager_data_buffer* pDataBuffer, ma_uint64* pAvailableFrames) +{ + if (pAvailableFrames == NULL) { + return MA_INVALID_ARGS; + } + + *pAvailableFrames = 0; + + if (pDataBuffer == NULL) { + return MA_INVALID_ARGS; + } + + if (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode) == ma_resource_manager_data_supply_type_unknown) { + if (ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode) == MA_BUSY) { + return MA_BUSY; + } else { + return MA_INVALID_OPERATION; /* No connector. */ + } + } + + switch (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode)) + { + case ma_resource_manager_data_supply_type_encoded: + { + return ma_decoder_get_available_frames(&pDataBuffer->connector.decoder, pAvailableFrames); + }; + + case ma_resource_manager_data_supply_type_decoded: + { + return ma_audio_buffer_get_available_frames(&pDataBuffer->connector.buffer, pAvailableFrames); + }; + + case ma_resource_manager_data_supply_type_decoded_paged: + { + ma_uint64 cursor; + ma_paged_audio_buffer_get_cursor_in_pcm_frames(&pDataBuffer->connector.pagedBuffer, &cursor); + + if (pDataBuffer->pNode->data.backend.decodedPaged.decodedFrameCount > cursor) { + *pAvailableFrames = pDataBuffer->pNode->data.backend.decodedPaged.decodedFrameCount - cursor; + } else { + *pAvailableFrames = 0; + } + + return MA_SUCCESS; + }; + + case ma_resource_manager_data_supply_type_unknown: + default: + { + /* Unknown supply type. Should never hit this. 
*/ + return MA_INVALID_ARGS; + } + } +} + +MA_API ma_result ma_resource_manager_register_file(ma_resource_manager* pResourceManager, const char* pFilePath, ma_uint32 flags) +{ + return ma_resource_manager_data_buffer_node_acquire(pResourceManager, pFilePath, NULL, 0, flags, NULL, NULL, NULL, NULL); +} + +MA_API ma_result ma_resource_manager_register_file_w(ma_resource_manager* pResourceManager, const wchar_t* pFilePath, ma_uint32 flags) +{ + return ma_resource_manager_data_buffer_node_acquire(pResourceManager, NULL, pFilePath, 0, flags, NULL, NULL, NULL, NULL); +} + + +static ma_result ma_resource_manager_register_data(ma_resource_manager* pResourceManager, const char* pName, const wchar_t* pNameW, ma_resource_manager_data_supply* pExistingData) +{ + return ma_resource_manager_data_buffer_node_acquire(pResourceManager, pName, pNameW, 0, 0, pExistingData, NULL, NULL, NULL); +} + +static ma_result ma_resource_manager_register_decoded_data_internal(ma_resource_manager* pResourceManager, const char* pName, const wchar_t* pNameW, const void* pData, ma_uint64 frameCount, ma_format format, ma_uint32 channels, ma_uint32 sampleRate) +{ + ma_resource_manager_data_supply data; + data.type = ma_resource_manager_data_supply_type_decoded; + data.backend.decoded.pData = pData; + data.backend.decoded.totalFrameCount = frameCount; + data.backend.decoded.format = format; + data.backend.decoded.channels = channels; + data.backend.decoded.sampleRate = sampleRate; + + return ma_resource_manager_register_data(pResourceManager, pName, pNameW, &data); +} + +MA_API ma_result ma_resource_manager_register_decoded_data(ma_resource_manager* pResourceManager, const char* pName, const void* pData, ma_uint64 frameCount, ma_format format, ma_uint32 channels, ma_uint32 sampleRate) +{ + return ma_resource_manager_register_decoded_data_internal(pResourceManager, pName, NULL, pData, frameCount, format, channels, sampleRate); +} + +MA_API ma_result ma_resource_manager_register_decoded_data_w(ma_resource_manager* pResourceManager, const wchar_t* pName, const void* pData, ma_uint64 frameCount, ma_format format, ma_uint32 channels, ma_uint32 sampleRate) +{ + return ma_resource_manager_register_decoded_data_internal(pResourceManager, NULL, pName, pData, frameCount, format, channels, sampleRate); +} + + +static ma_result ma_resource_manager_register_encoded_data_internal(ma_resource_manager* pResourceManager, const char* pName, const wchar_t* pNameW, const void* pData, size_t sizeInBytes) +{ + ma_resource_manager_data_supply data; + data.type = ma_resource_manager_data_supply_type_encoded; + data.backend.encoded.pData = pData; + data.backend.encoded.sizeInBytes = sizeInBytes; + + return ma_resource_manager_register_data(pResourceManager, pName, pNameW, &data); +} + +MA_API ma_result ma_resource_manager_register_encoded_data(ma_resource_manager* pResourceManager, const char* pName, const void* pData, size_t sizeInBytes) +{ + return ma_resource_manager_register_encoded_data_internal(pResourceManager, pName, NULL, pData, sizeInBytes); +} + +MA_API ma_result ma_resource_manager_register_encoded_data_w(ma_resource_manager* pResourceManager, const wchar_t* pName, const void* pData, size_t sizeInBytes) +{ + return ma_resource_manager_register_encoded_data_internal(pResourceManager, NULL, pName, pData, sizeInBytes); +} + + +MA_API ma_result ma_resource_manager_unregister_file(ma_resource_manager* pResourceManager, const char* pFilePath) +{ + return ma_resource_manager_unregister_data(pResourceManager, pFilePath); +} + +MA_API ma_result 
ma_resource_manager_unregister_file_w(ma_resource_manager* pResourceManager, const wchar_t* pFilePath) +{ + return ma_resource_manager_unregister_data_w(pResourceManager, pFilePath); +} + +MA_API ma_result ma_resource_manager_unregister_data(ma_resource_manager* pResourceManager, const char* pName) +{ + return ma_resource_manager_data_buffer_node_unacquire(pResourceManager, NULL, pName, NULL); +} + +MA_API ma_result ma_resource_manager_unregister_data_w(ma_resource_manager* pResourceManager, const wchar_t* pName) +{ + return ma_resource_manager_data_buffer_node_unacquire(pResourceManager, NULL, NULL, pName); +} + + +static ma_uint32 ma_resource_manager_data_stream_next_execution_order(ma_resource_manager_data_stream* pDataStream) +{ + MA_ASSERT(pDataStream != NULL); + return ma_atomic_fetch_add_32(&pDataStream->executionCounter, 1); +} + +static ma_bool32 ma_resource_manager_data_stream_is_decoder_at_end(const ma_resource_manager_data_stream* pDataStream) +{ + MA_ASSERT(pDataStream != NULL); + return ma_atomic_load_32((ma_bool32*)&pDataStream->isDecoderAtEnd); +} + +static ma_uint32 ma_resource_manager_data_stream_seek_counter(const ma_resource_manager_data_stream* pDataStream) +{ + MA_ASSERT(pDataStream != NULL); + return ma_atomic_load_32((ma_uint32*)&pDataStream->seekCounter); +} + + +static ma_result ma_resource_manager_data_stream_cb__read_pcm_frames(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_resource_manager_data_stream_read_pcm_frames((ma_resource_manager_data_stream*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_resource_manager_data_stream_cb__seek_to_pcm_frame(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_resource_manager_data_stream_seek_to_pcm_frame((ma_resource_manager_data_stream*)pDataSource, frameIndex); +} + +static ma_result ma_resource_manager_data_stream_cb__get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + return ma_resource_manager_data_stream_get_data_format((ma_resource_manager_data_stream*)pDataSource, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); +} + +static ma_result ma_resource_manager_data_stream_cb__get_cursor_in_pcm_frames(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + return ma_resource_manager_data_stream_get_cursor_in_pcm_frames((ma_resource_manager_data_stream*)pDataSource, pCursor); +} + +static ma_result ma_resource_manager_data_stream_cb__get_length_in_pcm_frames(ma_data_source* pDataSource, ma_uint64* pLength) +{ + return ma_resource_manager_data_stream_get_length_in_pcm_frames((ma_resource_manager_data_stream*)pDataSource, pLength); +} + +static ma_result ma_resource_manager_data_stream_cb__set_looping(ma_data_source* pDataSource, ma_bool32 isLooping) +{ + ma_resource_manager_data_stream* pDataStream = (ma_resource_manager_data_stream*)pDataSource; + MA_ASSERT(pDataStream != NULL); + + ma_atomic_exchange_32(&pDataStream->isLooping, isLooping); + + return MA_SUCCESS; +} + +static ma_data_source_vtable g_ma_resource_manager_data_stream_vtable = +{ + ma_resource_manager_data_stream_cb__read_pcm_frames, + ma_resource_manager_data_stream_cb__seek_to_pcm_frame, + ma_resource_manager_data_stream_cb__get_data_format, + ma_resource_manager_data_stream_cb__get_cursor_in_pcm_frames, + ma_resource_manager_data_stream_cb__get_length_in_pcm_frames, + ma_resource_manager_data_stream_cb__set_looping, + 0 
/*MA_DATA_SOURCE_SELF_MANAGED_RANGE_AND_LOOP_POINT*/ +}; + +static void ma_resource_manager_data_stream_set_absolute_cursor(ma_resource_manager_data_stream* pDataStream, ma_uint64 absoluteCursor) +{ + /* Loop if possible. */ + if (absoluteCursor > pDataStream->totalLengthInPCMFrames && pDataStream->totalLengthInPCMFrames > 0) { + absoluteCursor = absoluteCursor % pDataStream->totalLengthInPCMFrames; + } + + ma_atomic_exchange_64(&pDataStream->absoluteCursor, absoluteCursor); +} + +MA_API ma_result ma_resource_manager_data_stream_init_ex(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source_config* pConfig, ma_resource_manager_data_stream* pDataStream) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + char* pFilePathCopy = NULL; + wchar_t* pFilePathWCopy = NULL; + ma_job job; + ma_bool32 waitBeforeReturning = MA_FALSE; + ma_resource_manager_inline_notification waitNotification; + ma_resource_manager_pipeline_notifications notifications; + + if (pDataStream == NULL) { + if (pConfig != NULL && pConfig->pNotifications != NULL) { + ma_resource_manager_pipeline_notifications_signal_all_notifications(pConfig->pNotifications); + } + + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pDataStream); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->pNotifications != NULL) { + notifications = *pConfig->pNotifications; /* From here on out, `notifications` should be used instead of `pNotifications`. Setting this to NULL to catch any errors at testing time. */ + } else { + MA_ZERO_OBJECT(¬ifications); + } + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_resource_manager_data_stream_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pDataStream->ds); + if (result != MA_SUCCESS) { + ma_resource_manager_pipeline_notifications_signal_all_notifications(¬ifications); + return result; + } + + pDataStream->pResourceManager = pResourceManager; + pDataStream->flags = pConfig->flags; + pDataStream->result = MA_BUSY; + + ma_data_source_set_range_in_pcm_frames(pDataStream, pConfig->rangeBegInPCMFrames, pConfig->rangeEndInPCMFrames); + ma_data_source_set_loop_point_in_pcm_frames(pDataStream, pConfig->loopPointBegInPCMFrames, pConfig->loopPointEndInPCMFrames); + ma_data_source_set_looping(pDataStream, pConfig->isLooping); + + if (pResourceManager == NULL || (pConfig->pFilePath == NULL && pConfig->pFilePathW == NULL)) { + ma_resource_manager_pipeline_notifications_signal_all_notifications(¬ifications); + return MA_INVALID_ARGS; + } + + /* We want all access to the VFS and the internal decoder to happen on the job thread just to keep things easier to manage for the VFS. */ + + /* We need a copy of the file path. We should probably make this more efficient, but for now we'll do a transient memory allocation. */ + if (pConfig->pFilePath != NULL) { + pFilePathCopy = ma_copy_string(pConfig->pFilePath, &pResourceManager->config.allocationCallbacks); + } else { + pFilePathWCopy = ma_copy_string_w(pConfig->pFilePathW, &pResourceManager->config.allocationCallbacks); + } + + if (pFilePathCopy == NULL && pFilePathWCopy == NULL) { + ma_resource_manager_pipeline_notifications_signal_all_notifications(¬ifications); + return MA_OUT_OF_MEMORY; + } + + /* + We need to check for the presence of MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC. If it's not set, we need to wait before returning. Otherwise we + can return immediately. Likewise, we'll also check for MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT and do the same. 
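+
+    As a rough caller-facing sketch of the behaviour being implemented here (illustrative only; the file
+    name is hypothetical):
+
+        ma_resource_manager_data_stream stream;
+
+        ma_resource_manager_data_stream_init(pResourceManager, "music.ogg", 0, NULL, &stream);
+            ^ No ASYNC flag: the call blocks until the job thread has finished initializing the stream.
+
+        ma_resource_manager_data_stream_init(pResourceManager, "music.ogg", MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC, NULL, &stream);
+            ^ ASYNC: the call returns immediately and initialization completes on the job thread. Adding
+              MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT on top of ASYNC makes the call wait anyway.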
+ */ + if ((pConfig->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC) == 0 || (pConfig->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) { + waitBeforeReturning = MA_TRUE; + ma_resource_manager_inline_notification_init(pResourceManager, &waitNotification); + } + + ma_resource_manager_pipeline_notifications_acquire_all_fences(¬ifications); + + /* Set the absolute cursor to our initial seek position so retrieval of the cursor returns a good value. */ + ma_resource_manager_data_stream_set_absolute_cursor(pDataStream, pConfig->initialSeekPointInPCMFrames); + + /* We now have everything we need to post the job. This is the last thing we need to do from here. The rest will be done by the job thread. */ + job = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_STREAM); + job.order = ma_resource_manager_data_stream_next_execution_order(pDataStream); + job.data.resourceManager.loadDataStream.pDataStream = pDataStream; + job.data.resourceManager.loadDataStream.pFilePath = pFilePathCopy; + job.data.resourceManager.loadDataStream.pFilePathW = pFilePathWCopy; + job.data.resourceManager.loadDataStream.initialSeekPoint = pConfig->initialSeekPointInPCMFrames; + job.data.resourceManager.loadDataStream.pInitNotification = (waitBeforeReturning == MA_TRUE) ? &waitNotification : notifications.init.pNotification; + job.data.resourceManager.loadDataStream.pInitFence = notifications.init.pFence; + result = ma_resource_manager_post_job(pResourceManager, &job); + if (result != MA_SUCCESS) { + ma_resource_manager_pipeline_notifications_signal_all_notifications(¬ifications); + ma_resource_manager_pipeline_notifications_release_all_fences(¬ifications); + + if (waitBeforeReturning) { + ma_resource_manager_inline_notification_uninit(&waitNotification); + } + + ma_free(pFilePathCopy, &pResourceManager->config.allocationCallbacks); + ma_free(pFilePathWCopy, &pResourceManager->config.allocationCallbacks); + return result; + } + + /* Wait if needed. */ + if (waitBeforeReturning) { + ma_resource_manager_inline_notification_wait_and_uninit(&waitNotification); + + if (notifications.init.pNotification != NULL) { + ma_async_notification_signal(notifications.init.pNotification); + } + + /* + If there was an error during initialization make sure we return that result here. We don't want to do this + if we're not waiting because it will most likely be in a busy state. + */ + if (pDataStream->result != MA_SUCCESS) { + return pDataStream->result; + } + + /* NOTE: Do not release pInitFence here. That will be done by the job. 
*/ + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_resource_manager_data_stream_init(ma_resource_manager* pResourceManager, const char* pFilePath, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_stream* pDataStream) +{ + ma_resource_manager_data_source_config config; + + config = ma_resource_manager_data_source_config_init(); + config.pFilePath = pFilePath; + config.flags = flags; + config.pNotifications = pNotifications; + + return ma_resource_manager_data_stream_init_ex(pResourceManager, &config, pDataStream); +} + +MA_API ma_result ma_resource_manager_data_stream_init_w(ma_resource_manager* pResourceManager, const wchar_t* pFilePath, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_stream* pDataStream) +{ + ma_resource_manager_data_source_config config; + + config = ma_resource_manager_data_source_config_init(); + config.pFilePathW = pFilePath; + config.flags = flags; + config.pNotifications = pNotifications; + + return ma_resource_manager_data_stream_init_ex(pResourceManager, &config, pDataStream); +} + +MA_API ma_result ma_resource_manager_data_stream_uninit(ma_resource_manager_data_stream* pDataStream) +{ + ma_resource_manager_inline_notification freeEvent; + ma_job job; + + if (pDataStream == NULL) { + return MA_INVALID_ARGS; + } + + /* The first thing to do is set the result to unavailable. This will prevent future page decoding. */ + ma_atomic_exchange_i32(&pDataStream->result, MA_UNAVAILABLE); + + /* + We need to post a job to ensure we're not in the middle or decoding or anything. Because the object is owned by the caller, we'll need + to wait for it to complete before returning which means we need an event. + */ + ma_resource_manager_inline_notification_init(pDataStream->pResourceManager, &freeEvent); + + job = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_STREAM); + job.order = ma_resource_manager_data_stream_next_execution_order(pDataStream); + job.data.resourceManager.freeDataStream.pDataStream = pDataStream; + job.data.resourceManager.freeDataStream.pDoneNotification = &freeEvent; + job.data.resourceManager.freeDataStream.pDoneFence = NULL; + ma_resource_manager_post_job(pDataStream->pResourceManager, &job); + + /* We need to wait for the job to finish processing before we return. 
*/ + ma_resource_manager_inline_notification_wait_and_uninit(&freeEvent); + + return MA_SUCCESS; +} + + +static ma_uint32 ma_resource_manager_data_stream_get_page_size_in_frames(ma_resource_manager_data_stream* pDataStream) +{ + MA_ASSERT(pDataStream != NULL); + MA_ASSERT(pDataStream->isDecoderInitialized == MA_TRUE); + + return MA_RESOURCE_MANAGER_PAGE_SIZE_IN_MILLISECONDS * (pDataStream->decoder.outputSampleRate/1000); +} + +static void* ma_resource_manager_data_stream_get_page_data_pointer(ma_resource_manager_data_stream* pDataStream, ma_uint32 pageIndex, ma_uint32 relativeCursor) +{ + MA_ASSERT(pDataStream != NULL); + MA_ASSERT(pDataStream->isDecoderInitialized == MA_TRUE); + MA_ASSERT(pageIndex == 0 || pageIndex == 1); + + return ma_offset_ptr(pDataStream->pPageData, ((ma_resource_manager_data_stream_get_page_size_in_frames(pDataStream) * pageIndex) + relativeCursor) * ma_get_bytes_per_frame(pDataStream->decoder.outputFormat, pDataStream->decoder.outputChannels)); +} + +static void ma_resource_manager_data_stream_fill_page(ma_resource_manager_data_stream* pDataStream, ma_uint32 pageIndex) +{ + ma_result result = MA_SUCCESS; + ma_uint64 pageSizeInFrames; + ma_uint64 totalFramesReadForThisPage = 0; + void* pPageData = ma_resource_manager_data_stream_get_page_data_pointer(pDataStream, pageIndex, 0); + + pageSizeInFrames = ma_resource_manager_data_stream_get_page_size_in_frames(pDataStream); + + /* The decoder needs to inherit the stream's looping and range state. */ + { + ma_uint64 rangeBeg; + ma_uint64 rangeEnd; + ma_uint64 loopPointBeg; + ma_uint64 loopPointEnd; + + ma_data_source_set_looping(&pDataStream->decoder, ma_resource_manager_data_stream_is_looping(pDataStream)); + + ma_data_source_get_range_in_pcm_frames(pDataStream, &rangeBeg, &rangeEnd); + ma_data_source_set_range_in_pcm_frames(&pDataStream->decoder, rangeBeg, rangeEnd); + + ma_data_source_get_loop_point_in_pcm_frames(pDataStream, &loopPointBeg, &loopPointEnd); + ma_data_source_set_loop_point_in_pcm_frames(&pDataStream->decoder, loopPointBeg, loopPointEnd); + } + + /* Just read straight from the decoder. It will deal with ranges and looping for us. */ + result = ma_data_source_read_pcm_frames(&pDataStream->decoder, pPageData, pageSizeInFrames, &totalFramesReadForThisPage); + if (result == MA_AT_END || totalFramesReadForThisPage < pageSizeInFrames) { + ma_atomic_exchange_32(&pDataStream->isDecoderAtEnd, MA_TRUE); + } + + ma_atomic_exchange_32(&pDataStream->pageFrameCount[pageIndex], (ma_uint32)totalFramesReadForThisPage); + ma_atomic_exchange_32(&pDataStream->isPageValid[pageIndex], MA_TRUE); +} + +static void ma_resource_manager_data_stream_fill_pages(ma_resource_manager_data_stream* pDataStream) +{ + ma_uint32 iPage; + + MA_ASSERT(pDataStream != NULL); + + for (iPage = 0; iPage < 2; iPage += 1) { + ma_resource_manager_data_stream_fill_page(pDataStream, iPage); + } +} + + +static ma_result ma_resource_manager_data_stream_map(ma_resource_manager_data_stream* pDataStream, void** ppFramesOut, ma_uint64* pFrameCount) +{ + ma_uint64 framesAvailable; + ma_uint64 frameCount = 0; + + /* We cannot be using the data source after it's been uninitialized. 
*/ + MA_ASSERT(ma_resource_manager_data_stream_result(pDataStream) != MA_UNAVAILABLE); + + if (pFrameCount != NULL) { + frameCount = *pFrameCount; + *pFrameCount = 0; + } + if (ppFramesOut != NULL) { + *ppFramesOut = NULL; + } + + if (pDataStream == NULL || ppFramesOut == NULL || pFrameCount == NULL) { + return MA_INVALID_ARGS; + } + + if (ma_resource_manager_data_stream_result(pDataStream) != MA_SUCCESS) { + return MA_INVALID_OPERATION; + } + + /* Don't attempt to read while we're in the middle of seeking. Tell the caller that we're busy. */ + if (ma_resource_manager_data_stream_seek_counter(pDataStream) > 0) { + return MA_BUSY; + } + + /* If the page we're on is invalid it means we've caught up to the job thread. */ + if (ma_atomic_load_32(&pDataStream->isPageValid[pDataStream->currentPageIndex]) == MA_FALSE) { + framesAvailable = 0; + } else { + /* + The page we're on is valid so we must have some frames available. We need to make sure that we don't overflow into the next page, even if it's valid. The reason is + that the unmap process will only post an update for one page at a time. Keeping mapping tied to page boundaries makes this simpler. + */ + ma_uint32 currentPageFrameCount = ma_atomic_load_32(&pDataStream->pageFrameCount[pDataStream->currentPageIndex]); + MA_ASSERT(currentPageFrameCount >= pDataStream->relativeCursor); + + framesAvailable = currentPageFrameCount - pDataStream->relativeCursor; + } + + /* If there's no frames available and the result is set to MA_AT_END we need to return MA_AT_END. */ + if (framesAvailable == 0) { + if (ma_resource_manager_data_stream_is_decoder_at_end(pDataStream)) { + return MA_AT_END; + } else { + return MA_BUSY; /* There are no frames available, but we're not marked as EOF so we might have caught up to the job thread. Need to return MA_BUSY and wait for more data. */ + } + } + + MA_ASSERT(framesAvailable > 0); + + if (frameCount > framesAvailable) { + frameCount = framesAvailable; + } + + *ppFramesOut = ma_resource_manager_data_stream_get_page_data_pointer(pDataStream, pDataStream->currentPageIndex, pDataStream->relativeCursor); + *pFrameCount = frameCount; + + return MA_SUCCESS; +} + +static ma_result ma_resource_manager_data_stream_unmap(ma_resource_manager_data_stream* pDataStream, ma_uint64 frameCount) +{ + ma_uint32 newRelativeCursor; + ma_uint32 pageSizeInFrames; + ma_job job; + + /* We cannot be using the data source after it's been uninitialized. */ + MA_ASSERT(ma_resource_manager_data_stream_result(pDataStream) != MA_UNAVAILABLE); + + if (pDataStream == NULL) { + return MA_INVALID_ARGS; + } + + if (ma_resource_manager_data_stream_result(pDataStream) != MA_SUCCESS) { + return MA_INVALID_OPERATION; + } + + /* The frame count should always fit inside a 32-bit integer. */ + if (frameCount > 0xFFFFFFFF) { + return MA_INVALID_ARGS; + } + + pageSizeInFrames = ma_resource_manager_data_stream_get_page_size_in_frames(pDataStream); + + /* The absolute cursor needs to be updated for ma_resource_manager_data_stream_get_cursor_in_pcm_frames(). */ + ma_resource_manager_data_stream_set_absolute_cursor(pDataStream, ma_atomic_load_64(&pDataStream->absoluteCursor) + frameCount); + + /* Here is where we need to check if we need to load a new page, and if so, post a job to load it. */ + newRelativeCursor = pDataStream->relativeCursor + (ma_uint32)frameCount; + + /* If the new cursor has flowed over to the next page we need to mark the old one as invalid and post an event for it. 
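+       For example (illustrative numbers only): with a page size of 1000 frames, a relative cursor of 900
+       and an unmap of 250 frames, the new relative cursor is 1150, which wraps to 150 on the other page,
+       flips currentPageIndex, and posts a PAGE_DATA_STREAM job to refill the page that was just consumed.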
*/ + if (newRelativeCursor >= pageSizeInFrames) { + newRelativeCursor -= pageSizeInFrames; + + /* Here is where we post the job start decoding. */ + job = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_STREAM); + job.order = ma_resource_manager_data_stream_next_execution_order(pDataStream); + job.data.resourceManager.pageDataStream.pDataStream = pDataStream; + job.data.resourceManager.pageDataStream.pageIndex = pDataStream->currentPageIndex; + + /* The page needs to be marked as invalid so that the public API doesn't try reading from it. */ + ma_atomic_exchange_32(&pDataStream->isPageValid[pDataStream->currentPageIndex], MA_FALSE); + + /* Before posting the job we need to make sure we set some state. */ + pDataStream->relativeCursor = newRelativeCursor; + pDataStream->currentPageIndex = (pDataStream->currentPageIndex + 1) & 0x01; + return ma_resource_manager_post_job(pDataStream->pResourceManager, &job); + } else { + /* We haven't moved into a new page so we can just move the cursor forward. */ + pDataStream->relativeCursor = newRelativeCursor; + return MA_SUCCESS; + } +} + + +MA_API ma_result ma_resource_manager_data_stream_read_pcm_frames(ma_resource_manager_data_stream* pDataStream, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_result result = MA_SUCCESS; + ma_uint64 totalFramesProcessed; + ma_format format; + ma_uint32 channels; + + /* Safety. */ + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + /* We cannot be using the data source after it's been uninitialized. */ + MA_ASSERT(ma_resource_manager_data_stream_result(pDataStream) != MA_UNAVAILABLE); + + if (pDataStream == NULL) { + return MA_INVALID_ARGS; + } + + if (ma_resource_manager_data_stream_result(pDataStream) != MA_SUCCESS) { + return MA_INVALID_OPERATION; + } + + /* Don't attempt to read while we're in the middle of seeking. Tell the caller that we're busy. */ + if (ma_resource_manager_data_stream_seek_counter(pDataStream) > 0) { + return MA_BUSY; + } + + ma_resource_manager_data_stream_get_data_format(pDataStream, &format, &channels, NULL, NULL, 0); + + /* Reading is implemented in terms of map/unmap. We need to run this in a loop because mapping is clamped against page boundaries. */ + totalFramesProcessed = 0; + while (totalFramesProcessed < frameCount) { + void* pMappedFrames; + ma_uint64 mappedFrameCount; + + mappedFrameCount = frameCount - totalFramesProcessed; + result = ma_resource_manager_data_stream_map(pDataStream, &pMappedFrames, &mappedFrameCount); + if (result != MA_SUCCESS) { + break; + } + + /* Copy the mapped data to the output buffer if we have one. It's allowed for pFramesOut to be NULL in which case a relative forward seek is performed. */ + if (pFramesOut != NULL) { + ma_copy_pcm_frames(ma_offset_pcm_frames_ptr(pFramesOut, totalFramesProcessed, format, channels), pMappedFrames, mappedFrameCount, format, channels); + } + + totalFramesProcessed += mappedFrameCount; + + result = ma_resource_manager_data_stream_unmap(pDataStream, mappedFrameCount); + if (result != MA_SUCCESS) { + break; /* This is really bad - will only get an error here if we failed to post a job to the queue for loading the next page. 
*/ + } + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesProcessed; + } + + if (result == MA_SUCCESS && totalFramesProcessed == 0) { + result = MA_AT_END; + } + + return result; +} + +MA_API ma_result ma_resource_manager_data_stream_seek_to_pcm_frame(ma_resource_manager_data_stream* pDataStream, ma_uint64 frameIndex) +{ + ma_job job; + ma_result streamResult; + + streamResult = ma_resource_manager_data_stream_result(pDataStream); + + /* We cannot be using the data source after it's been uninitialized. */ + MA_ASSERT(streamResult != MA_UNAVAILABLE); + + if (pDataStream == NULL) { + return MA_INVALID_ARGS; + } + + if (streamResult != MA_SUCCESS && streamResult != MA_BUSY) { + return MA_INVALID_OPERATION; + } + + /* If we're not already seeking and we're sitting on the same frame, just make this a no-op. */ + if (ma_atomic_load_32(&pDataStream->seekCounter) == 0) { + if (ma_atomic_load_64(&pDataStream->absoluteCursor) == frameIndex) { + return MA_SUCCESS; + } + } + + + /* Increment the seek counter first to indicate to read_paged_pcm_frames() and map_paged_pcm_frames() that we are in the middle of a seek and MA_BUSY should be returned. */ + ma_atomic_fetch_add_32(&pDataStream->seekCounter, 1); + + /* Update the absolute cursor so that ma_resource_manager_data_stream_get_cursor_in_pcm_frames() returns the new position. */ + ma_resource_manager_data_stream_set_absolute_cursor(pDataStream, frameIndex); + + /* + We need to clear our currently loaded pages so that the stream starts playback from the new seek point as soon as possible. These are for the purpose of the public + API and will be ignored by the seek job. The seek job will operate on the assumption that both pages have been marked as invalid and the cursor is at the start of + the first page. + */ + pDataStream->relativeCursor = 0; + pDataStream->currentPageIndex = 0; + ma_atomic_exchange_32(&pDataStream->isPageValid[0], MA_FALSE); + ma_atomic_exchange_32(&pDataStream->isPageValid[1], MA_FALSE); + + /* Make sure the data stream is not marked as at the end or else if we seek in response to hitting the end, we won't be able to read any more data. */ + ma_atomic_exchange_32(&pDataStream->isDecoderAtEnd, MA_FALSE); + + /* + The public API is not allowed to touch the internal decoder so we need to use a job to perform the seek. When seeking, the job thread will assume both pages + are invalid and any content contained within them will be discarded and replaced with newly decoded data. + */ + job = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_SEEK_DATA_STREAM); + job.order = ma_resource_manager_data_stream_next_execution_order(pDataStream); + job.data.resourceManager.seekDataStream.pDataStream = pDataStream; + job.data.resourceManager.seekDataStream.frameIndex = frameIndex; + return ma_resource_manager_post_job(pDataStream->pResourceManager, &job); +} + +MA_API ma_result ma_resource_manager_data_stream_get_data_format(ma_resource_manager_data_stream* pDataStream, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + /* We cannot be using the data source after it's been uninitialized. 
*/ + MA_ASSERT(ma_resource_manager_data_stream_result(pDataStream) != MA_UNAVAILABLE); + + if (pFormat != NULL) { + *pFormat = ma_format_unknown; + } + + if (pChannels != NULL) { + *pChannels = 0; + } + + if (pSampleRate != NULL) { + *pSampleRate = 0; + } + + if (pChannelMap != NULL) { + MA_ZERO_MEMORY(pChannelMap, sizeof(*pChannelMap) * channelMapCap); + } + + if (pDataStream == NULL) { + return MA_INVALID_ARGS; + } + + if (ma_resource_manager_data_stream_result(pDataStream) != MA_SUCCESS) { + return MA_INVALID_OPERATION; + } + + /* + We're being a little bit naughty here and accessing the internal decoder from the public API. The output data format is constant, and we've defined this function + such that the application is responsible for ensuring it's not called while uninitializing so it should be safe. + */ + return ma_data_source_get_data_format(&pDataStream->decoder, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); +} + +MA_API ma_result ma_resource_manager_data_stream_get_cursor_in_pcm_frames(ma_resource_manager_data_stream* pDataStream, ma_uint64* pCursor) +{ + ma_result result; + + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; + + /* We cannot be using the data source after it's been uninitialized. */ + MA_ASSERT(ma_resource_manager_data_stream_result(pDataStream) != MA_UNAVAILABLE); + + if (pDataStream == NULL) { + return MA_INVALID_ARGS; + } + + /* + If the stream is in an erroneous state we need to return an invalid operation. We can allow + this to be called when the data stream is in a busy state because the caller may have asked + for an initial seek position and it's convenient to return that as the cursor position. + */ + result = ma_resource_manager_data_stream_result(pDataStream); + if (result != MA_SUCCESS && result != MA_BUSY) { + return MA_INVALID_OPERATION; + } + + *pCursor = ma_atomic_load_64(&pDataStream->absoluteCursor); + + return MA_SUCCESS; +} + +MA_API ma_result ma_resource_manager_data_stream_get_length_in_pcm_frames(ma_resource_manager_data_stream* pDataStream, ma_uint64* pLength) +{ + ma_result streamResult; + + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; + + streamResult = ma_resource_manager_data_stream_result(pDataStream); + + /* We cannot be using the data source after it's been uninitialized. */ + MA_ASSERT(streamResult != MA_UNAVAILABLE); + + if (pDataStream == NULL) { + return MA_INVALID_ARGS; + } + + if (streamResult != MA_SUCCESS) { + return streamResult; + } + + /* + We most definitely do not want to be calling ma_decoder_get_length_in_pcm_frames() directly. Instead we want to use a cached value that we + calculated when we initialized it on the job thread. + */ + *pLength = pDataStream->totalLengthInPCMFrames; + if (*pLength == 0) { + return MA_NOT_IMPLEMENTED; /* Some decoders may not have a known length. 
*/ + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_resource_manager_data_stream_result(const ma_resource_manager_data_stream* pDataStream) +{ + if (pDataStream == NULL) { + return MA_INVALID_ARGS; + } + + return (ma_result)ma_atomic_load_i32(&pDataStream->result); +} + +MA_API ma_result ma_resource_manager_data_stream_set_looping(ma_resource_manager_data_stream* pDataStream, ma_bool32 isLooping) +{ + return ma_data_source_set_looping(pDataStream, isLooping); +} + +MA_API ma_bool32 ma_resource_manager_data_stream_is_looping(const ma_resource_manager_data_stream* pDataStream) +{ + if (pDataStream == NULL) { + return MA_FALSE; + } + + return ma_atomic_load_32((ma_bool32*)&pDataStream->isLooping); /* Naughty const-cast. Value won't change from here in practice (maybe from another thread). */ +} + +MA_API ma_result ma_resource_manager_data_stream_get_available_frames(ma_resource_manager_data_stream* pDataStream, ma_uint64* pAvailableFrames) +{ + ma_uint32 pageIndex0; + ma_uint32 pageIndex1; + ma_uint32 relativeCursor; + ma_uint64 availableFrames; + + if (pAvailableFrames == NULL) { + return MA_INVALID_ARGS; + } + + *pAvailableFrames = 0; + + if (pDataStream == NULL) { + return MA_INVALID_ARGS; + } + + pageIndex0 = pDataStream->currentPageIndex; + pageIndex1 = (pDataStream->currentPageIndex + 1) & 0x01; + relativeCursor = pDataStream->relativeCursor; + + availableFrames = 0; + if (ma_atomic_load_32(&pDataStream->isPageValid[pageIndex0])) { + availableFrames += ma_atomic_load_32(&pDataStream->pageFrameCount[pageIndex0]) - relativeCursor; + if (ma_atomic_load_32(&pDataStream->isPageValid[pageIndex1])) { + availableFrames += ma_atomic_load_32(&pDataStream->pageFrameCount[pageIndex1]); + } + } + + *pAvailableFrames = availableFrames; + return MA_SUCCESS; +} + + +static ma_result ma_resource_manager_data_source_preinit(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source_config* pConfig, ma_resource_manager_data_source* pDataSource) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pDataSource); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pResourceManager == NULL) { + return MA_INVALID_ARGS; + } + + pDataSource->flags = pConfig->flags; + + return MA_SUCCESS; +} + +MA_API ma_result ma_resource_manager_data_source_init_ex(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source_config* pConfig, ma_resource_manager_data_source* pDataSource) +{ + ma_result result; + + result = ma_resource_manager_data_source_preinit(pResourceManager, pConfig, pDataSource); + if (result != MA_SUCCESS) { + return result; + } + + /* The data source itself is just a data stream or a data buffer. 
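+
+       Illustrative only (the file name is hypothetical):
+
+           ma_resource_manager_data_source dataSource;
+
+           ma_resource_manager_data_source_init(pResourceManager, "sound.wav", MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM, NULL, &dataSource);
+               ^ Routed to ma_resource_manager_data_stream_init_ex().
+
+           ma_resource_manager_data_source_init(pResourceManager, "sound.wav", MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE, NULL, &dataSource);
+               ^ No STREAM flag, so routed to ma_resource_manager_data_buffer_init_ex().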
*/ + if ((pConfig->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_init_ex(pResourceManager, pConfig, &pDataSource->backend.stream); + } else { + return ma_resource_manager_data_buffer_init_ex(pResourceManager, pConfig, &pDataSource->backend.buffer); + } +} + +MA_API ma_result ma_resource_manager_data_source_init(ma_resource_manager* pResourceManager, const char* pName, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_source* pDataSource) +{ + ma_resource_manager_data_source_config config; + + config = ma_resource_manager_data_source_config_init(); + config.pFilePath = pName; + config.flags = flags; + config.pNotifications = pNotifications; + + return ma_resource_manager_data_source_init_ex(pResourceManager, &config, pDataSource); +} + +MA_API ma_result ma_resource_manager_data_source_init_w(ma_resource_manager* pResourceManager, const wchar_t* pName, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_source* pDataSource) +{ + ma_resource_manager_data_source_config config; + + config = ma_resource_manager_data_source_config_init(); + config.pFilePathW = pName; + config.flags = flags; + config.pNotifications = pNotifications; + + return ma_resource_manager_data_source_init_ex(pResourceManager, &config, pDataSource); +} + +MA_API ma_result ma_resource_manager_data_source_init_copy(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source* pExistingDataSource, ma_resource_manager_data_source* pDataSource) +{ + ma_result result; + ma_resource_manager_data_source_config config; + + if (pExistingDataSource == NULL) { + return MA_INVALID_ARGS; + } + + config = ma_resource_manager_data_source_config_init(); + config.flags = pExistingDataSource->flags; + + result = ma_resource_manager_data_source_preinit(pResourceManager, &config, pDataSource); + if (result != MA_SUCCESS) { + return result; + } + + /* Copying can only be done from data buffers. Streams cannot be copied. */ + if ((pExistingDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return MA_INVALID_OPERATION; + } + + return ma_resource_manager_data_buffer_init_copy(pResourceManager, &pExistingDataSource->backend.buffer, &pDataSource->backend.buffer); +} + +MA_API ma_result ma_resource_manager_data_source_uninit(ma_resource_manager_data_source* pDataSource) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + /* All we need to is uninitialize the underlying data buffer or data stream. */ + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_uninit(&pDataSource->backend.stream); + } else { + return ma_resource_manager_data_buffer_uninit(&pDataSource->backend.buffer); + } +} + +MA_API ma_result ma_resource_manager_data_source_read_pcm_frames(ma_resource_manager_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + /* Safety. 
*/ + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_read_pcm_frames(&pDataSource->backend.stream, pFramesOut, frameCount, pFramesRead); + } else { + return ma_resource_manager_data_buffer_read_pcm_frames(&pDataSource->backend.buffer, pFramesOut, frameCount, pFramesRead); + } +} + +MA_API ma_result ma_resource_manager_data_source_seek_to_pcm_frame(ma_resource_manager_data_source* pDataSource, ma_uint64 frameIndex) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_seek_to_pcm_frame(&pDataSource->backend.stream, frameIndex); + } else { + return ma_resource_manager_data_buffer_seek_to_pcm_frame(&pDataSource->backend.buffer, frameIndex); + } +} + +MA_API ma_result ma_resource_manager_data_source_map(ma_resource_manager_data_source* pDataSource, void** ppFramesOut, ma_uint64* pFrameCount) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_map(&pDataSource->backend.stream, ppFramesOut, pFrameCount); + } else { + return MA_NOT_IMPLEMENTED; /* Mapping not supported with data buffers. */ + } +} + +MA_API ma_result ma_resource_manager_data_source_unmap(ma_resource_manager_data_source* pDataSource, ma_uint64 frameCount) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_unmap(&pDataSource->backend.stream, frameCount); + } else { + return MA_NOT_IMPLEMENTED; /* Mapping not supported with data buffers. 
*/ + } +} + +MA_API ma_result ma_resource_manager_data_source_get_data_format(ma_resource_manager_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_get_data_format(&pDataSource->backend.stream, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); + } else { + return ma_resource_manager_data_buffer_get_data_format(&pDataSource->backend.buffer, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); + } +} + +MA_API ma_result ma_resource_manager_data_source_get_cursor_in_pcm_frames(ma_resource_manager_data_source* pDataSource, ma_uint64* pCursor) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_get_cursor_in_pcm_frames(&pDataSource->backend.stream, pCursor); + } else { + return ma_resource_manager_data_buffer_get_cursor_in_pcm_frames(&pDataSource->backend.buffer, pCursor); + } +} + +MA_API ma_result ma_resource_manager_data_source_get_length_in_pcm_frames(ma_resource_manager_data_source* pDataSource, ma_uint64* pLength) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_get_length_in_pcm_frames(&pDataSource->backend.stream, pLength); + } else { + return ma_resource_manager_data_buffer_get_length_in_pcm_frames(&pDataSource->backend.buffer, pLength); + } +} + +MA_API ma_result ma_resource_manager_data_source_result(const ma_resource_manager_data_source* pDataSource) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_result(&pDataSource->backend.stream); + } else { + return ma_resource_manager_data_buffer_result(&pDataSource->backend.buffer); + } +} + +MA_API ma_result ma_resource_manager_data_source_set_looping(ma_resource_manager_data_source* pDataSource, ma_bool32 isLooping) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_set_looping(&pDataSource->backend.stream, isLooping); + } else { + return ma_resource_manager_data_buffer_set_looping(&pDataSource->backend.buffer, isLooping); + } +} + +MA_API ma_bool32 ma_resource_manager_data_source_is_looping(const ma_resource_manager_data_source* pDataSource) +{ + if (pDataSource == NULL) { + return MA_FALSE; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_is_looping(&pDataSource->backend.stream); + } else { + return ma_resource_manager_data_buffer_is_looping(&pDataSource->backend.buffer); + } +} + +MA_API ma_result ma_resource_manager_data_source_get_available_frames(ma_resource_manager_data_source* pDataSource, ma_uint64* pAvailableFrames) +{ + if (pAvailableFrames == NULL) { + return MA_INVALID_ARGS; + } + + *pAvailableFrames = 0; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return 
ma_resource_manager_data_stream_get_available_frames(&pDataSource->backend.stream, pAvailableFrames); + } else { + return ma_resource_manager_data_buffer_get_available_frames(&pDataSource->backend.buffer, pAvailableFrames); + } +} + + +MA_API ma_result ma_resource_manager_post_job(ma_resource_manager* pResourceManager, const ma_job* pJob) +{ + if (pResourceManager == NULL) { + return MA_INVALID_ARGS; + } + + return ma_job_queue_post(&pResourceManager->jobQueue, pJob); +} + +MA_API ma_result ma_resource_manager_post_job_quit(ma_resource_manager* pResourceManager) +{ + ma_job job = ma_job_init(MA_JOB_TYPE_QUIT); + return ma_resource_manager_post_job(pResourceManager, &job); +} + +MA_API ma_result ma_resource_manager_next_job(ma_resource_manager* pResourceManager, ma_job* pJob) +{ + if (pResourceManager == NULL) { + return MA_INVALID_ARGS; + } + + return ma_job_queue_next(&pResourceManager->jobQueue, pJob); +} + + +static ma_result ma_job_process__resource_manager__load_data_buffer_node(ma_job* pJob) +{ + ma_result result = MA_SUCCESS; + ma_resource_manager* pResourceManager; + ma_resource_manager_data_buffer_node* pDataBufferNode; + + MA_ASSERT(pJob != NULL); + + pResourceManager = (ma_resource_manager*)pJob->data.resourceManager.loadDataBufferNode.pResourceManager; + MA_ASSERT(pResourceManager != NULL); + + pDataBufferNode = (ma_resource_manager_data_buffer_node*)pJob->data.resourceManager.loadDataBufferNode.pDataBufferNode; + MA_ASSERT(pDataBufferNode != NULL); + MA_ASSERT(pDataBufferNode->isDataOwnedByResourceManager == MA_TRUE); /* The data should always be owned by the resource manager. */ + + /* The data buffer is not getting deleted, but we may be getting executed out of order. If so, we need to push the job back onto the queue and return. */ + if (pJob->order != ma_atomic_load_32(&pDataBufferNode->executionPointer)) { + return ma_resource_manager_post_job(pResourceManager, pJob); /* Attempting to execute out of order. Probably interleaved with a MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER job. */ + } + + /* First thing we need to do is check whether or not the data buffer is getting deleted. If so we just abort. */ + if (ma_resource_manager_data_buffer_node_result(pDataBufferNode) != MA_BUSY) { + result = ma_resource_manager_data_buffer_node_result(pDataBufferNode); /* The data buffer may be getting deleted before it's even been loaded. */ + goto done; + } + + /* + We're ready to start loading. Essentially what we're doing here is initializing the data supply + of the node. Once this is complete, data buffers can have their connectors initialized which + will allow then to have audio data read from them. + + Note that when the data supply type has been moved away from "unknown", that is when other threads + will determine that the node is available for data delivery and the data buffer connectors can be + initialized. Therefore, it's important that it is set after the data supply has been initialized. + */ + if ((pJob->data.resourceManager.loadDataBufferNode.flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE) != 0) { + /* + Decoding. This is the complex case because we're not going to be doing the entire decoding + process here. Instead it's going to be split of multiple jobs and loaded in pages. The + reason for this is to evenly distribute decoding time across multiple sounds, rather than + having one huge sound hog all the available processing resources. + + The first thing we do is initialize a decoder. This is allocated on the heap and is passed + around to the paging jobs. 
When the last paging job has completed it's processing, it'll + free the decoder for us. + + This job does not do any actual decoding. It instead just posts a PAGE_DATA_BUFFER_NODE job + which is where the actual decoding work will be done. However, once this job is complete, + the node will be in a state where data buffer connectors can be initialized. + */ + ma_decoder* pDecoder; /* <-- Free'd on the last page decode. */ + ma_job pageDataBufferNodeJob; + + /* Allocate the decoder by initializing a decoded data supply. */ + result = ma_resource_manager_data_buffer_node_init_supply_decoded(pResourceManager, pDataBufferNode, pJob->data.resourceManager.loadDataBufferNode.pFilePath, pJob->data.resourceManager.loadDataBufferNode.pFilePathW, pJob->data.resourceManager.loadDataBufferNode.flags, &pDecoder); + + /* + Don't ever propagate an MA_BUSY result code or else the resource manager will think the + node is just busy decoding rather than in an error state. This should never happen, but + including this logic for safety just in case. + */ + if (result == MA_BUSY) { + result = MA_ERROR; + } + + if (result != MA_SUCCESS) { + if (pJob->data.resourceManager.loadDataBufferNode.pFilePath != NULL) { + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_WARNING, "Failed to initialize data supply for \"%s\". %s.\n", pJob->data.resourceManager.loadDataBufferNode.pFilePath, ma_result_description(result)); + } else { +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || defined(_MSC_VER) + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_WARNING, "Failed to initialize data supply for \"%ls\", %s.\n", pJob->data.resourceManager.loadDataBufferNode.pFilePathW, ma_result_description(result)); +#endif + } + + goto done; + } + + /* + At this point the node's data supply is initialized and other threads can start initializing + their data buffer connectors. However, no data will actually be available until we start to + actually decode it. To do this, we need to post a paging job which is where the decoding + work is done. + + Note that if an error occurred at an earlier point, this section will have been skipped. + */ + pageDataBufferNodeJob = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_BUFFER_NODE); + pageDataBufferNodeJob.order = ma_resource_manager_data_buffer_node_next_execution_order(pDataBufferNode); + pageDataBufferNodeJob.data.resourceManager.pageDataBufferNode.pResourceManager = pResourceManager; + pageDataBufferNodeJob.data.resourceManager.pageDataBufferNode.pDataBufferNode = pDataBufferNode; + pageDataBufferNodeJob.data.resourceManager.pageDataBufferNode.pDecoder = pDecoder; + pageDataBufferNodeJob.data.resourceManager.pageDataBufferNode.pDoneNotification = pJob->data.resourceManager.loadDataBufferNode.pDoneNotification; + pageDataBufferNodeJob.data.resourceManager.pageDataBufferNode.pDoneFence = pJob->data.resourceManager.loadDataBufferNode.pDoneFence; + + /* The job has been set up so it can now be posted. */ + result = ma_resource_manager_post_job(pResourceManager, &pageDataBufferNodeJob); + + /* + When we get here, we want to make sure the result code is set to MA_BUSY. The reason for + this is that the result will be copied over to the node's internal result variable. In + this case, since the decoding is still in-progress, we need to make sure the result code + is set to MA_BUSY. 
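+
+        For illustration only: this MA_BUSY state is also what the application observes while an
+        asynchronously loaded sound is still decoding. A rough sketch of a caller polling for
+        completion (hypothetical names; assumes the data source was initialized with the
+        MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC flag, and do_other_work() stands in for whatever
+        the application does in the meantime):
+
+            while (ma_resource_manager_data_source_result(&myDataSource) == MA_BUSY) {
+                do_other_work();
+            }
+
+        Waiting on the done notification or fence is the better option in practice; the loop above is
+        just to show what MA_BUSY means from the outside.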
+ */ + if (result != MA_SUCCESS) { + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_ERROR, "Failed to post MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_BUFFER_NODE job. %s\n", ma_result_description(result)); + ma_decoder_uninit(pDecoder); + ma_free(pDecoder, &pResourceManager->config.allocationCallbacks); + } else { + result = MA_BUSY; + } + } else { + /* No decoding. This is the simple case. We need only read the file content into memory and we're done. */ + result = ma_resource_manager_data_buffer_node_init_supply_encoded(pResourceManager, pDataBufferNode, pJob->data.resourceManager.loadDataBufferNode.pFilePath, pJob->data.resourceManager.loadDataBufferNode.pFilePathW); + } + + +done: + /* File paths are no longer needed. */ + ma_free(pJob->data.resourceManager.loadDataBufferNode.pFilePath, &pResourceManager->config.allocationCallbacks); + ma_free(pJob->data.resourceManager.loadDataBufferNode.pFilePathW, &pResourceManager->config.allocationCallbacks); + + /* + We need to set the result to at the very end to ensure no other threads try reading the data before we've fully initialized the object. Other threads + are going to be inspecting this variable to determine whether or not they're ready to read data. We can only change the result if it's set to MA_BUSY + because otherwise we may be changing away from an error code which would be bad. An example is if the application creates a data buffer, but then + immediately deletes it before we've got to this point. In this case, pDataBuffer->result will be MA_UNAVAILABLE, and setting it to MA_SUCCESS or any + other error code would cause the buffer to look like it's in a state that it's not. + */ + ma_atomic_compare_and_swap_i32(&pDataBufferNode->result, MA_BUSY, result); + + /* At this point initialization is complete and we can signal the notification if any. */ + if (pJob->data.resourceManager.loadDataBufferNode.pInitNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.loadDataBufferNode.pInitNotification); + } + if (pJob->data.resourceManager.loadDataBufferNode.pInitFence != NULL) { + ma_fence_release(pJob->data.resourceManager.loadDataBufferNode.pInitFence); + } + + /* If we have a success result it means we've fully loaded the buffer. This will happen in the non-decoding case. */ + if (result != MA_BUSY) { + if (pJob->data.resourceManager.loadDataBufferNode.pDoneNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.loadDataBufferNode.pDoneNotification); + } + if (pJob->data.resourceManager.loadDataBufferNode.pDoneFence != NULL) { + ma_fence_release(pJob->data.resourceManager.loadDataBufferNode.pDoneFence); + } + } + + /* Increment the node's execution pointer so that the next jobs can be processed. This is how we keep decoding of pages in-order. */ + ma_atomic_fetch_add_32(&pDataBufferNode->executionPointer, 1); + + /* A busy result should be considered successful from the point of view of the job system. 
*/ + if (result == MA_BUSY) { + result = MA_SUCCESS; + } + + return result; +} + +static ma_result ma_job_process__resource_manager__free_data_buffer_node(ma_job* pJob) +{ + ma_resource_manager* pResourceManager; + ma_resource_manager_data_buffer_node* pDataBufferNode; + + MA_ASSERT(pJob != NULL); + + pResourceManager = (ma_resource_manager*)pJob->data.resourceManager.freeDataBufferNode.pResourceManager; + MA_ASSERT(pResourceManager != NULL); + + pDataBufferNode = (ma_resource_manager_data_buffer_node*)pJob->data.resourceManager.freeDataBufferNode.pDataBufferNode; + MA_ASSERT(pDataBufferNode != NULL); + + if (pJob->order != ma_atomic_load_32(&pDataBufferNode->executionPointer)) { + return ma_resource_manager_post_job(pResourceManager, pJob); /* Out of order. */ + } + + ma_resource_manager_data_buffer_node_free(pResourceManager, pDataBufferNode); + + /* The event needs to be signalled last. */ + if (pJob->data.resourceManager.freeDataBufferNode.pDoneNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.freeDataBufferNode.pDoneNotification); + } + + if (pJob->data.resourceManager.freeDataBufferNode.pDoneFence != NULL) { + ma_fence_release(pJob->data.resourceManager.freeDataBufferNode.pDoneFence); + } + + ma_atomic_fetch_add_32(&pDataBufferNode->executionPointer, 1); + return MA_SUCCESS; +} + +static ma_result ma_job_process__resource_manager__page_data_buffer_node(ma_job* pJob) +{ + ma_result result = MA_SUCCESS; + ma_resource_manager* pResourceManager; + ma_resource_manager_data_buffer_node* pDataBufferNode; + + MA_ASSERT(pJob != NULL); + + pResourceManager = (ma_resource_manager*)pJob->data.resourceManager.pageDataBufferNode.pResourceManager; + MA_ASSERT(pResourceManager != NULL); + + pDataBufferNode = (ma_resource_manager_data_buffer_node*)pJob->data.resourceManager.pageDataBufferNode.pDataBufferNode; + MA_ASSERT(pDataBufferNode != NULL); + + if (pJob->order != ma_atomic_load_32(&pDataBufferNode->executionPointer)) { + return ma_resource_manager_post_job(pResourceManager, pJob); /* Out of order. */ + } + + /* Don't do any more decoding if the data buffer has started the uninitialization process. */ + result = ma_resource_manager_data_buffer_node_result(pDataBufferNode); + if (result != MA_BUSY) { + goto done; + } + + /* We're ready to decode the next page. */ + result = ma_resource_manager_data_buffer_node_decode_next_page(pResourceManager, pDataBufferNode, (ma_decoder*)pJob->data.resourceManager.pageDataBufferNode.pDecoder); + + /* + If we have a success code by this point, we want to post another job. We're going to set the + result back to MA_BUSY to make it clear that there's still more to load. + */ + if (result == MA_SUCCESS) { + ma_job newJob; + newJob = *pJob; /* Everything is the same as the input job, except the execution order. */ + newJob.order = ma_resource_manager_data_buffer_node_next_execution_order(pDataBufferNode); /* We need a fresh execution order. */ + + result = ma_resource_manager_post_job(pResourceManager, &newJob); + + /* Since the sound isn't yet fully decoded we want the status to be set to busy. */ + if (result == MA_SUCCESS) { + result = MA_BUSY; + } + } + +done: + /* If there's still more to decode the result will be set to MA_BUSY. Otherwise we can free the decoder. 
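+
+    Worth noting from the caller's side: because each page is committed to the node as soon as it has
+    been decoded, audio that has already been paged in can be read while later pages are still
+    pending. As a rough sketch (hypothetical names), the amount that is currently readable can be
+    queried with:
+
+        ma_uint64 availableFrames;
+        ma_resource_manager_data_source_get_available_frames(&myDataSource, &availableFrames);
+
+    after which up to availableFrames frames can be read with ma_resource_manager_data_source_read_pcm_frames().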
*/ + if (result != MA_BUSY) { + ma_decoder_uninit((ma_decoder*)pJob->data.resourceManager.pageDataBufferNode.pDecoder); + ma_free(pJob->data.resourceManager.pageDataBufferNode.pDecoder, &pResourceManager->config.allocationCallbacks); + } + + /* If we reached the end we need to treat it as successful. */ + if (result == MA_AT_END) { + result = MA_SUCCESS; + } + + /* Make sure we set the result of node in case some error occurred. */ + ma_atomic_compare_and_swap_i32(&pDataBufferNode->result, MA_BUSY, result); + + /* Signal the notification after setting the result in case the notification callback wants to inspect the result code. */ + if (result != MA_BUSY) { + if (pJob->data.resourceManager.pageDataBufferNode.pDoneNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.pageDataBufferNode.pDoneNotification); + } + + if (pJob->data.resourceManager.pageDataBufferNode.pDoneFence != NULL) { + ma_fence_release(pJob->data.resourceManager.pageDataBufferNode.pDoneFence); + } + } + + ma_atomic_fetch_add_32(&pDataBufferNode->executionPointer, 1); + return result; +} + + +static ma_result ma_job_process__resource_manager__load_data_buffer(ma_job* pJob) +{ + ma_result result = MA_SUCCESS; + ma_resource_manager* pResourceManager; + ma_resource_manager_data_buffer* pDataBuffer; + ma_resource_manager_data_supply_type dataSupplyType = ma_resource_manager_data_supply_type_unknown; + ma_bool32 isConnectorInitialized = MA_FALSE; + + /* + All we're doing here is checking if the node has finished loading. If not, we just re-post the job + and keep waiting. Otherwise we increment the execution counter and set the buffer's result code. + */ + MA_ASSERT(pJob != NULL); + + pDataBuffer = (ma_resource_manager_data_buffer*)pJob->data.resourceManager.loadDataBuffer.pDataBuffer; + MA_ASSERT(pDataBuffer != NULL); + + pResourceManager = pDataBuffer->pResourceManager; + + if (pJob->order != ma_atomic_load_32(&pDataBuffer->executionPointer)) { + return ma_resource_manager_post_job(pResourceManager, pJob); /* Attempting to execute out of order. Probably interleaved with a MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER job. */ + } + + /* + First thing we need to do is check whether or not the data buffer is getting deleted. If so we + just abort, but making sure we increment the execution pointer. + */ + result = ma_resource_manager_data_buffer_result(pDataBuffer); + if (result != MA_BUSY) { + goto done; /* <-- This will ensure the exucution pointer is incremented. */ + } else { + result = MA_SUCCESS; /* <-- Make sure this is reset. */ + } + + /* Try initializing the connector if we haven't already. */ + isConnectorInitialized = ma_resource_manager_data_buffer_has_connector(pDataBuffer); + if (isConnectorInitialized == MA_FALSE) { + dataSupplyType = ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode); + + if (dataSupplyType != ma_resource_manager_data_supply_type_unknown) { + /* We can now initialize the connector. If this fails, we need to abort. It's very rare for this to fail. */ + ma_resource_manager_data_source_config dataSourceConfig; /* For setting initial looping state and range. 
*/ + dataSourceConfig = ma_resource_manager_data_source_config_init(); + dataSourceConfig.rangeBegInPCMFrames = pJob->data.resourceManager.loadDataBuffer.rangeBegInPCMFrames; + dataSourceConfig.rangeEndInPCMFrames = pJob->data.resourceManager.loadDataBuffer.rangeEndInPCMFrames; + dataSourceConfig.loopPointBegInPCMFrames = pJob->data.resourceManager.loadDataBuffer.loopPointBegInPCMFrames; + dataSourceConfig.loopPointEndInPCMFrames = pJob->data.resourceManager.loadDataBuffer.loopPointEndInPCMFrames; + dataSourceConfig.isLooping = pJob->data.resourceManager.loadDataBuffer.isLooping; + + result = ma_resource_manager_data_buffer_init_connector(pDataBuffer, &dataSourceConfig, pJob->data.resourceManager.loadDataBuffer.pInitNotification, pJob->data.resourceManager.loadDataBuffer.pInitFence); + if (result != MA_SUCCESS) { + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_ERROR, "Failed to initialize connector for data buffer. %s.\n", ma_result_description(result)); + goto done; + } + } else { + /* Don't have a known data supply type. Most likely the data buffer node is still loading, but it could be that an error occurred. */ + } + } else { + /* The connector is already initialized. Nothing to do here. */ + } + + /* + If the data node is still loading, we need to repost the job and *not* increment the execution + pointer (i.e. we need to not fall through to the "done" label). + + There is a hole between here and the where the data connector is initialized where the data + buffer node may have finished initializing. We need to check for this by checking the result of + the data buffer node and whether or not we had an unknown data supply type at the time of + trying to initialize the data connector. + */ + result = ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode); + if (result == MA_BUSY || (result == MA_SUCCESS && isConnectorInitialized == MA_FALSE && dataSupplyType == ma_resource_manager_data_supply_type_unknown)) { + return ma_resource_manager_post_job(pResourceManager, pJob); + } + +done: + /* Only move away from a busy code so that we don't trash any existing error codes. */ + ma_atomic_compare_and_swap_i32(&pDataBuffer->result, MA_BUSY, result); + + /* Only signal the other threads after the result has been set just for cleanliness sake. */ + if (pJob->data.resourceManager.loadDataBuffer.pDoneNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.loadDataBuffer.pDoneNotification); + } + if (pJob->data.resourceManager.loadDataBuffer.pDoneFence != NULL) { + ma_fence_release(pJob->data.resourceManager.loadDataBuffer.pDoneFence); + } + + /* + If at this point the data buffer has not had it's connector initialized, it means the + notification event was never signalled which means we need to signal it here. 
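+
+    For reference, a rough sketch of how an application typically hooks into these notifications when
+    loading asynchronously (hypothetical names; assumes an already-initialized resource manager and
+    omits error checking):
+
+        ma_fence fence;
+        ma_resource_manager_pipeline_notifications notifications;
+
+        ma_fence_init(&fence);
+        notifications = ma_resource_manager_pipeline_notifications_init();
+        notifications.done.pFence = &fence;
+
+        ma_resource_manager_data_source_init(&resourceManager, "my_sound.wav", MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC, &notifications, &myDataSource);
+        ma_fence_wait(&fence);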
+ */ + if (ma_resource_manager_data_buffer_has_connector(pDataBuffer) == MA_FALSE && result != MA_SUCCESS) { + if (pJob->data.resourceManager.loadDataBuffer.pInitNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.loadDataBuffer.pInitNotification); + } + if (pJob->data.resourceManager.loadDataBuffer.pInitFence != NULL) { + ma_fence_release(pJob->data.resourceManager.loadDataBuffer.pInitFence); + } + } + + ma_atomic_fetch_add_32(&pDataBuffer->executionPointer, 1); + return result; +} + +static ma_result ma_job_process__resource_manager__free_data_buffer(ma_job* pJob) +{ + ma_resource_manager* pResourceManager; + ma_resource_manager_data_buffer* pDataBuffer; + + MA_ASSERT(pJob != NULL); + + pDataBuffer = (ma_resource_manager_data_buffer*)pJob->data.resourceManager.freeDataBuffer.pDataBuffer; + MA_ASSERT(pDataBuffer != NULL); + + pResourceManager = pDataBuffer->pResourceManager; + + if (pJob->order != ma_atomic_load_32(&pDataBuffer->executionPointer)) { + return ma_resource_manager_post_job(pResourceManager, pJob); /* Out of order. */ + } + + ma_resource_manager_data_buffer_uninit_internal(pDataBuffer); + + /* The event needs to be signalled last. */ + if (pJob->data.resourceManager.freeDataBuffer.pDoneNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.freeDataBuffer.pDoneNotification); + } + + if (pJob->data.resourceManager.freeDataBuffer.pDoneFence != NULL) { + ma_fence_release(pJob->data.resourceManager.freeDataBuffer.pDoneFence); + } + + ma_atomic_fetch_add_32(&pDataBuffer->executionPointer, 1); + return MA_SUCCESS; +} + +static ma_result ma_job_process__resource_manager__load_data_stream(ma_job* pJob) +{ + ma_result result = MA_SUCCESS; + ma_decoder_config decoderConfig; + ma_uint32 pageBufferSizeInBytes; + ma_resource_manager* pResourceManager; + ma_resource_manager_data_stream* pDataStream; + + MA_ASSERT(pJob != NULL); + + pDataStream = (ma_resource_manager_data_stream*)pJob->data.resourceManager.loadDataStream.pDataStream; + MA_ASSERT(pDataStream != NULL); + + pResourceManager = pDataStream->pResourceManager; + + if (pJob->order != ma_atomic_load_32(&pDataStream->executionPointer)) { + return ma_resource_manager_post_job(pResourceManager, pJob); /* Out of order. */ + } + + if (ma_resource_manager_data_stream_result(pDataStream) != MA_BUSY) { + result = MA_INVALID_OPERATION; /* Most likely the data stream is being uninitialized. */ + goto done; + } + + /* We need to initialize the decoder first so we can determine the size of the pages. */ + decoderConfig = ma_resource_manager__init_decoder_config(pResourceManager); + + if (pJob->data.resourceManager.loadDataStream.pFilePath != NULL) { + result = ma_decoder_init_vfs(pResourceManager->config.pVFS, pJob->data.resourceManager.loadDataStream.pFilePath, &decoderConfig, &pDataStream->decoder); + } else { + result = ma_decoder_init_vfs_w(pResourceManager->config.pVFS, pJob->data.resourceManager.loadDataStream.pFilePathW, &decoderConfig, &pDataStream->decoder); + } + if (result != MA_SUCCESS) { + goto done; + } + + /* Retrieve the total length of the file before marking the decoder as loaded. */ + if ((pDataStream->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_UNKNOWN_LENGTH) == 0) { + result = ma_decoder_get_length_in_pcm_frames(&pDataStream->decoder, &pDataStream->totalLengthInPCMFrames); + if (result != MA_SUCCESS) { + goto done; /* Failed to retrieve the length. 
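+
+            Note for callers: if retrieving the length is undesirable (it can require a scan over the
+            whole file for some formats), the stream can be opened with the UNKNOWN_LENGTH flag which
+            skips this step entirely. A rough sketch (hypothetical names):
+
+                ma_resource_manager_data_source_init(&resourceManager, "music.ogg",
+                    MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM | MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_UNKNOWN_LENGTH,
+                    NULL, &myDataSource);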
*/ + } + } else { + pDataStream->totalLengthInPCMFrames = 0; + } + + /* + Only mark the decoder as initialized when the length of the decoder has been retrieved because that can possibly require a scan over the whole file + and we don't want to have another thread trying to access the decoder while it's scanning. + */ + pDataStream->isDecoderInitialized = MA_TRUE; + + /* We have the decoder so we can now initialize our page buffer. */ + pageBufferSizeInBytes = ma_resource_manager_data_stream_get_page_size_in_frames(pDataStream) * 2 * ma_get_bytes_per_frame(pDataStream->decoder.outputFormat, pDataStream->decoder.outputChannels); + + pDataStream->pPageData = ma_malloc(pageBufferSizeInBytes, &pResourceManager->config.allocationCallbacks); + if (pDataStream->pPageData == NULL) { + ma_decoder_uninit(&pDataStream->decoder); + result = MA_OUT_OF_MEMORY; + goto done; + } + + /* Seek to our initial seek point before filling the initial pages. */ + ma_decoder_seek_to_pcm_frame(&pDataStream->decoder, pJob->data.resourceManager.loadDataStream.initialSeekPoint); + + /* We have our decoder and our page buffer, so now we need to fill our pages. */ + ma_resource_manager_data_stream_fill_pages(pDataStream); + + /* And now we're done. We want to make sure the result is MA_SUCCESS. */ + result = MA_SUCCESS; + +done: + ma_free(pJob->data.resourceManager.loadDataStream.pFilePath, &pResourceManager->config.allocationCallbacks); + ma_free(pJob->data.resourceManager.loadDataStream.pFilePathW, &pResourceManager->config.allocationCallbacks); + + /* We can only change the status away from MA_BUSY. If it's set to anything else it means an error has occurred somewhere or the uninitialization process has started (most likely). */ + ma_atomic_compare_and_swap_i32(&pDataStream->result, MA_BUSY, result); + + /* Only signal the other threads after the result has been set just for cleanliness sake. */ + if (pJob->data.resourceManager.loadDataStream.pInitNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.loadDataStream.pInitNotification); + } + if (pJob->data.resourceManager.loadDataStream.pInitFence != NULL) { + ma_fence_release(pJob->data.resourceManager.loadDataStream.pInitFence); + } + + ma_atomic_fetch_add_32(&pDataStream->executionPointer, 1); + return result; +} + +static ma_result ma_job_process__resource_manager__free_data_stream(ma_job* pJob) +{ + ma_resource_manager* pResourceManager; + ma_resource_manager_data_stream* pDataStream; + + MA_ASSERT(pJob != NULL); + + pDataStream = (ma_resource_manager_data_stream*)pJob->data.resourceManager.freeDataStream.pDataStream; + MA_ASSERT(pDataStream != NULL); + + pResourceManager = pDataStream->pResourceManager; + + if (pJob->order != ma_atomic_load_32(&pDataStream->executionPointer)) { + return ma_resource_manager_post_job(pResourceManager, pJob); /* Out of order. */ + } + + /* If our status is not MA_UNAVAILABLE we have a bug somewhere. */ + MA_ASSERT(ma_resource_manager_data_stream_result(pDataStream) == MA_UNAVAILABLE); + + if (pDataStream->isDecoderInitialized) { + ma_decoder_uninit(&pDataStream->decoder); + } + + if (pDataStream->pPageData != NULL) { + ma_free(pDataStream->pPageData, &pResourceManager->config.allocationCallbacks); + pDataStream->pPageData = NULL; /* Just in case... */ + } + + ma_data_source_uninit(&pDataStream->ds); + + /* The event needs to be signalled last. 
*/ + if (pJob->data.resourceManager.freeDataStream.pDoneNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.freeDataStream.pDoneNotification); + } + if (pJob->data.resourceManager.freeDataStream.pDoneFence != NULL) { + ma_fence_release(pJob->data.resourceManager.freeDataStream.pDoneFence); + } + + /*ma_atomic_fetch_add_32(&pDataStream->executionPointer, 1);*/ + return MA_SUCCESS; +} + +static ma_result ma_job_process__resource_manager__page_data_stream(ma_job* pJob) +{ + ma_result result = MA_SUCCESS; + ma_resource_manager* pResourceManager; + ma_resource_manager_data_stream* pDataStream; + + MA_ASSERT(pJob != NULL); + + pDataStream = (ma_resource_manager_data_stream*)pJob->data.resourceManager.pageDataStream.pDataStream; + MA_ASSERT(pDataStream != NULL); + + pResourceManager = pDataStream->pResourceManager; + + if (pJob->order != ma_atomic_load_32(&pDataStream->executionPointer)) { + return ma_resource_manager_post_job(pResourceManager, pJob); /* Out of order. */ + } + + /* For streams, the status should be MA_SUCCESS. */ + if (ma_resource_manager_data_stream_result(pDataStream) != MA_SUCCESS) { + result = MA_INVALID_OPERATION; + goto done; + } + + ma_resource_manager_data_stream_fill_page(pDataStream, pJob->data.resourceManager.pageDataStream.pageIndex); + +done: + ma_atomic_fetch_add_32(&pDataStream->executionPointer, 1); + return result; +} + +static ma_result ma_job_process__resource_manager__seek_data_stream(ma_job* pJob) +{ + ma_result result = MA_SUCCESS; + ma_resource_manager* pResourceManager; + ma_resource_manager_data_stream* pDataStream; + + MA_ASSERT(pJob != NULL); + + pDataStream = (ma_resource_manager_data_stream*)pJob->data.resourceManager.seekDataStream.pDataStream; + MA_ASSERT(pDataStream != NULL); + + pResourceManager = pDataStream->pResourceManager; + + if (pJob->order != ma_atomic_load_32(&pDataStream->executionPointer)) { + return ma_resource_manager_post_job(pResourceManager, pJob); /* Out of order. */ + } + + /* For streams the status should be MA_SUCCESS for this to do anything. */ + if (ma_resource_manager_data_stream_result(pDataStream) != MA_SUCCESS || pDataStream->isDecoderInitialized == MA_FALSE) { + result = MA_INVALID_OPERATION; + goto done; + } + + /* + With seeking we just assume both pages are invalid and the relative frame cursor at position 0. This is basically exactly the same as loading, except + instead of initializing the decoder, we seek to a frame. + */ + ma_decoder_seek_to_pcm_frame(&pDataStream->decoder, pJob->data.resourceManager.seekDataStream.frameIndex); + + /* After seeking we'll need to reload the pages. */ + ma_resource_manager_data_stream_fill_pages(pDataStream); + + /* We need to let the public API know that we're done seeking. */ + ma_atomic_fetch_sub_32(&pDataStream->seekCounter, 1); + +done: + ma_atomic_fetch_add_32(&pDataStream->executionPointer, 1); + return result; +} + +MA_API ma_result ma_resource_manager_process_job(ma_resource_manager* pResourceManager, ma_job* pJob) +{ + if (pResourceManager == NULL || pJob == NULL) { + return MA_INVALID_ARGS; + } + + return ma_job_process(pJob); +} + +MA_API ma_result ma_resource_manager_process_next_job(ma_resource_manager* pResourceManager) +{ + ma_result result; + ma_job job; + + if (pResourceManager == NULL) { + return MA_INVALID_ARGS; + } + + /* This will return MA_CANCELLED if the next job is a quit job. 
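+
+    For example, an application that manages its own job threads (jobThreadCount set to 0 in the
+    resource manager config) would typically drive this from a loop along these lines (rough sketch,
+    hypothetical surrounding code):
+
+        for (;;) {
+            ma_result result = ma_resource_manager_process_next_job(pResourceManager);
+            if (result == MA_CANCELLED) {
+                break;
+            }
+        }
+
+    with ma_resource_manager_post_job_quit() being posted when it's time for the loop to exit.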
*/ + result = ma_resource_manager_next_job(pResourceManager, &job); + if (result != MA_SUCCESS) { + return result; + } + + return ma_job_process(&job); +} +#else +/* We'll get here if the resource manager is being excluded from the build. We need to define the job processing callbacks as no-ops. */ +static ma_result ma_job_process__resource_manager__load_data_buffer_node(ma_job* pJob) { return ma_job_process__noop(pJob); } +static ma_result ma_job_process__resource_manager__free_data_buffer_node(ma_job* pJob) { return ma_job_process__noop(pJob); } +static ma_result ma_job_process__resource_manager__page_data_buffer_node(ma_job* pJob) { return ma_job_process__noop(pJob); } +static ma_result ma_job_process__resource_manager__load_data_buffer(ma_job* pJob) { return ma_job_process__noop(pJob); } +static ma_result ma_job_process__resource_manager__free_data_buffer(ma_job* pJob) { return ma_job_process__noop(pJob); } +static ma_result ma_job_process__resource_manager__load_data_stream(ma_job* pJob) { return ma_job_process__noop(pJob); } +static ma_result ma_job_process__resource_manager__free_data_stream(ma_job* pJob) { return ma_job_process__noop(pJob); } +static ma_result ma_job_process__resource_manager__page_data_stream(ma_job* pJob) { return ma_job_process__noop(pJob); } +static ma_result ma_job_process__resource_manager__seek_data_stream(ma_job* pJob) { return ma_job_process__noop(pJob); } +#endif /* MA_NO_RESOURCE_MANAGER */ + + +#ifndef MA_NO_NODE_GRAPH +/* 10ms @ 48K = 480. Must never exceed 65535. */ +#ifndef MA_DEFAULT_NODE_CACHE_CAP_IN_FRAMES_PER_BUS +#define MA_DEFAULT_NODE_CACHE_CAP_IN_FRAMES_PER_BUS 480 +#endif + + +static ma_result ma_node_read_pcm_frames(ma_node* pNode, ma_uint32 outputBusIndex, float* pFramesOut, ma_uint32 frameCount, ma_uint32* pFramesRead, ma_uint64 globalTime); + +MA_API void ma_debug_fill_pcm_frames_with_sine_wave(float* pFramesOut, ma_uint32 frameCount, ma_format format, ma_uint32 channels, ma_uint32 sampleRate) +{ +#ifndef MA_NO_GENERATION + { + ma_waveform_config waveformConfig; + ma_waveform waveform; + + waveformConfig = ma_waveform_config_init(format, channels, sampleRate, ma_waveform_type_sine, 1.0, 400); + ma_waveform_init(&waveformConfig, &waveform); + ma_waveform_read_pcm_frames(&waveform, pFramesOut, frameCount, NULL); + } +#else + { + (void)pFramesOut; + (void)frameCount; + (void)format; + (void)channels; + (void)sampleRate; +#if defined(MA_DEBUG_OUTPUT) + { +#if _MSC_VER +#pragma message ("ma_debug_fill_pcm_frames_with_sine_wave() will do nothing because MA_NO_GENERATION is enabled.") +#endif + } +#endif + } +#endif +} + + + +MA_API ma_node_graph_config ma_node_graph_config_init(ma_uint32 channels) +{ + ma_node_graph_config config; + + MA_ZERO_OBJECT(&config); + config.channels = channels; + config.nodeCacheCapInFrames = MA_DEFAULT_NODE_CACHE_CAP_IN_FRAMES_PER_BUS; + + return config; +} + + +static void ma_node_graph_set_is_reading(ma_node_graph* pNodeGraph, ma_bool32 isReading) +{ + MA_ASSERT(pNodeGraph != NULL); + ma_atomic_exchange_32(&pNodeGraph->isReading, isReading); +} + +#if 0 +static ma_bool32 ma_node_graph_is_reading(ma_node_graph* pNodeGraph) +{ + MA_ASSERT(pNodeGraph != NULL); + return ma_atomic_load_32(&pNodeGraph->isReading); +} +#endif + + +static void ma_node_graph_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_node_graph* pNodeGraph = (ma_node_graph*)pNode; + ma_uint64 framesRead; + + ma_node_graph_read_pcm_frames(pNodeGraph, 
ppFramesOut[0], *pFrameCountOut, &framesRead); + + *pFrameCountOut = (ma_uint32)framesRead; /* Safe cast. */ + + (void)ppFramesIn; + (void)pFrameCountIn; +} + +static ma_node_vtable g_node_graph_node_vtable = +{ + ma_node_graph_node_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 0, /* 0 input buses. */ + 1, /* 1 output bus. */ + 0 /* Flags. */ +}; + +static void ma_node_graph_endpoint_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + MA_ASSERT(pNode != NULL); + MA_ASSERT(ma_node_get_input_bus_count(pNode) == 1); + MA_ASSERT(ma_node_get_output_bus_count(pNode) == 1); + + /* Input channel count needs to be the same as the output channel count. */ + MA_ASSERT(ma_node_get_input_channels(pNode, 0) == ma_node_get_output_channels(pNode, 0)); + + /* We don't need to do anything here because it's a passthrough. */ + (void)pNode; + (void)ppFramesIn; + (void)pFrameCountIn; + (void)ppFramesOut; + (void)pFrameCountOut; + +#if 0 + /* The data has already been mixed. We just need to move it to the output buffer. */ + if (ppFramesIn != NULL) { + ma_copy_pcm_frames(ppFramesOut[0], ppFramesIn[0], *pFrameCountOut, ma_format_f32, ma_node_get_output_channels(pNode, 0)); + } +#endif +} + +static ma_node_vtable g_node_graph_endpoint_vtable = +{ + ma_node_graph_endpoint_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 1, /* 1 input bus. */ + 1, /* 1 output bus. */ + MA_NODE_FLAG_PASSTHROUGH /* Flags. The endpoint is a passthrough. */ +}; + +MA_API ma_result ma_node_graph_init(const ma_node_graph_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_node_graph* pNodeGraph) +{ + ma_result result; + ma_node_config baseConfig; + ma_node_config endpointConfig; + + if (pNodeGraph == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNodeGraph); + pNodeGraph->nodeCacheCapInFrames = pConfig->nodeCacheCapInFrames; + if (pNodeGraph->nodeCacheCapInFrames == 0) { + pNodeGraph->nodeCacheCapInFrames = MA_DEFAULT_NODE_CACHE_CAP_IN_FRAMES_PER_BUS; + } + + + /* Base node so we can use the node graph as a node into another graph. */ + baseConfig = ma_node_config_init(); + baseConfig.vtable = &g_node_graph_node_vtable; + baseConfig.pOutputChannels = &pConfig->channels; + + result = ma_node_init(pNodeGraph, &baseConfig, pAllocationCallbacks, &pNodeGraph->base); + if (result != MA_SUCCESS) { + return result; + } + + + /* Endpoint. 
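+
+    Every node in the graph ultimately feeds into this endpoint, and reading from the graph reads
+    from it. As a rough usage sketch from the caller's side (hypothetical names; error checking
+    omitted), a graph is normally set up and driven like this:
+
+        ma_node_graph_config graphConfig = ma_node_graph_config_init(2);
+        ma_node_graph graph;
+
+        ma_node_graph_init(&graphConfig, NULL, &graph);
+        ma_node_attach_output_bus(&myNode, 0, ma_node_graph_get_endpoint(&graph), 0);
+
+        ma_node_graph_read_pcm_frames(&graph, pFramesOut, frameCount, NULL);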
*/ + endpointConfig = ma_node_config_init(); + endpointConfig.vtable = &g_node_graph_endpoint_vtable; + endpointConfig.pInputChannels = &pConfig->channels; + endpointConfig.pOutputChannels = &pConfig->channels; + + result = ma_node_init(pNodeGraph, &endpointConfig, pAllocationCallbacks, &pNodeGraph->endpoint); + if (result != MA_SUCCESS) { + ma_node_uninit(&pNodeGraph->base, pAllocationCallbacks); + return result; + } + + return MA_SUCCESS; +} + +MA_API void ma_node_graph_uninit(ma_node_graph* pNodeGraph, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pNodeGraph == NULL) { + return; + } + + ma_node_uninit(&pNodeGraph->endpoint, pAllocationCallbacks); +} + +MA_API ma_node* ma_node_graph_get_endpoint(ma_node_graph* pNodeGraph) +{ + if (pNodeGraph == NULL) { + return NULL; + } + + return &pNodeGraph->endpoint; +} + +MA_API ma_result ma_node_graph_read_pcm_frames(ma_node_graph* pNodeGraph, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_result result = MA_SUCCESS; + ma_uint64 totalFramesRead; + ma_uint32 channels; + + if (pFramesRead != NULL) { + *pFramesRead = 0; /* Safety. */ + } + + if (pNodeGraph == NULL) { + return MA_INVALID_ARGS; + } + + channels = ma_node_get_output_channels(&pNodeGraph->endpoint, 0); + + + /* We'll be nice and try to do a full read of all frameCount frames. */ + totalFramesRead = 0; + while (totalFramesRead < frameCount) { + ma_uint32 framesJustRead; + ma_uint64 framesToRead = frameCount - totalFramesRead; + + if (framesToRead > 0xFFFFFFFF) { + framesToRead = 0xFFFFFFFF; + } + + ma_node_graph_set_is_reading(pNodeGraph, MA_TRUE); + { + result = ma_node_read_pcm_frames(&pNodeGraph->endpoint, 0, (float*)ma_offset_pcm_frames_ptr(pFramesOut, totalFramesRead, ma_format_f32, channels), (ma_uint32)framesToRead, &framesJustRead, ma_node_get_time(&pNodeGraph->endpoint)); + } + ma_node_graph_set_is_reading(pNodeGraph, MA_FALSE); + + totalFramesRead += framesJustRead; + + if (result != MA_SUCCESS) { + break; + } + + /* Abort if we weren't able to read any frames or else we risk getting stuck in a loop. */ + if (framesJustRead == 0) { + break; + } + } + + /* Let's go ahead and silence any leftover frames just for some added safety to ensure the caller doesn't try emitting garbage out of the speakers. */ + if (totalFramesRead < frameCount) { + ma_silence_pcm_frames(ma_offset_pcm_frames_ptr(pFramesOut, totalFramesRead, ma_format_f32, channels), (frameCount - totalFramesRead), ma_format_f32, channels); + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesRead; + } + + return result; +} + +MA_API ma_uint32 ma_node_graph_get_channels(const ma_node_graph* pNodeGraph) +{ + if (pNodeGraph == NULL) { + return 0; + } + + return ma_node_get_output_channels(&pNodeGraph->endpoint, 0); +} + +MA_API ma_uint64 ma_node_graph_get_time(const ma_node_graph* pNodeGraph) +{ + if (pNodeGraph == NULL) { + return 0; + } + + return ma_node_get_time(&pNodeGraph->endpoint); /* Global time is just the local time of the endpoint. */ +} + +MA_API ma_result ma_node_graph_set_time(ma_node_graph* pNodeGraph, ma_uint64 globalTime) +{ + if (pNodeGraph == NULL) { + return MA_INVALID_ARGS; + } + + return ma_node_set_time(&pNodeGraph->endpoint, globalTime); /* Global time is just the local time of the endpoint. */ +} + + +#define MA_NODE_OUTPUT_BUS_FLAG_HAS_READ 0x01 /* Whether or not this bus ready to read more data. Only used on nodes with multiple output buses. 
*/ + +static ma_result ma_node_output_bus_init(ma_node* pNode, ma_uint32 outputBusIndex, ma_uint32 channels, ma_node_output_bus* pOutputBus) +{ + MA_ASSERT(pOutputBus != NULL); + MA_ASSERT(outputBusIndex < MA_MAX_NODE_BUS_COUNT); + MA_ASSERT(outputBusIndex < ma_node_get_output_bus_count(pNode)); + MA_ASSERT(channels < 256); + + MA_ZERO_OBJECT(pOutputBus); + + if (channels == 0) { + return MA_INVALID_ARGS; + } + + pOutputBus->pNode = pNode; + pOutputBus->outputBusIndex = (ma_uint8)outputBusIndex; + pOutputBus->channels = (ma_uint8)channels; + pOutputBus->flags = MA_NODE_OUTPUT_BUS_FLAG_HAS_READ; /* <-- Important that this flag is set by default. */ + pOutputBus->volume = 1; + + return MA_SUCCESS; +} + +static void ma_node_output_bus_lock(ma_node_output_bus* pOutputBus) +{ + ma_spinlock_lock(&pOutputBus->lock); +} + +static void ma_node_output_bus_unlock(ma_node_output_bus* pOutputBus) +{ + ma_spinlock_unlock(&pOutputBus->lock); +} + + +static ma_uint32 ma_node_output_bus_get_channels(const ma_node_output_bus* pOutputBus) +{ + return pOutputBus->channels; +} + + +static void ma_node_output_bus_set_has_read(ma_node_output_bus* pOutputBus, ma_bool32 hasRead) +{ + if (hasRead) { + ma_atomic_fetch_or_32(&pOutputBus->flags, MA_NODE_OUTPUT_BUS_FLAG_HAS_READ); + } else { + ma_atomic_fetch_and_32(&pOutputBus->flags, (ma_uint32)~MA_NODE_OUTPUT_BUS_FLAG_HAS_READ); + } +} + +static ma_bool32 ma_node_output_bus_has_read(ma_node_output_bus* pOutputBus) +{ + return (ma_atomic_load_32(&pOutputBus->flags) & MA_NODE_OUTPUT_BUS_FLAG_HAS_READ) != 0; +} + + +static void ma_node_output_bus_set_is_attached(ma_node_output_bus* pOutputBus, ma_bool32 isAttached) +{ + ma_atomic_exchange_32(&pOutputBus->isAttached, isAttached); +} + +static ma_bool32 ma_node_output_bus_is_attached(ma_node_output_bus* pOutputBus) +{ + return ma_atomic_load_32(&pOutputBus->isAttached); +} + + +static ma_result ma_node_output_bus_set_volume(ma_node_output_bus* pOutputBus, float volume) +{ + MA_ASSERT(pOutputBus != NULL); + + if (volume < 0.0f) { + volume = 0.0f; + } + + ma_atomic_exchange_f32(&pOutputBus->volume, volume); + + return MA_SUCCESS; +} + +static float ma_node_output_bus_get_volume(const ma_node_output_bus* pOutputBus) +{ + return ma_atomic_load_f32((float*)&pOutputBus->volume); +} + + +static ma_result ma_node_input_bus_init(ma_uint32 channels, ma_node_input_bus* pInputBus) +{ + MA_ASSERT(pInputBus != NULL); + MA_ASSERT(channels < 256); + + MA_ZERO_OBJECT(pInputBus); + + if (channels == 0) { + return MA_INVALID_ARGS; + } + + pInputBus->channels = (ma_uint8)channels; + + return MA_SUCCESS; +} + +static void ma_node_input_bus_lock(ma_node_input_bus* pInputBus) +{ + MA_ASSERT(pInputBus != NULL); + + ma_spinlock_lock(&pInputBus->lock); +} + +static void ma_node_input_bus_unlock(ma_node_input_bus* pInputBus) +{ + MA_ASSERT(pInputBus != NULL); + + ma_spinlock_unlock(&pInputBus->lock); +} + + +static void ma_node_input_bus_next_begin(ma_node_input_bus* pInputBus) +{ + ma_atomic_fetch_add_32(&pInputBus->nextCounter, 1); +} + +static void ma_node_input_bus_next_end(ma_node_input_bus* pInputBus) +{ + ma_atomic_fetch_sub_32(&pInputBus->nextCounter, 1); +} + +static ma_uint32 ma_node_input_bus_get_next_counter(ma_node_input_bus* pInputBus) +{ + return ma_atomic_load_32(&pInputBus->nextCounter); +} + + +static ma_uint32 ma_node_input_bus_get_channels(const ma_node_input_bus* pInputBus) +{ + return pInputBus->channels; +} + + +static void ma_node_input_bus_detach__no_output_bus_lock(ma_node_input_bus* pInputBus, ma_node_output_bus* 
pOutputBus) +{ + MA_ASSERT(pInputBus != NULL); + MA_ASSERT(pOutputBus != NULL); + + /* + Mark the output bus as detached first. This will prevent future iterations on the audio thread + from iterating this output bus. + */ + ma_node_output_bus_set_is_attached(pOutputBus, MA_FALSE); + + /* + We cannot use the output bus lock here since it'll be getting used at a higher level, but we do + still need to use the input bus lock since we'll be updating pointers on two different output + buses. The same rules apply here as the attaching case. Although we're using a lock here, we're + *not* using a lock when iterating over the list in the audio thread. We therefore need to craft + this in a way such that the iteration on the audio thread doesn't break. + + The the first thing to do is swap out the "next" pointer of the previous output bus with the + new "next" output bus. This is the operation that matters for iteration on the audio thread. + After that, the previous pointer on the new "next" pointer needs to be updated, after which + point the linked list will be in a good state. + */ + ma_node_input_bus_lock(pInputBus); + { + ma_node_output_bus* pOldPrev = (ma_node_output_bus*)ma_atomic_load_ptr(&pOutputBus->pPrev); + ma_node_output_bus* pOldNext = (ma_node_output_bus*)ma_atomic_load_ptr(&pOutputBus->pNext); + + if (pOldPrev != NULL) { + ma_atomic_exchange_ptr(&pOldPrev->pNext, pOldNext); /* <-- This is where the output bus is detached from the list. */ + } + if (pOldNext != NULL) { + ma_atomic_exchange_ptr(&pOldNext->pPrev, pOldPrev); /* <-- This is required for detachment. */ + } + } + ma_node_input_bus_unlock(pInputBus); + + /* At this point the output bus is detached and the linked list is completely unaware of it. Reset some data for safety. */ + ma_atomic_exchange_ptr(&pOutputBus->pNext, NULL); /* Using atomic exchanges here, mainly for the benefit of analysis tools which don't always recognize spinlocks. */ + ma_atomic_exchange_ptr(&pOutputBus->pPrev, NULL); /* As above. */ + pOutputBus->pInputNode = NULL; + pOutputBus->inputNodeInputBusIndex = 0; + + + /* + For thread-safety reasons, we don't want to be returning from this straight away. We need to + wait for the audio thread to finish with the output bus. There's two things we need to wait + for. The first is the part that selects the next output bus in the list, and the other is the + part that reads from the output bus. Basically all we're doing is waiting for the input bus + to stop referencing the output bus. + + We're doing this part last because we want the section above to run while the audio thread + is finishing up with the output bus, just for efficiency reasons. We marked the output bus as + detached right at the top of this function which is going to prevent the audio thread from + iterating the output bus again. + */ + + /* Part 1: Wait for the current iteration to complete. */ + while (ma_node_input_bus_get_next_counter(pInputBus) > 0) { + ma_yield(); + } + + /* Part 2: Wait for any reads to complete. */ + while (ma_atomic_load_32(&pOutputBus->refCount) > 0) { + ma_yield(); + } + + /* + At this point we're done detaching and we can be guaranteed that the audio thread is not going + to attempt to reference this output bus again (until attached again). + */ +} + +#if 0 /* Not used at the moment, but leaving here in case I need it later. 
*/
+static void ma_node_input_bus_detach(ma_node_input_bus* pInputBus, ma_node_output_bus* pOutputBus)
+{
+    MA_ASSERT(pInputBus  != NULL);
+    MA_ASSERT(pOutputBus != NULL);
+
+    ma_node_output_bus_lock(pOutputBus);
+    {
+        ma_node_input_bus_detach__no_output_bus_lock(pInputBus, pOutputBus);
+    }
+    ma_node_output_bus_unlock(pOutputBus);
+}
+#endif
+
+static void ma_node_input_bus_attach(ma_node_input_bus* pInputBus, ma_node_output_bus* pOutputBus, ma_node* pNewInputNode, ma_uint32 inputNodeInputBusIndex)
+{
+    MA_ASSERT(pInputBus  != NULL);
+    MA_ASSERT(pOutputBus != NULL);
+
+    ma_node_output_bus_lock(pOutputBus);
+    {
+        ma_node_output_bus* pOldInputNode = (ma_node_output_bus*)ma_atomic_load_ptr(&pOutputBus->pInputNode);
+
+        /* Detach from any existing attachment first if necessary. */
+        if (pOldInputNode != NULL) {
+            ma_node_input_bus_detach__no_output_bus_lock(pInputBus, pOutputBus);
+        }
+
+        /*
+        At this point we can be sure the output bus is not attached to anything. The linked list in the
+        old input bus has been updated so that pOutputBus will not get iterated again.
+        */
+        pOutputBus->pInputNode             = pNewInputNode;  /* No need for an atomic assignment here because modification of this variable always happens within a lock. */
+        pOutputBus->inputNodeInputBusIndex = (ma_uint8)inputNodeInputBusIndex;
+
+        /*
+        Now we need to attach the output bus to the linked list. This involves updating two pointers on
+        two different output buses so I'm going to go ahead and keep this simple and just use a lock.
+        There are ways to do this without a lock, but it's just too hard to maintain for its value.
+
+        Although we're locking here, it's important to remember that we're *not* locking when iterating
+        and reading audio data since that'll be running on the audio thread. As a result we need to be
+        careful how we craft this so that we don't break iteration. What we're going to do is always
+        attach the new item so that it becomes the first item in the list. That way, as we're iterating
+        we won't break any links in the list and iteration will continue safely. The detaching case will
+        also be crafted in such a way as to not break list iteration. It's important to remember to use
+        atomic exchanges here since no locking is happening on the audio thread during iteration.
+        */
+        ma_node_input_bus_lock(pInputBus);
+        {
+            ma_node_output_bus* pNewPrev = &pInputBus->head;
+            ma_node_output_bus* pNewNext = (ma_node_output_bus*)ma_atomic_load_ptr(&pInputBus->head.pNext);
+
+            /* Update the local output bus. */
+            ma_atomic_exchange_ptr(&pOutputBus->pPrev, pNewPrev);
+            ma_atomic_exchange_ptr(&pOutputBus->pNext, pNewNext);
+
+            /* Update the other output buses to point back to the local output bus. */
+            ma_atomic_exchange_ptr(&pInputBus->head.pNext, pOutputBus);   /* <-- This is where the output bus is actually attached to the input bus. */
+
+            /* Do the previous pointer last. This is only used for detachment. */
+            if (pNewNext != NULL) {
+                ma_atomic_exchange_ptr(&pNewNext->pPrev, pOutputBus);
+            }
+        }
+        ma_node_input_bus_unlock(pInputBus);
+
+        /*
+        Mark the node as attached last. This is used to control whether or not the output bus will be
+        iterated on the audio thread. Mainly required for detachment purposes. 
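+
+        For what it's worth, this is the internal path behind ma_node_attach_output_bus(), so from the
+        public API the guarantees above are what make runtime re-routing safe while the audio thread
+        keeps reading. A rough sketch of such re-routing (hypothetical node objects):
+
+            ma_node_attach_output_bus(&sourceNode, 0, &effectNode, 0);
+            ma_node_attach_output_bus(&effectNode, 0, ma_node_graph_get_endpoint(&graph), 0);
+
+            ma_node_detach_output_bus(&sourceNode, 0);
+            ma_node_attach_output_bus(&sourceNode, 0, ma_node_graph_get_endpoint(&graph), 0);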
+ */ + ma_node_output_bus_set_is_attached(pOutputBus, MA_TRUE); + } + ma_node_output_bus_unlock(pOutputBus); +} + +static ma_node_output_bus* ma_node_input_bus_next(ma_node_input_bus* pInputBus, ma_node_output_bus* pOutputBus) +{ + ma_node_output_bus* pNext; + + MA_ASSERT(pInputBus != NULL); + + if (pOutputBus == NULL) { + return NULL; + } + + ma_node_input_bus_next_begin(pInputBus); + { + pNext = pOutputBus; + for (;;) { + pNext = (ma_node_output_bus*)ma_atomic_load_ptr(&pNext->pNext); + if (pNext == NULL) { + break; /* Reached the end. */ + } + + if (ma_node_output_bus_is_attached(pNext) == MA_FALSE) { + continue; /* The node is not attached. Keep checking. */ + } + + /* The next node has been selected. */ + break; + } + + /* We need to increment the reference count of the selected node. */ + if (pNext != NULL) { + ma_atomic_fetch_add_32(&pNext->refCount, 1); + } + + /* The previous node is no longer being referenced. */ + ma_atomic_fetch_sub_32(&pOutputBus->refCount, 1); + } + ma_node_input_bus_next_end(pInputBus); + + return pNext; +} + +static ma_node_output_bus* ma_node_input_bus_first(ma_node_input_bus* pInputBus) +{ + return ma_node_input_bus_next(pInputBus, &pInputBus->head); +} + + + +static ma_result ma_node_input_bus_read_pcm_frames(ma_node* pInputNode, ma_node_input_bus* pInputBus, float* pFramesOut, ma_uint32 frameCount, ma_uint32* pFramesRead, ma_uint64 globalTime) +{ + ma_result result = MA_SUCCESS; + ma_node_output_bus* pOutputBus; + ma_node_output_bus* pFirst; + ma_uint32 inputChannels; + ma_bool32 doesOutputBufferHaveContent = MA_FALSE; + + (void)pInputNode; /* Not currently used. */ + + /* + This will be called from the audio thread which means we can't be doing any locking. Basically, + this function will not perfom any locking, whereas attaching and detaching will, but crafted in + such a way that we don't need to perform any locking here. The important thing to remember is + to always iterate in a forward direction. + + In order to process any data we need to first read from all input buses. That's where this + function comes in. This iterates over each of the attachments and accumulates/mixes them. We + also convert the channels to the nodes output channel count before mixing. We want to do this + channel conversion so that the caller of this function can invoke the processing callback + without having to do it themselves. + + When we iterate over each of the attachments on the input bus, we need to read as much data as + we can from each of them so that we don't end up with holes between each of the attachments. To + do this, we need to read from each attachment in a loop and read as many frames as we can, up + to `frameCount`. + */ + MA_ASSERT(pInputNode != NULL); + MA_ASSERT(pFramesRead != NULL); /* pFramesRead is critical and must always be specified. On input it's undefined and on output it'll be set to the number of frames actually read. */ + + *pFramesRead = 0; /* Safety. */ + + inputChannels = ma_node_input_bus_get_channels(pInputBus); + + /* + We need to be careful with how we call ma_node_input_bus_first() and ma_node_input_bus_next(). They + are both critical to our lock-free thread-safety system. We can only call ma_node_input_bus_first() + once per iteration, however we have an optimization to checks whether or not it's the first item in + the list. We therefore need to store a pointer to the first item rather than repeatedly calling + ma_node_input_bus_first(). 
It's safe to keep hold of this pointer, so long as we don't dereference it + after calling ma_node_input_bus_next(), which we won't be. + */ + pFirst = ma_node_input_bus_first(pInputBus); + if (pFirst == NULL) { + return MA_SUCCESS; /* No attachments. Read nothing. */ + } + + for (pOutputBus = pFirst; pOutputBus != NULL; pOutputBus = ma_node_input_bus_next(pInputBus, pOutputBus)) { + ma_uint32 framesProcessed = 0; + ma_bool32 isSilentOutput = MA_FALSE; + + MA_ASSERT(pOutputBus->pNode != NULL); + MA_ASSERT(((ma_node_base*)pOutputBus->pNode)->vtable != NULL); + + isSilentOutput = (((ma_node_base*)pOutputBus->pNode)->vtable->flags & MA_NODE_FLAG_SILENT_OUTPUT) != 0; + + if (pFramesOut != NULL) { + /* Read. */ + float temp[MA_DATA_CONVERTER_STACK_BUFFER_SIZE / sizeof(float)]; + ma_uint32 tempCapInFrames = ma_countof(temp) / inputChannels; + + while (framesProcessed < frameCount) { + float* pRunningFramesOut; + ma_uint32 framesToRead; + ma_uint32 framesJustRead; + + framesToRead = frameCount - framesProcessed; + if (framesToRead > tempCapInFrames) { + framesToRead = tempCapInFrames; + } + + pRunningFramesOut = ma_offset_pcm_frames_ptr_f32(pFramesOut, framesProcessed, inputChannels); + + if (doesOutputBufferHaveContent == MA_FALSE) { + /* Fast path. First attachment. We just read straight into the output buffer (no mixing required). */ + result = ma_node_read_pcm_frames(pOutputBus->pNode, pOutputBus->outputBusIndex, pRunningFramesOut, framesToRead, &framesJustRead, globalTime + framesProcessed); + } else { + /* Slow path. Not the first attachment. Mixing required. */ + result = ma_node_read_pcm_frames(pOutputBus->pNode, pOutputBus->outputBusIndex, temp, framesToRead, &framesJustRead, globalTime + framesProcessed); + if (result == MA_SUCCESS || result == MA_AT_END) { + if (isSilentOutput == MA_FALSE) { /* Don't mix if the node outputs silence. */ + ma_mix_pcm_frames_f32(pRunningFramesOut, temp, framesJustRead, inputChannels, /*volume*/1); + } + } + } + + framesProcessed += framesJustRead; + + /* If we reached the end or otherwise failed to read any data we need to finish up with this output node. */ + if (result != MA_SUCCESS) { + break; + } + + /* If we didn't read anything, abort so we don't get stuck in a loop. */ + if (framesJustRead == 0) { + break; + } + } + + /* If it's the first attachment we didn't do any mixing. Any leftover samples need to be silenced. */ + if (pOutputBus == pFirst && framesProcessed < frameCount) { + ma_silence_pcm_frames(ma_offset_pcm_frames_ptr(pFramesOut, framesProcessed, ma_format_f32, inputChannels), (frameCount - framesProcessed), ma_format_f32, inputChannels); + } + + if (isSilentOutput == MA_FALSE) { + doesOutputBufferHaveContent = MA_TRUE; + } + } else { + /* Seek. */ + ma_node_read_pcm_frames(pOutputBus->pNode, pOutputBus->outputBusIndex, NULL, frameCount, &framesProcessed, globalTime); + } + } + + /* If we didn't output anything, output silence. */ + if (doesOutputBufferHaveContent == MA_FALSE && pFramesOut != NULL) { + ma_silence_pcm_frames(pFramesOut, frameCount, ma_format_f32, inputChannels); + } + + /* In this path we always "process" the entire amount. */ + *pFramesRead = frameCount; + + return result; +} + + +MA_API ma_node_config ma_node_config_init(void) +{ + ma_node_config config; + + MA_ZERO_OBJECT(&config); + config.initialState = ma_node_state_started; /* Nodes are started by default. 
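+
+    As a rough sketch of how this config is typically filled out for a simple custom node (hypothetical
+    names; a 1-in, 1-out stereo copy node with error checking omitted, where myNode is a ma_node_base or
+    a struct whose first member is one):
+
+        static void my_node_process(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
+        {
+            ma_copy_pcm_frames(ppFramesOut[0], ppFramesIn[0], *pFrameCountOut, ma_format_f32, ma_node_get_output_channels(pNode, 0));
+            (void)pFrameCountIn;
+        }
+
+        static ma_node_vtable my_node_vtable = { my_node_process, NULL, 1, 1, 0 };
+
+        ma_uint32 channels = 2;
+        ma_node_config nodeConfig  = ma_node_config_init();
+        nodeConfig.vtable          = &my_node_vtable;
+        nodeConfig.pInputChannels  = &channels;
+        nodeConfig.pOutputChannels = &channels;
+
+        ma_node_init(&graph, &nodeConfig, NULL, &myNode);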
*/ + config.inputBusCount = MA_NODE_BUS_COUNT_UNKNOWN; + config.outputBusCount = MA_NODE_BUS_COUNT_UNKNOWN; + + return config; +} + + + +static ma_result ma_node_detach_full(ma_node* pNode); + +static float* ma_node_get_cached_input_ptr(ma_node* pNode, ma_uint32 inputBusIndex) +{ + ma_node_base* pNodeBase = (ma_node_base*)pNode; + ma_uint32 iInputBus; + float* pBasePtr; + + MA_ASSERT(pNodeBase != NULL); + + /* Input data is stored at the front of the buffer. */ + pBasePtr = pNodeBase->pCachedData; + for (iInputBus = 0; iInputBus < inputBusIndex; iInputBus += 1) { + pBasePtr += pNodeBase->cachedDataCapInFramesPerBus * ma_node_input_bus_get_channels(&pNodeBase->pInputBuses[iInputBus]); + } + + return pBasePtr; +} + +static float* ma_node_get_cached_output_ptr(ma_node* pNode, ma_uint32 outputBusIndex) +{ + ma_node_base* pNodeBase = (ma_node_base*)pNode; + ma_uint32 iInputBus; + ma_uint32 iOutputBus; + float* pBasePtr; + + MA_ASSERT(pNodeBase != NULL); + + /* Cached output data starts after the input data. */ + pBasePtr = pNodeBase->pCachedData; + for (iInputBus = 0; iInputBus < ma_node_get_input_bus_count(pNodeBase); iInputBus += 1) { + pBasePtr += pNodeBase->cachedDataCapInFramesPerBus * ma_node_input_bus_get_channels(&pNodeBase->pInputBuses[iInputBus]); + } + + for (iOutputBus = 0; iOutputBus < outputBusIndex; iOutputBus += 1) { + pBasePtr += pNodeBase->cachedDataCapInFramesPerBus * ma_node_output_bus_get_channels(&pNodeBase->pOutputBuses[iOutputBus]); + } + + return pBasePtr; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t inputBusOffset; + size_t outputBusOffset; + size_t cachedDataOffset; + ma_uint32 inputBusCount; /* So it doesn't have to be calculated twice. */ + ma_uint32 outputBusCount; /* So it doesn't have to be calculated twice. */ +} ma_node_heap_layout; + +static ma_result ma_node_translate_bus_counts(const ma_node_config* pConfig, ma_uint32* pInputBusCount, ma_uint32* pOutputBusCount) +{ + ma_uint32 inputBusCount; + ma_uint32 outputBusCount; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pInputBusCount != NULL); + MA_ASSERT(pOutputBusCount != NULL); + + /* Bus counts are determined by the vtable, unless they're set to `MA_NODE_BUS_COUNT_UNKNWON`, in which case they're taken from the config. */ + if (pConfig->vtable->inputBusCount == MA_NODE_BUS_COUNT_UNKNOWN) { + inputBusCount = pConfig->inputBusCount; + } else { + inputBusCount = pConfig->vtable->inputBusCount; + + if (pConfig->inputBusCount != MA_NODE_BUS_COUNT_UNKNOWN && pConfig->inputBusCount != pConfig->vtable->inputBusCount) { + return MA_INVALID_ARGS; /* Invalid configuration. You must not specify a conflicting bus count between the node's config and the vtable. */ + } + } + + if (pConfig->vtable->outputBusCount == MA_NODE_BUS_COUNT_UNKNOWN) { + outputBusCount = pConfig->outputBusCount; + } else { + outputBusCount = pConfig->vtable->outputBusCount; + + if (pConfig->outputBusCount != MA_NODE_BUS_COUNT_UNKNOWN && pConfig->outputBusCount != pConfig->vtable->outputBusCount) { + return MA_INVALID_ARGS; /* Invalid configuration. You must not specify a conflicting bus count between the node's config and the vtable. */ + } + } + + /* Bus counts must be within limits. */ + if (inputBusCount > MA_MAX_NODE_BUS_COUNT || outputBusCount > MA_MAX_NODE_BUS_COUNT) { + return MA_INVALID_ARGS; + } + + + /* We must have channel counts for each bus. 
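+
+    As a concrete illustration of the MA_NODE_BUS_COUNT_UNKNOWN case described above (hypothetical
+    vtable for a 2-in, 1-out node whose vtable declares its bus counts as unknown), it's the config
+    that supplies both the bus counts and the per-bus channel counts:
+
+        ma_uint32 inputChannels[2]  = { 2, 2 };
+        ma_uint32 outputChannels[1] = { 2 };
+        ma_node_config nodeConfig   = ma_node_config_init();
+
+        nodeConfig.vtable          = &my_mixer_vtable;
+        nodeConfig.inputBusCount   = 2;
+        nodeConfig.outputBusCount  = 1;
+        nodeConfig.pInputChannels  = inputChannels;
+        nodeConfig.pOutputChannels = outputChannels;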
*/ + if ((inputBusCount > 0 && pConfig->pInputChannels == NULL) || (outputBusCount > 0 && pConfig->pOutputChannels == NULL)) { + return MA_INVALID_ARGS; /* You must specify channel counts for each input and output bus. */ + } + + + /* Some special rules for passthrough nodes. */ + if ((pConfig->vtable->flags & MA_NODE_FLAG_PASSTHROUGH) != 0) { + if ((pConfig->vtable->inputBusCount != 0 && pConfig->vtable->inputBusCount != 1) || pConfig->vtable->outputBusCount != 1) { + return MA_INVALID_ARGS; /* Passthrough nodes must have exactly 1 output bus and either 0 or 1 input bus. */ + } + + if (pConfig->pInputChannels[0] != pConfig->pOutputChannels[0]) { + return MA_INVALID_ARGS; /* Passthrough nodes must have the same number of channels between input and output nodes. */ + } + } + + + *pInputBusCount = inputBusCount; + *pOutputBusCount = outputBusCount; + + return MA_SUCCESS; +} + +static ma_result ma_node_get_heap_layout(ma_node_graph* pNodeGraph, const ma_node_config* pConfig, ma_node_heap_layout* pHeapLayout) +{ + ma_result result; + ma_uint32 inputBusCount; + ma_uint32 outputBusCount; + + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL || pConfig->vtable == NULL || pConfig->vtable->onProcess == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_node_translate_bus_counts(pConfig, &inputBusCount, &outputBusCount); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes = 0; + + /* Input buses. */ + if (inputBusCount > MA_MAX_NODE_LOCAL_BUS_COUNT) { + pHeapLayout->inputBusOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(sizeof(ma_node_input_bus) * inputBusCount); + } else { + pHeapLayout->inputBusOffset = MA_SIZE_MAX; /* MA_SIZE_MAX indicates that no heap allocation is required for the input bus. */ + } + + /* Output buses. */ + if (outputBusCount > MA_MAX_NODE_LOCAL_BUS_COUNT) { + pHeapLayout->outputBusOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(sizeof(ma_node_output_bus) * outputBusCount); + } else { + pHeapLayout->outputBusOffset = MA_SIZE_MAX; + } + + /* + Cached audio data. + + We need to allocate memory for a caching both input and output data. We have an optimization + where no caching is necessary for specific conditions: + + - The node has 0 inputs and 1 output. + + When a node meets the above conditions, no cache is allocated. + + The size choice for this buffer is a little bit finicky. We don't want to be too wasteful by + allocating too much, but at the same time we want it be large enough so that enough frames can + be processed for each call to ma_node_read_pcm_frames() so that it keeps things efficient. For + now I'm going with 10ms @ 48K which is 480 frames per bus. This is configurable at compile + time. It might also be worth investigating whether or not this can be configured at run time. + */ + if (inputBusCount == 0 && outputBusCount == 1) { + /* Fast path. No cache needed. */ + pHeapLayout->cachedDataOffset = MA_SIZE_MAX; + } else { + /* Slow path. Cache needed. 
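+        (For a sense of scale: with the 480-frames-per-bus capacity mentioned above, a node with one stereo
+        input bus and one stereo output bus caches 480 * 2 * sizeof(float) = 3840 bytes per bus, roughly
+        7.5 KB in total before the 64-byte alignment applied below.)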
 */
+            size_t cachedDataSizeInBytes = 0;
+            ma_uint32 iBus;
+
+            for (iBus = 0; iBus < inputBusCount; iBus += 1) {
+                cachedDataSizeInBytes += pNodeGraph->nodeCacheCapInFrames * ma_get_bytes_per_frame(ma_format_f32, pConfig->pInputChannels[iBus]);
+            }
+
+            for (iBus = 0; iBus < outputBusCount; iBus += 1) {
+                cachedDataSizeInBytes += pNodeGraph->nodeCacheCapInFrames * ma_get_bytes_per_frame(ma_format_f32, pConfig->pOutputChannels[iBus]);
+            }
+
+            pHeapLayout->cachedDataOffset = pHeapLayout->sizeInBytes;
+            pHeapLayout->sizeInBytes += ma_align_64(cachedDataSizeInBytes);
+        }
+
+
+        /*
+        Not technically part of the heap, but we can output the input and output bus counts so we can
+        avoid a redundant call to ma_node_translate_bus_counts().
+        */
+        pHeapLayout->inputBusCount = inputBusCount;
+        pHeapLayout->outputBusCount = outputBusCount;
+
+        /* Make sure allocation size is aligned. */
+        pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes);
+
+        return MA_SUCCESS;
+    }
+
+    MA_API ma_result ma_node_get_heap_size(ma_node_graph* pNodeGraph, const ma_node_config* pConfig, size_t* pHeapSizeInBytes)
+    {
+        ma_result result;
+        ma_node_heap_layout heapLayout;
+
+        if (pHeapSizeInBytes == NULL) {
+            return MA_INVALID_ARGS;
+        }
+
+        *pHeapSizeInBytes = 0;
+
+        result = ma_node_get_heap_layout(pNodeGraph, pConfig, &heapLayout);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        *pHeapSizeInBytes = heapLayout.sizeInBytes;
+
+        return MA_SUCCESS;
+    }
+
+    MA_API ma_result ma_node_init_preallocated(ma_node_graph* pNodeGraph, const ma_node_config* pConfig, void* pHeap, ma_node* pNode)
+    {
+        ma_node_base* pNodeBase = (ma_node_base*)pNode;
+        ma_result result;
+        ma_node_heap_layout heapLayout;
+        ma_uint32 iInputBus;
+        ma_uint32 iOutputBus;
+
+        if (pNodeBase == NULL) {
+            return MA_INVALID_ARGS;
+        }
+
+        MA_ZERO_OBJECT(pNodeBase);
+
+        result = ma_node_get_heap_layout(pNodeGraph, pConfig, &heapLayout);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        pNodeBase->_pHeap = pHeap;
+        MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes);
+
+        pNodeBase->pNodeGraph = pNodeGraph;
+        pNodeBase->vtable = pConfig->vtable;
+        pNodeBase->state = pConfig->initialState;
+        pNodeBase->stateTimes[ma_node_state_started] = 0;
+        pNodeBase->stateTimes[ma_node_state_stopped] = (ma_uint64)(ma_int64)-1; /* Weird casting for VC6 compatibility. */
+        pNodeBase->inputBusCount = heapLayout.inputBusCount;
+        pNodeBase->outputBusCount = heapLayout.outputBusCount;
+
+        if (heapLayout.inputBusOffset != MA_SIZE_MAX) {
+            pNodeBase->pInputBuses = (ma_node_input_bus*)ma_offset_ptr(pHeap, heapLayout.inputBusOffset);
+        } else {
+            pNodeBase->pInputBuses = pNodeBase->_inputBuses;
+        }
+
+        if (heapLayout.outputBusOffset != MA_SIZE_MAX) {
+            pNodeBase->pOutputBuses = (ma_node_output_bus*)ma_offset_ptr(pHeap, heapLayout.outputBusOffset);
+        } else {
+            pNodeBase->pOutputBuses = pNodeBase->_outputBuses;
+        }
+
+        if (heapLayout.cachedDataOffset != MA_SIZE_MAX) {
+            pNodeBase->pCachedData = (float*)ma_offset_ptr(pHeap, heapLayout.cachedDataOffset);
+            pNodeBase->cachedDataCapInFramesPerBus = pNodeGraph->nodeCacheCapInFrames;
+        } else {
+            pNodeBase->pCachedData = NULL;
+        }
+
+
+
+        /* We need to run an initialization step for each input and output bus.
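+        (For callers managing their own memory, the intended calling pattern for this function is roughly
+        the following sketch, with error handling omitted and an initialized graph, config and node object
+        assumed; ma_node_init() further below performs the same steps using ma_malloc():
+
+            size_t heapSizeInBytes;
+            ma_node_get_heap_size(&nodeGraph, &config, &heapSizeInBytes);
+
+            void* pHeap = ma_malloc(heapSizeInBytes, NULL);
+            ma_node_init_preallocated(&nodeGraph, &config, pHeap, &myNode);
+
+        In this path the heap block is owned by the caller and must remain valid for the node's lifetime.)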
 */
+        for (iInputBus = 0; iInputBus < ma_node_get_input_bus_count(pNodeBase); iInputBus += 1) {
+            result = ma_node_input_bus_init(pConfig->pInputChannels[iInputBus], &pNodeBase->pInputBuses[iInputBus]);
+            if (result != MA_SUCCESS) {
+                return result;
+            }
+        }
+
+        for (iOutputBus = 0; iOutputBus < ma_node_get_output_bus_count(pNodeBase); iOutputBus += 1) {
+            result = ma_node_output_bus_init(pNodeBase, iOutputBus, pConfig->pOutputChannels[iOutputBus], &pNodeBase->pOutputBuses[iOutputBus]);
+            if (result != MA_SUCCESS) {
+                return result;
+            }
+        }
+
+
+        /* The cached data needs to be initialized to silence (or a sine wave tone if we're debugging). */
+        if (pNodeBase->pCachedData != NULL) {
+            ma_uint32 iBus;
+
+#if 1   /* Toggle this between 0 and 1 to turn debugging on or off. 1 = fill with silence (the default); 0 = fill with a sine wave for debugging. */
+            /* For safety we'll go ahead and default the buffer to silence. */
+            for (iBus = 0; iBus < ma_node_get_input_bus_count(pNodeBase); iBus += 1) {
+                ma_silence_pcm_frames(ma_node_get_cached_input_ptr(pNode, iBus), pNodeBase->cachedDataCapInFramesPerBus, ma_format_f32, ma_node_input_bus_get_channels(&pNodeBase->pInputBuses[iBus]));
+            }
+            for (iBus = 0; iBus < ma_node_get_output_bus_count(pNodeBase); iBus += 1) {
+                ma_silence_pcm_frames(ma_node_get_cached_output_ptr(pNode, iBus), pNodeBase->cachedDataCapInFramesPerBus, ma_format_f32, ma_node_output_bus_get_channels(&pNodeBase->pOutputBuses[iBus]));
+            }
+#else
+            /* For debugging. Default to a sine wave. */
+            for (iBus = 0; iBus < ma_node_get_input_bus_count(pNodeBase); iBus += 1) {
+                ma_debug_fill_pcm_frames_with_sine_wave(ma_node_get_cached_input_ptr(pNode, iBus), pNodeBase->cachedDataCapInFramesPerBus, ma_format_f32, ma_node_input_bus_get_channels(&pNodeBase->pInputBuses[iBus]), 48000);
+            }
+            for (iBus = 0; iBus < ma_node_get_output_bus_count(pNodeBase); iBus += 1) {
+                ma_debug_fill_pcm_frames_with_sine_wave(ma_node_get_cached_output_ptr(pNode, iBus), pNodeBase->cachedDataCapInFramesPerBus, ma_format_f32, ma_node_output_bus_get_channels(&pNodeBase->pOutputBuses[iBus]), 48000);
+            }
+#endif
+        }
+
+        return MA_SUCCESS;
+    }
+
+    MA_API ma_result ma_node_init(ma_node_graph* pNodeGraph, const ma_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_node* pNode)
+    {
+        ma_result result;
+        size_t heapSizeInBytes;
+        void* pHeap;
+
+        result = ma_node_get_heap_size(pNodeGraph, pConfig, &heapSizeInBytes);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        if (heapSizeInBytes > 0) {
+            pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks);
+            if (pHeap == NULL) {
+                return MA_OUT_OF_MEMORY;
+            }
+        } else {
+            pHeap = NULL;
+        }
+
+        result = ma_node_init_preallocated(pNodeGraph, pConfig, pHeap, pNode);
+        if (result != MA_SUCCESS) {
+            ma_free(pHeap, pAllocationCallbacks);
+            return result;
+        }
+
+        ((ma_node_base*)pNode)->_ownsHeap = MA_TRUE;
+        return MA_SUCCESS;
+    }
+
+    MA_API void ma_node_uninit(ma_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks)
+    {
+        ma_node_base* pNodeBase = (ma_node_base*)pNode;
+
+        if (pNodeBase == NULL) {
+            return;
+        }
+
+        /*
+        The first thing we need to do is fully detach the node. This will detach all inputs and
+        outputs. We need to do this first because it will sever the connection with the node graph and
+        allow us to complete uninitialization without needing to worry about thread-safety with the
+        audio thread. The detachment process will wait for any local processing of the node to finish.
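+        In practice this also means uninitialization is intended to happen outside of the audio thread,
+        since this call may block briefly while the audio thread finishes with the node.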
+ */ + ma_node_detach_full(pNode); + + /* + At this point the node should be completely unreferenced by the node graph and we can finish up + the uninitialization process without needing to worry about thread-safety. + */ + if (pNodeBase->_ownsHeap) { + ma_free(pNodeBase->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_node_graph* ma_node_get_node_graph(const ma_node* pNode) +{ + if (pNode == NULL) { + return NULL; + } + + return ((const ma_node_base*)pNode)->pNodeGraph; +} + +MA_API ma_uint32 ma_node_get_input_bus_count(const ma_node* pNode) +{ + if (pNode == NULL) { + return 0; + } + + return ((ma_node_base*)pNode)->inputBusCount; +} + +MA_API ma_uint32 ma_node_get_output_bus_count(const ma_node* pNode) +{ + if (pNode == NULL) { + return 0; + } + + return ((ma_node_base*)pNode)->outputBusCount; +} + + +MA_API ma_uint32 ma_node_get_input_channels(const ma_node* pNode, ma_uint32 inputBusIndex) +{ + const ma_node_base* pNodeBase = (const ma_node_base*)pNode; + + if (pNode == NULL) { + return 0; + } + + if (inputBusIndex >= ma_node_get_input_bus_count(pNode)) { + return 0; /* Invalid bus index. */ + } + + return ma_node_input_bus_get_channels(&pNodeBase->pInputBuses[inputBusIndex]); +} + +MA_API ma_uint32 ma_node_get_output_channels(const ma_node* pNode, ma_uint32 outputBusIndex) +{ + const ma_node_base* pNodeBase = (const ma_node_base*)pNode; + + if (pNode == NULL) { + return 0; + } + + if (outputBusIndex >= ma_node_get_output_bus_count(pNode)) { + return 0; /* Invalid bus index. */ + } + + return ma_node_output_bus_get_channels(&pNodeBase->pOutputBuses[outputBusIndex]); +} + + +static ma_result ma_node_detach_full(ma_node* pNode) +{ + ma_node_base* pNodeBase = (ma_node_base*)pNode; + ma_uint32 iInputBus; + + if (pNodeBase == NULL) { + return MA_INVALID_ARGS; + } + + /* + Make sure the node is completely detached first. This will not return until the output bus is + guaranteed to no longer be referenced by the audio thread. + */ + ma_node_detach_all_output_buses(pNode); + + /* + At this point all output buses will have been detached from the graph and we can be guaranteed + that none of it's input nodes will be getting processed by the graph. We can detach these + without needing to worry about the audio thread touching them. + */ + for (iInputBus = 0; iInputBus < ma_node_get_input_bus_count(pNode); iInputBus += 1) { + ma_node_input_bus* pInputBus; + ma_node_output_bus* pOutputBus; + + pInputBus = &pNodeBase->pInputBuses[iInputBus]; + + /* + This is important. We cannot be using ma_node_input_bus_first() or ma_node_input_bus_next(). Those + functions are specifically for the audio thread. We'll instead just manually iterate using standard + linked list logic. We don't need to worry about the audio thread referencing these because the step + above severed the connection to the graph. + */ + for (pOutputBus = (ma_node_output_bus*)ma_atomic_load_ptr(&pInputBus->head.pNext); pOutputBus != NULL; pOutputBus = (ma_node_output_bus*)ma_atomic_load_ptr(&pOutputBus->pNext)) { + ma_node_detach_output_bus(pOutputBus->pNode, pOutputBus->outputBusIndex); /* This won't do any waiting in practice and should be efficient. 
*/ + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_node_detach_output_bus(ma_node* pNode, ma_uint32 outputBusIndex) +{ + ma_result result = MA_SUCCESS; + ma_node_base* pNodeBase = (ma_node_base*)pNode; + ma_node_base* pInputNodeBase; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + if (outputBusIndex >= ma_node_get_output_bus_count(pNode)) { + return MA_INVALID_ARGS; /* Invalid output bus index. */ + } + + /* We need to lock the output bus because we need to inspect the input node and grab it's input bus. */ + ma_node_output_bus_lock(&pNodeBase->pOutputBuses[outputBusIndex]); + { + pInputNodeBase = (ma_node_base*)pNodeBase->pOutputBuses[outputBusIndex].pInputNode; + if (pInputNodeBase != NULL) { + ma_node_input_bus_detach__no_output_bus_lock(&pInputNodeBase->pInputBuses[pNodeBase->pOutputBuses[outputBusIndex].inputNodeInputBusIndex], &pNodeBase->pOutputBuses[outputBusIndex]); + } + } + ma_node_output_bus_unlock(&pNodeBase->pOutputBuses[outputBusIndex]); + + return result; +} + +MA_API ma_result ma_node_detach_all_output_buses(ma_node* pNode) +{ + ma_uint32 iOutputBus; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + for (iOutputBus = 0; iOutputBus < ma_node_get_output_bus_count(pNode); iOutputBus += 1) { + ma_node_detach_output_bus(pNode, iOutputBus); + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_node_attach_output_bus(ma_node* pNode, ma_uint32 outputBusIndex, ma_node* pOtherNode, ma_uint32 otherNodeInputBusIndex) +{ + ma_node_base* pNodeBase = (ma_node_base*)pNode; + ma_node_base* pOtherNodeBase = (ma_node_base*)pOtherNode; + + if (pNodeBase == NULL || pOtherNodeBase == NULL) { + return MA_INVALID_ARGS; + } + + if (pNodeBase == pOtherNodeBase) { + return MA_INVALID_OPERATION; /* Cannot attach a node to itself. */ + } + + if (outputBusIndex >= ma_node_get_output_bus_count(pNode) || otherNodeInputBusIndex >= ma_node_get_input_bus_count(pOtherNode)) { + return MA_INVALID_OPERATION; /* Invalid bus index. */ + } + + /* The output channel count of the output node must be the same as the input channel count of the input node. */ + if (ma_node_get_output_channels(pNode, outputBusIndex) != ma_node_get_input_channels(pOtherNode, otherNodeInputBusIndex)) { + return MA_INVALID_OPERATION; /* Channel count is incompatible. */ + } + + /* This will deal with detaching if the output bus is already attached to something. */ + ma_node_input_bus_attach(&pOtherNodeBase->pInputBuses[otherNodeInputBusIndex], &pNodeBase->pOutputBuses[outputBusIndex], pOtherNode, otherNodeInputBusIndex); + + return MA_SUCCESS; +} + +MA_API ma_result ma_node_set_output_bus_volume(ma_node* pNode, ma_uint32 outputBusIndex, float volume) +{ + ma_node_base* pNodeBase = (ma_node_base*)pNode; + + if (pNodeBase == NULL) { + return MA_INVALID_ARGS; + } + + if (outputBusIndex >= ma_node_get_output_bus_count(pNode)) { + return MA_INVALID_ARGS; /* Invalid bus index. */ + } + + return ma_node_output_bus_set_volume(&pNodeBase->pOutputBuses[outputBusIndex], volume); +} + +MA_API float ma_node_get_output_bus_volume(const ma_node* pNode, ma_uint32 outputBusIndex) +{ + const ma_node_base* pNodeBase = (const ma_node_base*)pNode; + + if (pNodeBase == NULL) { + return 0; + } + + if (outputBusIndex >= ma_node_get_output_bus_count(pNode)) { + return 0; /* Invalid bus index. 
*/ + } + + return ma_node_output_bus_get_volume(&pNodeBase->pOutputBuses[outputBusIndex]); +} + +MA_API ma_result ma_node_set_state(ma_node* pNode, ma_node_state state) +{ + ma_node_base* pNodeBase = (ma_node_base*)pNode; + + if (pNodeBase == NULL) { + return MA_INVALID_ARGS; + } + + ma_atomic_exchange_i32(&pNodeBase->state, state); + + return MA_SUCCESS; +} + +MA_API ma_node_state ma_node_get_state(const ma_node* pNode) +{ + const ma_node_base* pNodeBase = (const ma_node_base*)pNode; + + if (pNodeBase == NULL) { + return ma_node_state_stopped; + } + + return (ma_node_state)ma_atomic_load_i32(&pNodeBase->state); +} + +MA_API ma_result ma_node_set_state_time(ma_node* pNode, ma_node_state state, ma_uint64 globalTime) +{ + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + /* Validation check for safety since we'll be using this as an index into stateTimes[]. */ + if (state != ma_node_state_started && state != ma_node_state_stopped) { + return MA_INVALID_ARGS; + } + + ma_atomic_exchange_64(&((ma_node_base*)pNode)->stateTimes[state], globalTime); + + return MA_SUCCESS; +} + +MA_API ma_uint64 ma_node_get_state_time(const ma_node* pNode, ma_node_state state) +{ + if (pNode == NULL) { + return 0; + } + + /* Validation check for safety since we'll be using this as an index into stateTimes[]. */ + if (state != ma_node_state_started && state != ma_node_state_stopped) { + return 0; + } + + return ma_atomic_load_64(&((ma_node_base*)pNode)->stateTimes[state]); +} + +MA_API ma_node_state ma_node_get_state_by_time(const ma_node* pNode, ma_uint64 globalTime) +{ + if (pNode == NULL) { + return ma_node_state_stopped; + } + + return ma_node_get_state_by_time_range(pNode, globalTime, globalTime); +} + +MA_API ma_node_state ma_node_get_state_by_time_range(const ma_node* pNode, ma_uint64 globalTimeBeg, ma_uint64 globalTimeEnd) +{ + ma_node_state state; + + if (pNode == NULL) { + return ma_node_state_stopped; + } + + state = ma_node_get_state(pNode); + + /* An explicitly stopped node is always stopped. */ + if (state == ma_node_state_stopped) { + return ma_node_state_stopped; + } + + /* + Getting here means the node is marked as started, but it may still not be truly started due to + it's start time not having been reached yet. Also, the stop time may have also been reached in + which case it'll be considered stopped. + */ + if (ma_node_get_state_time(pNode, ma_node_state_started) > globalTimeBeg) { + return ma_node_state_stopped; /* Start time has not yet been reached. */ + } + + if (ma_node_get_state_time(pNode, ma_node_state_stopped) <= globalTimeEnd) { + return ma_node_state_stopped; /* Stop time has been reached. */ + } + + /* Getting here means the node is marked as started and is within it's start/stop times. 
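+    As an example of how these state times are typically used (a sketch; currentGlobalTime stands in for
+    however the application tracks the graph's global time, and 48000 assumes a 48kHz graph), a node can be
+    scheduled to start one second from now with:
+
+        ma_node_set_state_time(pNode, ma_node_state_started, currentGlobalTime + 48000);
+        ma_node_set_state(pNode, ma_node_state_started);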
*/ + return ma_node_state_started; +} + +MA_API ma_uint64 ma_node_get_time(const ma_node* pNode) +{ + if (pNode == NULL) { + return 0; + } + + return ma_atomic_load_64(&((ma_node_base*)pNode)->localTime); +} + +MA_API ma_result ma_node_set_time(ma_node* pNode, ma_uint64 localTime) +{ + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + ma_atomic_exchange_64(&((ma_node_base*)pNode)->localTime, localTime); + + return MA_SUCCESS; +} + + + +static void ma_node_process_pcm_frames_internal(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_node_base* pNodeBase = (ma_node_base*)pNode; + + MA_ASSERT(pNode != NULL); + + if (pNodeBase->vtable->onProcess) { + pNodeBase->vtable->onProcess(pNode, ppFramesIn, pFrameCountIn, ppFramesOut, pFrameCountOut); + } +} + +static ma_result ma_node_read_pcm_frames(ma_node* pNode, ma_uint32 outputBusIndex, float* pFramesOut, ma_uint32 frameCount, ma_uint32* pFramesRead, ma_uint64 globalTime) +{ + ma_node_base* pNodeBase = (ma_node_base*)pNode; + ma_result result = MA_SUCCESS; + ma_uint32 iInputBus; + ma_uint32 iOutputBus; + ma_uint32 inputBusCount; + ma_uint32 outputBusCount; + ma_uint32 totalFramesRead = 0; + float* ppFramesIn[MA_MAX_NODE_BUS_COUNT]; + float* ppFramesOut[MA_MAX_NODE_BUS_COUNT]; + ma_uint64 globalTimeBeg; + ma_uint64 globalTimeEnd; + ma_uint64 startTime; + ma_uint64 stopTime; + ma_uint32 timeOffsetBeg; + ma_uint32 timeOffsetEnd; + ma_uint32 frameCountIn; + ma_uint32 frameCountOut; + + /* + pFramesRead is mandatory. It must be used to determine how many frames were read. It's normal and + expected that the number of frames read may be different to that requested. Therefore, the caller + must look at this value to correctly determine how many frames were read. + */ + MA_ASSERT(pFramesRead != NULL); /* <-- If you've triggered this assert, you're using this function wrong. You *must* use this variable and inspect it after the call returns. */ + if (pFramesRead == NULL) { + return MA_INVALID_ARGS; + } + + *pFramesRead = 0; /* Safety. */ + + if (pNodeBase == NULL) { + return MA_INVALID_ARGS; + } + + if (outputBusIndex >= ma_node_get_output_bus_count(pNodeBase)) { + return MA_INVALID_ARGS; /* Invalid output bus index. */ + } + + /* Don't do anything if we're in a stopped state. */ + if (ma_node_get_state_by_time_range(pNode, globalTime, globalTime + frameCount) != ma_node_state_started) { + return MA_SUCCESS; /* We're in a stopped state. This is not an error - we just need to not read anything. */ + } + + + globalTimeBeg = globalTime; + globalTimeEnd = globalTime + frameCount; + startTime = ma_node_get_state_time(pNode, ma_node_state_started); + stopTime = ma_node_get_state_time(pNode, ma_node_state_stopped); + + /* + At this point we know that we are inside our start/stop times. However, we may need to adjust + our frame count and output pointer to accomodate since we could be straddling the time period + that this function is getting called for. + + It's possible (and likely) that the start time does not line up with the output buffer. We + therefore need to offset it by a number of frames to accomodate. The same thing applies for + the stop time. + */ + timeOffsetBeg = (globalTimeBeg < startTime) ? (ma_uint32)(globalTimeEnd - startTime) : 0; + timeOffsetEnd = (globalTimeEnd > stopTime) ? (ma_uint32)(globalTimeEnd - stopTime) : 0; + + /* Trim based on the start offset. We need to silence the start of the buffer. 
*/ + if (timeOffsetBeg > 0) { + ma_silence_pcm_frames(pFramesOut, timeOffsetBeg, ma_format_f32, ma_node_get_output_channels(pNode, outputBusIndex)); + pFramesOut += timeOffsetBeg * ma_node_get_output_channels(pNode, outputBusIndex); + frameCount -= timeOffsetBeg; + } + + /* Trim based on the end offset. We don't need to silence the tail section because we'll just have a reduced value written to pFramesRead. */ + if (timeOffsetEnd > 0) { + frameCount -= timeOffsetEnd; + } + + + /* We run on different paths depending on the bus counts. */ + inputBusCount = ma_node_get_input_bus_count(pNode); + outputBusCount = ma_node_get_output_bus_count(pNode); + + /* + Run a simplified path when there are no inputs and one output. In this case there's nothing to + actually read and we can go straight to output. This is a very common scenario because the vast + majority of data source nodes will use this setup so this optimization I think is worthwhile. + */ + if (inputBusCount == 0 && outputBusCount == 1) { + /* Fast path. No need to read from input and no need for any caching. */ + frameCountIn = 0; + frameCountOut = frameCount; /* Just read as much as we can. The callback will return what was actually read. */ + + ppFramesOut[0] = pFramesOut; + + /* + If it's a passthrough we won't be expecting the callback to output anything, so we'll + need to pre-silence the output buffer. + */ + if ((pNodeBase->vtable->flags & MA_NODE_FLAG_PASSTHROUGH) != 0) { + ma_silence_pcm_frames(pFramesOut, frameCount, ma_format_f32, ma_node_get_output_channels(pNode, outputBusIndex)); + } + + ma_node_process_pcm_frames_internal(pNode, NULL, &frameCountIn, ppFramesOut, &frameCountOut); + totalFramesRead = frameCountOut; + } else { + /* Slow path. Need to read input data. */ + if ((pNodeBase->vtable->flags & MA_NODE_FLAG_PASSTHROUGH) != 0) { + /* + Fast path. We're running a passthrough. We need to read directly into the output buffer, but + still fire the callback so that event handling and trigger nodes can do their thing. Since + it's a passthrough there's no need for any kind of caching logic. + */ + MA_ASSERT(outputBusCount == inputBusCount); + MA_ASSERT(outputBusCount == 1); + MA_ASSERT(outputBusIndex == 0); + + /* We just read directly from input bus to output buffer, and then afterwards fire the callback. */ + ppFramesOut[0] = pFramesOut; + ppFramesIn[0] = ppFramesOut[0]; + + result = ma_node_input_bus_read_pcm_frames(pNodeBase, &pNodeBase->pInputBuses[0], ppFramesIn[0], frameCount, &totalFramesRead, globalTime); + if (result == MA_SUCCESS) { + /* Even though it's a passthrough, we still need to fire the callback. */ + frameCountIn = totalFramesRead; + frameCountOut = totalFramesRead; + + if (totalFramesRead > 0) { + ma_node_process_pcm_frames_internal(pNode, (const float**)ppFramesIn, &frameCountIn, ppFramesOut, &frameCountOut); /* From GCC: expected 'const float **' but argument is of type 'float **'. Shouldn't this be implicit? Excplicit cast to silence the warning. */ + } + + /* + A passthrough should never have modified the input and output frame counts. If you're + triggering these assers you need to fix your processing callback. + */ + MA_ASSERT(frameCountIn == totalFramesRead); + MA_ASSERT(frameCountOut == totalFramesRead); + } + } else { + /* Slow path. Need to do caching. 
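+            (The large comment below explains how input is pulled into the cache. One detail worth calling
+            out: nodes that consume input at a different rate than they produce output can implement the
+            optional onGetRequiredInputFrameCount vtable callback to hint how much input they need per
+            output request. As a rough sketch, and assuming the callback signature declared in
+            ma_node_vtable, a node consuming two input frames per output frame might use:
+
+                static ma_result my_node_get_required_input_frame_count(ma_node* pNode, ma_uint32 outputFrameCount, ma_uint32* pInputFrameCount)
+                {
+                    *pInputFrameCount = outputFrameCount * 2;
+                    (void)pNode;
+                    return MA_SUCCESS;
+                }
+
+            The factor of two is purely illustrative.)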
*/ + ma_uint32 framesToProcessIn; + ma_uint32 framesToProcessOut; + ma_bool32 consumeNullInput = MA_FALSE; + + /* + We use frameCount as a basis for the number of frames to read since that's what's being + requested, however we still need to clamp it to whatever can fit in the cache. + + This will also be used as the basis for determining how many input frames to read. This is + not ideal because it can result in too many input frames being read which introduces latency. + To solve this, nodes can implement an optional callback called onGetRequiredInputFrameCount + which is used as hint to miniaudio as to how many input frames it needs to read at a time. This + callback is completely optional, and if it's not set, miniaudio will assume `frameCount`. + + This function will be called multiple times for each period of time, once for each output node. + We cannot read from each input node each time this function is called. Instead we need to check + whether or not this is first output bus to be read from for this time period, and if so, read + from our input data. + + To determine whether or not we're ready to read data, we check a flag. There will be one flag + for each output. When the flag is set, it means data has been read previously and that we're + ready to advance time forward for our input nodes by reading fresh data. + */ + framesToProcessOut = frameCount; + if (framesToProcessOut > pNodeBase->cachedDataCapInFramesPerBus) { + framesToProcessOut = pNodeBase->cachedDataCapInFramesPerBus; + } + + framesToProcessIn = frameCount; + if (pNodeBase->vtable->onGetRequiredInputFrameCount) { + pNodeBase->vtable->onGetRequiredInputFrameCount(pNode, framesToProcessOut, &framesToProcessIn); /* <-- It does not matter if this fails. */ + } + if (framesToProcessIn > pNodeBase->cachedDataCapInFramesPerBus) { + framesToProcessIn = pNodeBase->cachedDataCapInFramesPerBus; + } + + + MA_ASSERT(framesToProcessIn <= 0xFFFF); + MA_ASSERT(framesToProcessOut <= 0xFFFF); + + if (ma_node_output_bus_has_read(&pNodeBase->pOutputBuses[outputBusIndex])) { + /* Getting here means we need to do another round of processing. */ + pNodeBase->cachedFrameCountOut = 0; + + for (;;) { + frameCountOut = 0; + + /* + We need to prepare our output frame pointers for processing. In the same iteration we need + to mark every output bus as unread so that future calls to this function for different buses + for the current time period don't pull in data when they should instead be reading from cache. + */ + for (iOutputBus = 0; iOutputBus < outputBusCount; iOutputBus += 1) { + ma_node_output_bus_set_has_read(&pNodeBase->pOutputBuses[iOutputBus], MA_FALSE); /* <-- This is what tells the next calls to this function for other output buses for this time period to read from cache instead of pulling in more data. */ + ppFramesOut[iOutputBus] = ma_node_get_cached_output_ptr(pNode, iOutputBus); + } + + /* We only need to read from input buses if there isn't already some data in the cache. */ + if (pNodeBase->cachedFrameCountIn == 0) { + ma_uint32 maxFramesReadIn = 0; + + /* Here is where we pull in data from the input buses. This is what will trigger an advance in time. */ + for (iInputBus = 0; iInputBus < inputBusCount; iInputBus += 1) { + ma_uint32 framesRead; + + /* The first thing to do is get the offset within our bulk allocation to store this input data. */ + ppFramesIn[iInputBus] = ma_node_get_cached_input_ptr(pNode, iInputBus); + + /* Once we've determined our destination pointer we can read. 
Note that we must inspect the number of frames read and fill any leftovers with silence for safety. */ + result = ma_node_input_bus_read_pcm_frames(pNodeBase, &pNodeBase->pInputBuses[iInputBus], ppFramesIn[iInputBus], framesToProcessIn, &framesRead, globalTime); + if (result != MA_SUCCESS) { + /* It doesn't really matter if we fail because we'll just fill with silence. */ + framesRead = 0; /* Just for safety, but I don't think it's really needed. */ + } + + /* TODO: Minor optimization opportunity here. If no frames were read and the buffer is already filled with silence, no need to re-silence it. */ + /* Any leftover frames need to silenced for safety. */ + if (framesRead < framesToProcessIn) { + ma_silence_pcm_frames(ppFramesIn[iInputBus] + (framesRead * ma_node_get_input_channels(pNodeBase, iInputBus)), (framesToProcessIn - framesRead), ma_format_f32, ma_node_get_input_channels(pNodeBase, iInputBus)); + } + + maxFramesReadIn = ma_max(maxFramesReadIn, framesRead); + } + + /* This was a fresh load of input data so reset our consumption counter. */ + pNodeBase->consumedFrameCountIn = 0; + + /* + We don't want to keep processing if there's nothing to process, so set the number of cached + input frames to the maximum number we read from each attachment (the lesser will be padded + with silence). If we didn't read anything, this will be set to 0 and the entire buffer will + have been assigned to silence. This being equal to 0 is an important property for us because + it allows us to detect when NULL can be passed into the processing callback for the input + buffer for the purpose of continuous processing. + */ + pNodeBase->cachedFrameCountIn = (ma_uint16)maxFramesReadIn; + } else { + /* We don't need to read anything, but we do need to prepare our input frame pointers. */ + for (iInputBus = 0; iInputBus < inputBusCount; iInputBus += 1) { + ppFramesIn[iInputBus] = ma_node_get_cached_input_ptr(pNode, iInputBus) + (pNodeBase->consumedFrameCountIn * ma_node_get_input_channels(pNodeBase, iInputBus)); + } + } + + /* + At this point we have our input data so now we need to do some processing. Sneaky little + optimization here - we can set the pointer to the output buffer for this output bus so + that the final copy into the output buffer is done directly by onProcess(). + */ + if (pFramesOut != NULL) { + ppFramesOut[outputBusIndex] = ma_offset_pcm_frames_ptr_f32(pFramesOut, pNodeBase->cachedFrameCountOut, ma_node_get_output_channels(pNode, outputBusIndex)); + } + + + /* Give the processing function the entire capacity of the output buffer. */ + frameCountOut = (framesToProcessOut - pNodeBase->cachedFrameCountOut); + + /* + We need to treat nodes with continuous processing a little differently. For these ones, + we always want to fire the callback with the requested number of frames, regardless of + pNodeBase->cachedFrameCountIn, which could be 0. Also, we want to check if we can pass + in NULL for the input buffer to the callback. + */ + if ((pNodeBase->vtable->flags & MA_NODE_FLAG_CONTINUOUS_PROCESSING) != 0) { + /* We're using continuous processing. Make sure we specify the whole frame count at all times. */ + frameCountIn = framesToProcessIn; /* Give the processing function as much input data as we've got in the buffer, including any silenced padding from short reads. 
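+                    (MA_NODE_FLAG_CONTINUOUS_PROCESSING is set in a node's vtable flags when the node needs
+                    its processing callback fired even when its inputs have run dry; pairing it with
+                    MA_NODE_FLAG_ALLOW_NULL_INPUT additionally allows a NULL input buffer to be passed in
+                    that case, which is what the consumeNullInput logic below handles. A rough sketch of
+                    such a vtable, where my_reverb_tail_process is an illustrative callback name:
+
+                        static ma_node_vtable my_reverb_tail_vtable = {
+                            my_reverb_tail_process, NULL, 1, 1,
+                            MA_NODE_FLAG_CONTINUOUS_PROCESSING | MA_NODE_FLAG_ALLOW_NULL_INPUT
+                        };
+                    )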
*/ + + if ((pNodeBase->vtable->flags & MA_NODE_FLAG_ALLOW_NULL_INPUT) != 0 && pNodeBase->consumedFrameCountIn == 0 && pNodeBase->cachedFrameCountIn == 0) { + consumeNullInput = MA_TRUE; + } else { + consumeNullInput = MA_FALSE; + } + + /* + Since we're using continuous processing we're always passing in a full frame count + regardless of how much input data was read. If this is greater than what we read as + input, we'll end up with an underflow. We instead need to make sure our cached frame + count is set to the number of frames we'll be passing to the data callback. Not + doing this will result in an underflow when we "consume" the cached data later on. + + Note that this check needs to be done after the "consumeNullInput" check above because + we use the property of cachedFrameCountIn being 0 to determine whether or not we + should be passing in a null pointer to the processing callback for when the node is + configured with MA_NODE_FLAG_ALLOW_NULL_INPUT. + */ + if (pNodeBase->cachedFrameCountIn < (ma_uint16)frameCountIn) { + pNodeBase->cachedFrameCountIn = (ma_uint16)frameCountIn; + } + } else { + frameCountIn = pNodeBase->cachedFrameCountIn; /* Give the processing function as much valid input data as we've got. */ + consumeNullInput = MA_FALSE; + } + + /* + Process data slightly differently depending on whether or not we're consuming NULL + input (checked just above). + */ + if (consumeNullInput) { + ma_node_process_pcm_frames_internal(pNode, NULL, &frameCountIn, ppFramesOut, &frameCountOut); + } else { + /* + We want to skip processing if there's no input data, but we can only do that safely if + we know that there is no chance of any output frames being produced. If continuous + processing is being used, this won't be a problem because the input frame count will + always be non-0. However, if continuous processing is *not* enabled and input and output + data is processed at different rates, we still need to process that last input frame + because there could be a few excess output frames needing to be produced from cached + data. The `MA_NODE_FLAG_DIFFERENT_PROCESSING_RATES` flag is used as the indicator for + determining whether or not we need to process the node even when there are no input + frames available right now. + */ + if (frameCountIn > 0 || (pNodeBase->vtable->flags & MA_NODE_FLAG_DIFFERENT_PROCESSING_RATES) != 0) { + ma_node_process_pcm_frames_internal(pNode, (const float**)ppFramesIn, &frameCountIn, ppFramesOut, &frameCountOut); /* From GCC: expected 'const float **' but argument is of type 'float **'. Shouldn't this be implicit? Excplicit cast to silence the warning. */ + } else { + frameCountOut = 0; /* No data was processed. */ + } + } + + /* + Thanks to our sneaky optimization above we don't need to do any data copying directly into + the output buffer - the onProcess() callback just did that for us. We do, however, need to + apply the number of input and output frames that were processed. Note that due to continuous + processing above, we need to do explicit checks here. If we just consumed a NULL input + buffer it means that no actual input data was processed from the internal buffers and we + don't want to be modifying any counters. + */ + if (consumeNullInput == MA_FALSE) { + pNodeBase->consumedFrameCountIn += (ma_uint16)frameCountIn; + pNodeBase->cachedFrameCountIn -= (ma_uint16)frameCountIn; + } + + /* The cached output frame count is always equal to what we just read. 
*/ + pNodeBase->cachedFrameCountOut += (ma_uint16)frameCountOut; + + /* If we couldn't process any data, we're done. The loop needs to be terminated here or else we'll get stuck in a loop. */ + if (pNodeBase->cachedFrameCountOut == framesToProcessOut || (frameCountOut == 0 && frameCountIn == 0)) { + break; + } + } + } else { + /* + We're not needing to read anything from the input buffer so just read directly from our + already-processed data. + */ + if (pFramesOut != NULL) { + ma_copy_pcm_frames(pFramesOut, ma_node_get_cached_output_ptr(pNodeBase, outputBusIndex), pNodeBase->cachedFrameCountOut, ma_format_f32, ma_node_get_output_channels(pNodeBase, outputBusIndex)); + } + } + + /* The number of frames read is always equal to the number of cached output frames. */ + totalFramesRead = pNodeBase->cachedFrameCountOut; + + /* Now that we've read the data, make sure our read flag is set. */ + ma_node_output_bus_set_has_read(&pNodeBase->pOutputBuses[outputBusIndex], MA_TRUE); + } + } + + /* Apply volume, if necessary. */ + ma_apply_volume_factor_f32(pFramesOut, totalFramesRead * ma_node_get_output_channels(pNodeBase, outputBusIndex), ma_node_output_bus_get_volume(&pNodeBase->pOutputBuses[outputBusIndex])); + + /* Advance our local time forward. */ + ma_atomic_fetch_add_64(&pNodeBase->localTime, (ma_uint64)totalFramesRead); + + *pFramesRead = totalFramesRead + timeOffsetBeg; /* Must include the silenced section at the start of the buffer. */ + return result; +} + + + + +/* Data source node. */ +MA_API ma_data_source_node_config ma_data_source_node_config_init(ma_data_source* pDataSource) +{ + ma_data_source_node_config config; + + MA_ZERO_OBJECT(&config); + config.nodeConfig = ma_node_config_init(); + config.pDataSource = pDataSource; + + return config; +} + + +static void ma_data_source_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_data_source_node* pDataSourceNode = (ma_data_source_node*)pNode; + ma_format format; + ma_uint32 channels; + ma_uint32 frameCount; + ma_uint64 framesRead = 0; + + MA_ASSERT(pDataSourceNode != NULL); + MA_ASSERT(pDataSourceNode->pDataSource != NULL); + MA_ASSERT(ma_node_get_input_bus_count(pDataSourceNode) == 0); + MA_ASSERT(ma_node_get_output_bus_count(pDataSourceNode) == 1); + + /* We don't want to read from ppFramesIn at all. Instead we read from the data source. */ + (void)ppFramesIn; + (void)pFrameCountIn; + + frameCount = *pFrameCountOut; + + /* miniaudio should never be calling this with a frame count of zero. */ + MA_ASSERT(frameCount > 0); + + if (ma_data_source_get_data_format(pDataSourceNode->pDataSource, &format, &channels, NULL, NULL, 0) == MA_SUCCESS) { /* <-- Don't care about sample rate here. */ + /* The node graph system requires samples be in floating point format. This is checked in ma_data_source_node_init(). */ + MA_ASSERT(format == ma_format_f32); + (void)format; /* Just to silence some static analysis tools. */ + + ma_data_source_read_pcm_frames(pDataSourceNode->pDataSource, ppFramesOut[0], frameCount, &framesRead); + } + + *pFrameCountOut = (ma_uint32)framesRead; +} + +static ma_node_vtable g_ma_data_source_node_vtable = +{ + ma_data_source_node_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 0, /* 0 input buses. */ + 1, /* 1 output bus. 
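+    The remaining field below is the vtable's flags; the data source node does not need any special flags.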
*/ + 0 +}; + +MA_API ma_result ma_data_source_node_init(ma_node_graph* pNodeGraph, const ma_data_source_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source_node* pDataSourceNode) +{ + ma_result result; + ma_format format; /* For validating the format, which must be ma_format_f32. */ + ma_uint32 channels; /* For specifying the channel count of the output bus. */ + ma_node_config baseConfig; + + if (pDataSourceNode == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pDataSourceNode); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_data_source_get_data_format(pConfig->pDataSource, &format, &channels, NULL, NULL, 0); /* Don't care about sample rate. This will check pDataSource for NULL. */ + if (result != MA_SUCCESS) { + return result; + } + + MA_ASSERT(format == ma_format_f32); /* <-- If you've triggered this it means your data source is not outputting floating-point samples. You must configure your data source to use ma_format_f32. */ + if (format != ma_format_f32) { + return MA_INVALID_ARGS; /* Invalid format. */ + } + + /* The channel count is defined by the data source. If the caller has manually changed the channels we just ignore it. */ + baseConfig = pConfig->nodeConfig; + baseConfig.vtable = &g_ma_data_source_node_vtable; /* Explicitly set the vtable here to prevent callers from setting it incorrectly. */ + + /* + The channel count is defined by the data source. It is invalid for the caller to manually set + the channel counts in the config. `ma_data_source_node_config_init()` will have defaulted the + channel count pointer to NULL which is how it must remain. If you trigger any of these asserts + it means you're explicitly setting the channel count. Instead, configure the output channel + count of your data source to be the necessary channel count. + */ + if (baseConfig.pOutputChannels != NULL) { + return MA_INVALID_ARGS; + } + + baseConfig.pOutputChannels = &channels; + + result = ma_node_init(pNodeGraph, &baseConfig, pAllocationCallbacks, &pDataSourceNode->base); + if (result != MA_SUCCESS) { + return result; + } + + pDataSourceNode->pDataSource = pConfig->pDataSource; + + return MA_SUCCESS; +} + +MA_API void ma_data_source_node_uninit(ma_data_source_node* pDataSourceNode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_node_uninit(&pDataSourceNode->base, pAllocationCallbacks); +} + +MA_API ma_result ma_data_source_node_set_looping(ma_data_source_node* pDataSourceNode, ma_bool32 isLooping) +{ + if (pDataSourceNode == NULL) { + return MA_INVALID_ARGS; + } + + return ma_data_source_set_looping(pDataSourceNode->pDataSource, isLooping); +} + +MA_API ma_bool32 ma_data_source_node_is_looping(ma_data_source_node* pDataSourceNode) +{ + if (pDataSourceNode == NULL) { + return MA_FALSE; + } + + return ma_data_source_is_looping(pDataSourceNode->pDataSource); +} + + + +/* Splitter Node. 
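+
+A splitter duplicates its single input bus onto each of its output buses. As a rough usage sketch (the
+source and effect nodes here are illustrative and must have matching channel counts), a stereo source can
+be fanned out into two chains like so:
+
+    ma_splitter_node_config splitterConfig = ma_splitter_node_config_init(2);
+
+    ma_splitter_node splitter;
+    ma_splitter_node_init(&nodeGraph, &splitterConfig, NULL, &splitter);
+
+    ma_node_attach_output_bus(&source, 0, &splitter, 0);
+    ma_node_attach_output_bus(&splitter, 0, &effectA, 0);
+    ma_node_attach_output_bus(&splitter, 1, &effectB, 0);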
*/ +MA_API ma_splitter_node_config ma_splitter_node_config_init(ma_uint32 channels) +{ + ma_splitter_node_config config; + + MA_ZERO_OBJECT(&config); + config.nodeConfig = ma_node_config_init(); + config.channels = channels; + config.outputBusCount = 2; + + return config; +} + + +static void ma_splitter_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_node_base* pNodeBase = (ma_node_base*)pNode; + ma_uint32 iOutputBus; + ma_uint32 channels; + + MA_ASSERT(pNodeBase != NULL); + MA_ASSERT(ma_node_get_input_bus_count(pNodeBase) == 1); + + /* We don't need to consider the input frame count - it'll be the same as the output frame count and we process everything. */ + (void)pFrameCountIn; + + /* NOTE: This assumes the same number of channels for all inputs and outputs. This was checked in ma_splitter_node_init(). */ + channels = ma_node_get_input_channels(pNodeBase, 0); + + /* Splitting is just copying the first input bus and copying it over to each output bus. */ + for (iOutputBus = 0; iOutputBus < ma_node_get_output_bus_count(pNodeBase); iOutputBus += 1) { + ma_copy_pcm_frames(ppFramesOut[iOutputBus], ppFramesIn[0], *pFrameCountOut, ma_format_f32, channels); + } +} + +static ma_node_vtable g_ma_splitter_node_vtable = +{ + ma_splitter_node_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 1, /* 1 input bus. */ + MA_NODE_BUS_COUNT_UNKNOWN, /* The output bus count is specified on a per-node basis. */ + 0 +}; + +MA_API ma_result ma_splitter_node_init(ma_node_graph* pNodeGraph, const ma_splitter_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_splitter_node* pSplitterNode) +{ + ma_result result; + ma_node_config baseConfig; + ma_uint32 pInputChannels[1]; + ma_uint32 pOutputChannels[MA_MAX_NODE_BUS_COUNT]; + ma_uint32 iOutputBus; + + if (pSplitterNode == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pSplitterNode); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->outputBusCount > MA_MAX_NODE_BUS_COUNT) { + return MA_INVALID_ARGS; /* Too many output buses. */ + } + + /* Splitters require the same number of channels between inputs and outputs. */ + pInputChannels[0] = pConfig->channels; + for (iOutputBus = 0; iOutputBus < pConfig->outputBusCount; iOutputBus += 1) { + pOutputChannels[iOutputBus] = pConfig->channels; + } + + baseConfig = pConfig->nodeConfig; + baseConfig.vtable = &g_ma_splitter_node_vtable; + baseConfig.pInputChannels = pInputChannels; + baseConfig.pOutputChannels = pOutputChannels; + baseConfig.outputBusCount = pConfig->outputBusCount; + + result = ma_node_init(pNodeGraph, &baseConfig, pAllocationCallbacks, &pSplitterNode->base); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the base node. 
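+        Nothing needs to be torn down here since the splitter allocates nothing of its own before ma_node_init().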
*/ + } + + return MA_SUCCESS; +} + +MA_API void ma_splitter_node_uninit(ma_splitter_node* pSplitterNode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_node_uninit(pSplitterNode, pAllocationCallbacks); +} + + +/* +Biquad Node +*/ +MA_API ma_biquad_node_config ma_biquad_node_config_init(ma_uint32 channels, float b0, float b1, float b2, float a0, float a1, float a2) +{ + ma_biquad_node_config config; + + config.nodeConfig = ma_node_config_init(); + config.biquad = ma_biquad_config_init(ma_format_f32, channels, b0, b1, b2, a0, a1, a2); + + return config; +} + +static void ma_biquad_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_biquad_node* pLPFNode = (ma_biquad_node*)pNode; + + MA_ASSERT(pNode != NULL); + (void)pFrameCountIn; + + ma_biquad_process_pcm_frames(&pLPFNode->biquad, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut); +} + +static ma_node_vtable g_ma_biquad_node_vtable = +{ + ma_biquad_node_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 1, /* One input. */ + 1, /* One output. */ + 0 /* Default flags. */ +}; + +MA_API ma_result ma_biquad_node_init(ma_node_graph* pNodeGraph, const ma_biquad_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_biquad_node* pNode) +{ + ma_result result; + ma_node_config baseNodeConfig; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNode); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->biquad.format != ma_format_f32) { + return MA_INVALID_ARGS; /* The format must be f32. */ + } + + result = ma_biquad_init(&pConfig->biquad, pAllocationCallbacks, &pNode->biquad); + if (result != MA_SUCCESS) { + return result; + } + + baseNodeConfig = ma_node_config_init(); + baseNodeConfig.vtable = &g_ma_biquad_node_vtable; + baseNodeConfig.pInputChannels = &pConfig->biquad.channels; + baseNodeConfig.pOutputChannels = &pConfig->biquad.channels; + + result = ma_node_init(pNodeGraph, &baseNodeConfig, pAllocationCallbacks, pNode); + if (result != MA_SUCCESS) { + return result; + } + + return result; +} + +MA_API ma_result ma_biquad_node_reinit(const ma_biquad_config* pConfig, ma_biquad_node* pNode) +{ + ma_biquad_node* pLPFNode = (ma_biquad_node*)pNode; + + MA_ASSERT(pNode != NULL); + + return ma_biquad_reinit(pConfig, &pLPFNode->biquad); +} + +MA_API void ma_biquad_node_uninit(ma_biquad_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_biquad_node* pLPFNode = (ma_biquad_node*)pNode; + + if (pNode == NULL) { + return; + } + + ma_node_uninit(pNode, pAllocationCallbacks); + ma_biquad_uninit(&pLPFNode->biquad, pAllocationCallbacks); +} + + + +/* +Low Pass Filter Node +*/ +MA_API ma_lpf_node_config ma_lpf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order) +{ + ma_lpf_node_config config; + + config.nodeConfig = ma_node_config_init(); + config.lpf = ma_lpf_config_init(ma_format_f32, channels, sampleRate, cutoffFrequency, order); + + return config; +} + +static void ma_lpf_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_lpf_node* pLPFNode = (ma_lpf_node*)pNode; + + MA_ASSERT(pNode != NULL); + (void)pFrameCountIn; + + ma_lpf_process_pcm_frames(&pLPFNode->lpf, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut); +} + +static ma_node_vtable g_ma_lpf_node_vtable = +{ + ma_lpf_node_process_pcm_frames, + NULL, /* 
onGetRequiredInputFrameCount */ + 1, /* One input. */ + 1, /* One output. */ + 0 /* Default flags. */ +}; + +MA_API ma_result ma_lpf_node_init(ma_node_graph* pNodeGraph, const ma_lpf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_lpf_node* pNode) +{ + ma_result result; + ma_node_config baseNodeConfig; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNode); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->lpf.format != ma_format_f32) { + return MA_INVALID_ARGS; /* The format must be f32. */ + } + + result = ma_lpf_init(&pConfig->lpf, pAllocationCallbacks, &pNode->lpf); + if (result != MA_SUCCESS) { + return result; + } + + baseNodeConfig = ma_node_config_init(); + baseNodeConfig.vtable = &g_ma_lpf_node_vtable; + baseNodeConfig.pInputChannels = &pConfig->lpf.channels; + baseNodeConfig.pOutputChannels = &pConfig->lpf.channels; + + result = ma_node_init(pNodeGraph, &baseNodeConfig, pAllocationCallbacks, pNode); + if (result != MA_SUCCESS) { + return result; + } + + return result; +} + +MA_API ma_result ma_lpf_node_reinit(const ma_lpf_config* pConfig, ma_lpf_node* pNode) +{ + ma_lpf_node* pLPFNode = (ma_lpf_node*)pNode; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + return ma_lpf_reinit(pConfig, &pLPFNode->lpf); +} + +MA_API void ma_lpf_node_uninit(ma_lpf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_lpf_node* pLPFNode = (ma_lpf_node*)pNode; + + if (pNode == NULL) { + return; + } + + ma_node_uninit(pNode, pAllocationCallbacks); + ma_lpf_uninit(&pLPFNode->lpf, pAllocationCallbacks); +} + + + +/* +High Pass Filter Node +*/ +MA_API ma_hpf_node_config ma_hpf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order) +{ + ma_hpf_node_config config; + + config.nodeConfig = ma_node_config_init(); + config.hpf = ma_hpf_config_init(ma_format_f32, channels, sampleRate, cutoffFrequency, order); + + return config; +} + +static void ma_hpf_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_hpf_node* pHPFNode = (ma_hpf_node*)pNode; + + MA_ASSERT(pNode != NULL); + (void)pFrameCountIn; + + ma_hpf_process_pcm_frames(&pHPFNode->hpf, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut); +} + +static ma_node_vtable g_ma_hpf_node_vtable = +{ + ma_hpf_node_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 1, /* One input. */ + 1, /* One output. */ + 0 /* Default flags. */ +}; + +MA_API ma_result ma_hpf_node_init(ma_node_graph* pNodeGraph, const ma_hpf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hpf_node* pNode) +{ + ma_result result; + ma_node_config baseNodeConfig; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNode); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->hpf.format != ma_format_f32) { + return MA_INVALID_ARGS; /* The format must be f32. 
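+        The node graph processes f32 exclusively, which is why the format is validated here rather than
+        converted. For reference, a rough usage sketch with illustrative parameter values (stereo, 48kHz,
+        8kHz cutoff, 4th order):
+
+            ma_lpf_node_config lpfConfig = ma_lpf_node_config_init(2, 48000, 8000, 4);
+
+            ma_lpf_node lpfNode;
+            ma_lpf_node_init(&nodeGraph, &lpfConfig, NULL, &lpfNode);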
*/ + } + + result = ma_hpf_init(&pConfig->hpf, pAllocationCallbacks, &pNode->hpf); + if (result != MA_SUCCESS) { + return result; + } + + baseNodeConfig = ma_node_config_init(); + baseNodeConfig.vtable = &g_ma_hpf_node_vtable; + baseNodeConfig.pInputChannels = &pConfig->hpf.channels; + baseNodeConfig.pOutputChannels = &pConfig->hpf.channels; + + result = ma_node_init(pNodeGraph, &baseNodeConfig, pAllocationCallbacks, pNode); + if (result != MA_SUCCESS) { + return result; + } + + return result; +} + +MA_API ma_result ma_hpf_node_reinit(const ma_hpf_config* pConfig, ma_hpf_node* pNode) +{ + ma_hpf_node* pHPFNode = (ma_hpf_node*)pNode; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + return ma_hpf_reinit(pConfig, &pHPFNode->hpf); +} + +MA_API void ma_hpf_node_uninit(ma_hpf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_hpf_node* pHPFNode = (ma_hpf_node*)pNode; + + if (pNode == NULL) { + return; + } + + ma_node_uninit(pNode, pAllocationCallbacks); + ma_hpf_uninit(&pHPFNode->hpf, pAllocationCallbacks); +} + + + + +/* +Band Pass Filter Node +*/ +MA_API ma_bpf_node_config ma_bpf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order) +{ + ma_bpf_node_config config; + + config.nodeConfig = ma_node_config_init(); + config.bpf = ma_bpf_config_init(ma_format_f32, channels, sampleRate, cutoffFrequency, order); + + return config; +} + +static void ma_bpf_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_bpf_node* pBPFNode = (ma_bpf_node*)pNode; + + MA_ASSERT(pNode != NULL); + (void)pFrameCountIn; + + ma_bpf_process_pcm_frames(&pBPFNode->bpf, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut); +} + +static ma_node_vtable g_ma_bpf_node_vtable = +{ + ma_bpf_node_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 1, /* One input. */ + 1, /* One output. */ + 0 /* Default flags. */ +}; + +MA_API ma_result ma_bpf_node_init(ma_node_graph* pNodeGraph, const ma_bpf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_bpf_node* pNode) +{ + ma_result result; + ma_node_config baseNodeConfig; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNode); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->bpf.format != ma_format_f32) { + return MA_INVALID_ARGS; /* The format must be f32. 
*/ + } + + result = ma_bpf_init(&pConfig->bpf, pAllocationCallbacks, &pNode->bpf); + if (result != MA_SUCCESS) { + return result; + } + + baseNodeConfig = ma_node_config_init(); + baseNodeConfig.vtable = &g_ma_bpf_node_vtable; + baseNodeConfig.pInputChannels = &pConfig->bpf.channels; + baseNodeConfig.pOutputChannels = &pConfig->bpf.channels; + + result = ma_node_init(pNodeGraph, &baseNodeConfig, pAllocationCallbacks, pNode); + if (result != MA_SUCCESS) { + return result; + } + + return result; +} + +MA_API ma_result ma_bpf_node_reinit(const ma_bpf_config* pConfig, ma_bpf_node* pNode) +{ + ma_bpf_node* pBPFNode = (ma_bpf_node*)pNode; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + return ma_bpf_reinit(pConfig, &pBPFNode->bpf); +} + +MA_API void ma_bpf_node_uninit(ma_bpf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_bpf_node* pBPFNode = (ma_bpf_node*)pNode; + + if (pNode == NULL) { + return; + } + + ma_node_uninit(pNode, pAllocationCallbacks); + ma_bpf_uninit(&pBPFNode->bpf, pAllocationCallbacks); +} + + + +/* +Notching Filter Node +*/ +MA_API ma_notch_node_config ma_notch_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double q, double frequency) +{ + ma_notch_node_config config; + + config.nodeConfig = ma_node_config_init(); + config.notch = ma_notch2_config_init(ma_format_f32, channels, sampleRate, q, frequency); + + return config; +} + +static void ma_notch_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_notch_node* pBPFNode = (ma_notch_node*)pNode; + + MA_ASSERT(pNode != NULL); + (void)pFrameCountIn; + + ma_notch2_process_pcm_frames(&pBPFNode->notch, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut); +} + +static ma_node_vtable g_ma_notch_node_vtable = +{ + ma_notch_node_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 1, /* One input. */ + 1, /* One output. */ + 0 /* Default flags. */ +}; + +MA_API ma_result ma_notch_node_init(ma_node_graph* pNodeGraph, const ma_notch_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_notch_node* pNode) +{ + ma_result result; + ma_node_config baseNodeConfig; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNode); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->notch.format != ma_format_f32) { + return MA_INVALID_ARGS; /* The format must be f32. 
*/ + } + + result = ma_notch2_init(&pConfig->notch, pAllocationCallbacks, &pNode->notch); + if (result != MA_SUCCESS) { + return result; + } + + baseNodeConfig = ma_node_config_init(); + baseNodeConfig.vtable = &g_ma_notch_node_vtable; + baseNodeConfig.pInputChannels = &pConfig->notch.channels; + baseNodeConfig.pOutputChannels = &pConfig->notch.channels; + + result = ma_node_init(pNodeGraph, &baseNodeConfig, pAllocationCallbacks, pNode); + if (result != MA_SUCCESS) { + return result; + } + + return result; +} + +MA_API ma_result ma_notch_node_reinit(const ma_notch_config* pConfig, ma_notch_node* pNode) +{ + ma_notch_node* pNotchNode = (ma_notch_node*)pNode; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + return ma_notch2_reinit(pConfig, &pNotchNode->notch); +} + +MA_API void ma_notch_node_uninit(ma_notch_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_notch_node* pNotchNode = (ma_notch_node*)pNode; + + if (pNode == NULL) { + return; + } + + ma_node_uninit(pNode, pAllocationCallbacks); + ma_notch2_uninit(&pNotchNode->notch, pAllocationCallbacks); +} + + + +/* +Peaking Filter Node +*/ +MA_API ma_peak_node_config ma_peak_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency) +{ + ma_peak_node_config config; + + config.nodeConfig = ma_node_config_init(); + config.peak = ma_peak2_config_init(ma_format_f32, channels, sampleRate, gainDB, q, frequency); + + return config; +} + +static void ma_peak_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_peak_node* pBPFNode = (ma_peak_node*)pNode; + + MA_ASSERT(pNode != NULL); + (void)pFrameCountIn; + + ma_peak2_process_pcm_frames(&pBPFNode->peak, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut); +} + +static ma_node_vtable g_ma_peak_node_vtable = +{ + ma_peak_node_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 1, /* One input. */ + 1, /* One output. */ + 0 /* Default flags. */ +}; + +MA_API ma_result ma_peak_node_init(ma_node_graph* pNodeGraph, const ma_peak_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_peak_node* pNode) +{ + ma_result result; + ma_node_config baseNodeConfig; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNode); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->peak.format != ma_format_f32) { + return MA_INVALID_ARGS; /* The format must be f32. 
*/ + } + + result = ma_peak2_init(&pConfig->peak, pAllocationCallbacks, &pNode->peak); + if (result != MA_SUCCESS) { + ma_node_uninit(pNode, pAllocationCallbacks); + return result; + } + + baseNodeConfig = ma_node_config_init(); + baseNodeConfig.vtable = &g_ma_peak_node_vtable; + baseNodeConfig.pInputChannels = &pConfig->peak.channels; + baseNodeConfig.pOutputChannels = &pConfig->peak.channels; + + result = ma_node_init(pNodeGraph, &baseNodeConfig, pAllocationCallbacks, pNode); + if (result != MA_SUCCESS) { + return result; + } + + return result; +} + +MA_API ma_result ma_peak_node_reinit(const ma_peak_config* pConfig, ma_peak_node* pNode) +{ + ma_peak_node* pPeakNode = (ma_peak_node*)pNode; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + return ma_peak2_reinit(pConfig, &pPeakNode->peak); +} + +MA_API void ma_peak_node_uninit(ma_peak_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_peak_node* pPeakNode = (ma_peak_node*)pNode; + + if (pNode == NULL) { + return; + } + + ma_node_uninit(pNode, pAllocationCallbacks); + ma_peak2_uninit(&pPeakNode->peak, pAllocationCallbacks); +} + + + +/* +Low Shelf Filter Node +*/ +MA_API ma_loshelf_node_config ma_loshelf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency) +{ + ma_loshelf_node_config config; + + config.nodeConfig = ma_node_config_init(); + config.loshelf = ma_loshelf2_config_init(ma_format_f32, channels, sampleRate, gainDB, q, frequency); + + return config; +} + +static void ma_loshelf_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_loshelf_node* pBPFNode = (ma_loshelf_node*)pNode; + + MA_ASSERT(pNode != NULL); + (void)pFrameCountIn; + + ma_loshelf2_process_pcm_frames(&pBPFNode->loshelf, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut); +} + +static ma_node_vtable g_ma_loshelf_node_vtable = +{ + ma_loshelf_node_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 1, /* One input. */ + 1, /* One output. */ + 0 /* Default flags. */ +}; + +MA_API ma_result ma_loshelf_node_init(ma_node_graph* pNodeGraph, const ma_loshelf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_loshelf_node* pNode) +{ + ma_result result; + ma_node_config baseNodeConfig; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNode); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->loshelf.format != ma_format_f32) { + return MA_INVALID_ARGS; /* The format must be f32. 
*/ + } + + result = ma_loshelf2_init(&pConfig->loshelf, pAllocationCallbacks, &pNode->loshelf); + if (result != MA_SUCCESS) { + return result; + } + + baseNodeConfig = ma_node_config_init(); + baseNodeConfig.vtable = &g_ma_loshelf_node_vtable; + baseNodeConfig.pInputChannels = &pConfig->loshelf.channels; + baseNodeConfig.pOutputChannels = &pConfig->loshelf.channels; + + result = ma_node_init(pNodeGraph, &baseNodeConfig, pAllocationCallbacks, pNode); + if (result != MA_SUCCESS) { + return result; + } + + return result; +} + +MA_API ma_result ma_loshelf_node_reinit(const ma_loshelf_config* pConfig, ma_loshelf_node* pNode) +{ + ma_loshelf_node* pLoshelfNode = (ma_loshelf_node*)pNode; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + return ma_loshelf2_reinit(pConfig, &pLoshelfNode->loshelf); +} + +MA_API void ma_loshelf_node_uninit(ma_loshelf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_loshelf_node* pLoshelfNode = (ma_loshelf_node*)pNode; + + if (pNode == NULL) { + return; + } + + ma_node_uninit(pNode, pAllocationCallbacks); + ma_loshelf2_uninit(&pLoshelfNode->loshelf, pAllocationCallbacks); +} + + + +/* +High Shelf Filter Node +*/ +MA_API ma_hishelf_node_config ma_hishelf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency) +{ + ma_hishelf_node_config config; + + config.nodeConfig = ma_node_config_init(); + config.hishelf = ma_hishelf2_config_init(ma_format_f32, channels, sampleRate, gainDB, q, frequency); + + return config; +} + +static void ma_hishelf_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_hishelf_node* pBPFNode = (ma_hishelf_node*)pNode; + + MA_ASSERT(pNode != NULL); + (void)pFrameCountIn; + + ma_hishelf2_process_pcm_frames(&pBPFNode->hishelf, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut); +} + +static ma_node_vtable g_ma_hishelf_node_vtable = +{ + ma_hishelf_node_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 1, /* One input. */ + 1, /* One output. */ + 0 /* Default flags. */ +}; + +MA_API ma_result ma_hishelf_node_init(ma_node_graph* pNodeGraph, const ma_hishelf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hishelf_node* pNode) +{ + ma_result result; + ma_node_config baseNodeConfig; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNode); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->hishelf.format != ma_format_f32) { + return MA_INVALID_ARGS; /* The format must be f32. 
*/ + } + + result = ma_hishelf2_init(&pConfig->hishelf, pAllocationCallbacks, &pNode->hishelf); + if (result != MA_SUCCESS) { + return result; + } + + baseNodeConfig = ma_node_config_init(); + baseNodeConfig.vtable = &g_ma_hishelf_node_vtable; + baseNodeConfig.pInputChannels = &pConfig->hishelf.channels; + baseNodeConfig.pOutputChannels = &pConfig->hishelf.channels; + + result = ma_node_init(pNodeGraph, &baseNodeConfig, pAllocationCallbacks, pNode); + if (result != MA_SUCCESS) { + return result; + } + + return result; +} + +MA_API ma_result ma_hishelf_node_reinit(const ma_hishelf_config* pConfig, ma_hishelf_node* pNode) +{ + ma_hishelf_node* pHishelfNode = (ma_hishelf_node*)pNode; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + return ma_hishelf2_reinit(pConfig, &pHishelfNode->hishelf); +} + +MA_API void ma_hishelf_node_uninit(ma_hishelf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_hishelf_node* pHishelfNode = (ma_hishelf_node*)pNode; + + if (pNode == NULL) { + return; + } + + ma_node_uninit(pNode, pAllocationCallbacks); + ma_hishelf2_uninit(&pHishelfNode->hishelf, pAllocationCallbacks); +} + + + + +MA_API ma_delay_node_config ma_delay_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 delayInFrames, float decay) +{ + ma_delay_node_config config; + + config.nodeConfig = ma_node_config_init(); + config.delay = ma_delay_config_init(channels, sampleRate, delayInFrames, decay); + + return config; +} + + +static void ma_delay_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_delay_node* pDelayNode = (ma_delay_node*)pNode; + + (void)pFrameCountIn; + + ma_delay_process_pcm_frames(&pDelayNode->delay, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut); +} + +static ma_node_vtable g_ma_delay_node_vtable = +{ + ma_delay_node_process_pcm_frames, + NULL, + 1, /* 1 input channels. */ + 1, /* 1 output channel. */ + MA_NODE_FLAG_CONTINUOUS_PROCESSING /* Delay requires continuous processing to ensure the tail get's processed. */ +}; + +MA_API ma_result ma_delay_node_init(ma_node_graph* pNodeGraph, const ma_delay_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_delay_node* pDelayNode) +{ + ma_result result; + ma_node_config baseConfig; + + if (pDelayNode == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pDelayNode); + + result = ma_delay_init(&pConfig->delay, pAllocationCallbacks, &pDelayNode->delay); + if (result != MA_SUCCESS) { + return result; + } + + baseConfig = pConfig->nodeConfig; + baseConfig.vtable = &g_ma_delay_node_vtable; + baseConfig.pInputChannels = &pConfig->delay.channels; + baseConfig.pOutputChannels = &pConfig->delay.channels; + + result = ma_node_init(pNodeGraph, &baseConfig, pAllocationCallbacks, &pDelayNode->baseNode); + if (result != MA_SUCCESS) { + ma_delay_uninit(&pDelayNode->delay, pAllocationCallbacks); + return result; + } + + return result; +} + +MA_API void ma_delay_node_uninit(ma_delay_node* pDelayNode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pDelayNode == NULL) { + return; + } + + /* The base node is always uninitialized first. 
*/ + ma_node_uninit(pDelayNode, pAllocationCallbacks); + ma_delay_uninit(&pDelayNode->delay, pAllocationCallbacks); +} + +MA_API void ma_delay_node_set_wet(ma_delay_node* pDelayNode, float value) +{ + if (pDelayNode == NULL) { + return; + } + + ma_delay_set_wet(&pDelayNode->delay, value); +} + +MA_API float ma_delay_node_get_wet(const ma_delay_node* pDelayNode) +{ + if (pDelayNode == NULL) { + return 0; + } + + return ma_delay_get_wet(&pDelayNode->delay); +} + +MA_API void ma_delay_node_set_dry(ma_delay_node* pDelayNode, float value) +{ + if (pDelayNode == NULL) { + return; + } + + ma_delay_set_dry(&pDelayNode->delay, value); +} + +MA_API float ma_delay_node_get_dry(const ma_delay_node* pDelayNode) +{ + if (pDelayNode == NULL) { + return 0; + } + + return ma_delay_get_dry(&pDelayNode->delay); +} + +MA_API void ma_delay_node_set_decay(ma_delay_node* pDelayNode, float value) +{ + if (pDelayNode == NULL) { + return; + } + + ma_delay_set_decay(&pDelayNode->delay, value); +} + +MA_API float ma_delay_node_get_decay(const ma_delay_node* pDelayNode) +{ + if (pDelayNode == NULL) { + return 0; + } + + return ma_delay_get_decay(&pDelayNode->delay); +} +#endif /* MA_NO_NODE_GRAPH */ + + +/* SECTION: miniaudio_engine.c */ +#if !defined(MA_NO_ENGINE) && !defined(MA_NO_NODE_GRAPH) +/************************************************************************************************************************************************************** + +Engine + +**************************************************************************************************************************************************************/ +#define MA_SEEK_TARGET_NONE (~(ma_uint64)0) + + +static void ma_sound_set_at_end(ma_sound* pSound, ma_bool32 atEnd) +{ + MA_ASSERT(pSound != NULL); + ma_atomic_exchange_32(&pSound->atEnd, atEnd); + + /* Fire any callbacks or events. 
*/ + if (atEnd) { + if (pSound->endCallback != NULL) { + pSound->endCallback(pSound->pEndCallbackUserData, pSound); + } + } +} + +static ma_bool32 ma_sound_get_at_end(const ma_sound* pSound) +{ + MA_ASSERT(pSound != NULL); + return ma_atomic_load_32(&pSound->atEnd); +} + + +MA_API ma_engine_node_config ma_engine_node_config_init(ma_engine* pEngine, ma_engine_node_type type, ma_uint32 flags) +{ + ma_engine_node_config config; + + MA_ZERO_OBJECT(&config); + config.pEngine = pEngine; + config.type = type; + config.isPitchDisabled = (flags & MA_SOUND_FLAG_NO_PITCH) != 0; + config.isSpatializationDisabled = (flags & MA_SOUND_FLAG_NO_SPATIALIZATION) != 0; + config.monoExpansionMode = pEngine->monoExpansionMode; + + return config; +} + + +static void ma_engine_node_update_pitch_if_required(ma_engine_node* pEngineNode) +{ + ma_bool32 isUpdateRequired = MA_FALSE; + float newPitch; + + MA_ASSERT(pEngineNode != NULL); + + newPitch = ma_atomic_load_explicit_f32(&pEngineNode->pitch, ma_atomic_memory_order_acquire); + + if (pEngineNode->oldPitch != newPitch) { + pEngineNode->oldPitch = newPitch; + isUpdateRequired = MA_TRUE; + } + + if (pEngineNode->oldDopplerPitch != pEngineNode->spatializer.dopplerPitch) { + pEngineNode->oldDopplerPitch = pEngineNode->spatializer.dopplerPitch; + isUpdateRequired = MA_TRUE; + } + + if (isUpdateRequired) { + float basePitch = (float)pEngineNode->sampleRate / ma_engine_get_sample_rate(pEngineNode->pEngine); + ma_linear_resampler_set_rate_ratio(&pEngineNode->resampler, basePitch * pEngineNode->oldPitch * pEngineNode->oldDopplerPitch); + } +} + +static ma_bool32 ma_engine_node_is_pitching_enabled(const ma_engine_node* pEngineNode) +{ + MA_ASSERT(pEngineNode != NULL); + + /* Don't try to be clever by skiping resampling in the pitch=1 case or else you'll glitch when moving away from 1. */ + return !ma_atomic_load_explicit_32(&pEngineNode->isPitchDisabled, ma_atomic_memory_order_acquire); +} + +static ma_bool32 ma_engine_node_is_spatialization_enabled(const ma_engine_node* pEngineNode) +{ + MA_ASSERT(pEngineNode != NULL); + + return !ma_atomic_load_explicit_32(&pEngineNode->isSpatializationDisabled, ma_atomic_memory_order_acquire); +} + +static ma_uint64 ma_engine_node_get_required_input_frame_count(const ma_engine_node* pEngineNode, ma_uint64 outputFrameCount) +{ + ma_uint64 inputFrameCount = 0; + + if (ma_engine_node_is_pitching_enabled(pEngineNode)) { + ma_result result = ma_linear_resampler_get_required_input_frame_count(&pEngineNode->resampler, outputFrameCount, &inputFrameCount); + if (result != MA_SUCCESS) { + inputFrameCount = 0; + } + } else { + inputFrameCount = outputFrameCount; /* No resampling, so 1:1. */ + } + + return inputFrameCount; +} + +static ma_result ma_engine_node_set_volume(ma_engine_node* pEngineNode, float volume) +{ + if (pEngineNode == NULL) { + return MA_INVALID_ARGS; + } + + ma_atomic_float_set(&pEngineNode->volume, volume); + + /* If we're not smoothing we should bypass the volume gainer entirely. */ + if (pEngineNode->volumeSmoothTimeInPCMFrames == 0) { + /* We should always have an active spatializer because it can be enabled and disabled dynamically. We can just use that for hodling our volume. */ + ma_spatializer_set_master_volume(&pEngineNode->spatializer, volume); + } else { + /* We're using volume smoothing, so apply the master volume to the gainer. 
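+    Whether smoothing is used is decided entirely by volumeSmoothTimeInPCMFrames, which is specified in PCM frames
+    rather than milliseconds. When configuring it from a time value the conversion is frames = milliseconds *
+    sampleRate / 1000, so roughly 10 ms at 48 kHz looks like this (field name taken from the engine config defined
+    later in this file):
+
+        ma_engine_config engineConfig = ma_engine_config_init();
+        engineConfig.defaultVolumeSmoothTimeInPCMFrames = 48000 * 10 / 1000;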
*/ + ma_gainer_set_gain(&pEngineNode->volumeGainer, volume); + } + + return MA_SUCCESS; +} + +static ma_result ma_engine_node_get_volume(const ma_engine_node* pEngineNode, float* pVolume) +{ + if (pVolume == NULL) { + return MA_INVALID_ARGS; + } + + *pVolume = 0.0f; + + if (pEngineNode == NULL) { + return MA_INVALID_ARGS; + } + + *pVolume = ma_atomic_float_get((ma_atomic_float*)&pEngineNode->volume); + + return MA_SUCCESS; +} + + +static void ma_engine_node_process_pcm_frames__general(ma_engine_node* pEngineNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_uint32 frameCountIn; + ma_uint32 frameCountOut; + ma_uint32 totalFramesProcessedIn; + ma_uint32 totalFramesProcessedOut; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_bool32 isPitchingEnabled; + ma_bool32 isFadingEnabled; + ma_bool32 isSpatializationEnabled; + ma_bool32 isPanningEnabled; + ma_bool32 isVolumeSmoothingEnabled; + + frameCountIn = *pFrameCountIn; + frameCountOut = *pFrameCountOut; + + channelsIn = ma_spatializer_get_input_channels(&pEngineNode->spatializer); + channelsOut = ma_spatializer_get_output_channels(&pEngineNode->spatializer); + + totalFramesProcessedIn = 0; + totalFramesProcessedOut = 0; + + /* Update the fader if applicable. */ + { + ma_uint64 fadeLengthInFrames = ma_atomic_uint64_get(&pEngineNode->fadeSettings.fadeLengthInFrames); + if (fadeLengthInFrames != ~(ma_uint64)0) { + float fadeVolumeBeg = ma_atomic_float_get(&pEngineNode->fadeSettings.volumeBeg); + float fadeVolumeEnd = ma_atomic_float_get(&pEngineNode->fadeSettings.volumeEnd); + ma_int64 fadeStartOffsetInFrames = (ma_int64)ma_atomic_uint64_get(&pEngineNode->fadeSettings.absoluteGlobalTimeInFrames); + if (fadeStartOffsetInFrames == (ma_int64)(~(ma_uint64)0)) { + fadeStartOffsetInFrames = 0; + } else { + fadeStartOffsetInFrames -= ma_engine_get_time(pEngineNode->pEngine); + } + + ma_fader_set_fade_ex(&pEngineNode->fader, fadeVolumeBeg, fadeVolumeEnd, fadeLengthInFrames, fadeStartOffsetInFrames); + + /* Reset the fade length so we don't erroneously apply it again. */ + ma_atomic_uint64_set(&pEngineNode->fadeSettings.fadeLengthInFrames, ~(ma_uint64)0); + } + } + + isPitchingEnabled = ma_engine_node_is_pitching_enabled(pEngineNode); + isFadingEnabled = pEngineNode->fader.volumeBeg != 1 || pEngineNode->fader.volumeEnd != 1; + isSpatializationEnabled = ma_engine_node_is_spatialization_enabled(pEngineNode); + isPanningEnabled = pEngineNode->panner.pan != 0 && channelsOut != 1; + isVolumeSmoothingEnabled = pEngineNode->volumeSmoothTimeInPCMFrames > 0; + + /* Keep going while we've still got data available for processing. */ + while (totalFramesProcessedOut < frameCountOut) { + /* + We need to process in a specific order. We always do resampling first because it's likely + we're going to be increasing the channel count after spatialization. Also, I want to do + fading based on the output sample rate. + + We'll first read into a buffer from the resampler. Then we'll do all processing that + operates on the on the input channel count. We'll then get the spatializer to output to + the output buffer and then do all effects from that point directly in the output buffer + in-place. + + Note that we're always running the resampler if pitching is enabled, even when the pitch + is 1. If we try to be clever and skip resampling when the pitch is 1, we'll get a glitch + when we move away from 1, back to 1, and then away from 1 again. 
We'll want to implement + any pitch=1 optimizations in the resampler itself. + + There's a small optimization here that we'll utilize since it might be a fairly common + case. When the input and output channel counts are the same, we'll read straight into the + output buffer from the resampler and do everything in-place. + */ + const float* pRunningFramesIn; + float* pRunningFramesOut; + float* pWorkingBuffer; /* This is the buffer that we'll be processing frames in. This is in input channels. */ + float temp[MA_DATA_CONVERTER_STACK_BUFFER_SIZE / sizeof(float)]; + ma_uint32 tempCapInFrames = ma_countof(temp) / channelsIn; + ma_uint32 framesAvailableIn; + ma_uint32 framesAvailableOut; + ma_uint32 framesJustProcessedIn; + ma_uint32 framesJustProcessedOut; + ma_bool32 isWorkingBufferValid = MA_FALSE; + + framesAvailableIn = frameCountIn - totalFramesProcessedIn; + framesAvailableOut = frameCountOut - totalFramesProcessedOut; + + pRunningFramesIn = ma_offset_pcm_frames_const_ptr_f32(ppFramesIn[0], totalFramesProcessedIn, channelsIn); + pRunningFramesOut = ma_offset_pcm_frames_ptr_f32(ppFramesOut[0], totalFramesProcessedOut, channelsOut); + + if (channelsIn == channelsOut) { + /* Fast path. Channel counts are the same. No need for an intermediary input buffer. */ + pWorkingBuffer = pRunningFramesOut; + } else { + /* Slow path. Channel counts are different. Need to use an intermediary input buffer. */ + pWorkingBuffer = temp; + if (framesAvailableOut > tempCapInFrames) { + framesAvailableOut = tempCapInFrames; + } + } + + /* First is resampler. */ + if (isPitchingEnabled) { + ma_uint64 resampleFrameCountIn = framesAvailableIn; + ma_uint64 resampleFrameCountOut = framesAvailableOut; + + ma_linear_resampler_process_pcm_frames(&pEngineNode->resampler, pRunningFramesIn, &resampleFrameCountIn, pWorkingBuffer, &resampleFrameCountOut); + isWorkingBufferValid = MA_TRUE; + + framesJustProcessedIn = (ma_uint32)resampleFrameCountIn; + framesJustProcessedOut = (ma_uint32)resampleFrameCountOut; + } else { + framesJustProcessedIn = ma_min(framesAvailableIn, framesAvailableOut); + framesJustProcessedOut = framesJustProcessedIn; /* When no resampling is being performed, the number of output frames is the same as input frames. */ + } + + /* Fading. */ + if (isFadingEnabled) { + if (isWorkingBufferValid) { + ma_fader_process_pcm_frames(&pEngineNode->fader, pWorkingBuffer, pWorkingBuffer, framesJustProcessedOut); /* In-place processing. */ + } else { + ma_fader_process_pcm_frames(&pEngineNode->fader, pWorkingBuffer, pRunningFramesIn, framesJustProcessedOut); + isWorkingBufferValid = MA_TRUE; + } + } + + /* + If we're using smoothing, we won't be applying volume via the spatializer, but instead from a ma_gainer. In this case + we'll want to apply our volume now. + */ + if (isVolumeSmoothingEnabled) { + if (isWorkingBufferValid) { + ma_gainer_process_pcm_frames(&pEngineNode->volumeGainer, pWorkingBuffer, pWorkingBuffer, framesJustProcessedOut); + } else { + ma_gainer_process_pcm_frames(&pEngineNode->volumeGainer, pWorkingBuffer, pRunningFramesIn, framesJustProcessedOut); + isWorkingBufferValid = MA_TRUE; + } + } + + /* + If at this point we still haven't actually done anything with the working buffer we need + to just read straight from the input buffer. + */ + if (isWorkingBufferValid == MA_FALSE) { + pWorkingBuffer = (float*)pRunningFramesIn; /* Naughty const cast, but it's safe at this point because we won't ever be writing to it from this point out. */ + } + + /* Spatialization. 
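+    The spatializer also performs the channel conversion from the input channel count to the output channel count,
+    which is why the non-spatialized branch below has to do its own channel conversion and volume application.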
*/ + if (isSpatializationEnabled) { + ma_uint32 iListener; + + /* + When determining the listener to use, we first check to see if the sound is pinned to a + specific listener. If so, we use that. Otherwise we just use the closest listener. + */ + if (pEngineNode->pinnedListenerIndex != MA_LISTENER_INDEX_CLOSEST && pEngineNode->pinnedListenerIndex < ma_engine_get_listener_count(pEngineNode->pEngine)) { + iListener = pEngineNode->pinnedListenerIndex; + } else { + ma_vec3f spatializerPosition = ma_spatializer_get_position(&pEngineNode->spatializer); + iListener = ma_engine_find_closest_listener(pEngineNode->pEngine, spatializerPosition.x, spatializerPosition.y, spatializerPosition.z); + } + + ma_spatializer_process_pcm_frames(&pEngineNode->spatializer, &pEngineNode->pEngine->listeners[iListener], pRunningFramesOut, pWorkingBuffer, framesJustProcessedOut); + } else { + /* No spatialization, but we still need to do channel conversion and master volume. */ + float volume; + ma_engine_node_get_volume(pEngineNode, &volume); /* Should never fail. */ + + if (channelsIn == channelsOut) { + /* No channel conversion required. Just copy straight to the output buffer. */ + if (isVolumeSmoothingEnabled) { + /* Volume has already been applied. Just copy straight to the output buffer. */ + ma_copy_pcm_frames(pRunningFramesOut, pWorkingBuffer, framesJustProcessedOut * channelsOut, ma_format_f32, channelsOut); + } else { + /* Volume has not been applied yet. Copy and apply volume in the same pass. */ + ma_copy_and_apply_volume_factor_f32(pRunningFramesOut, pWorkingBuffer, framesJustProcessedOut * channelsOut, volume); + } + } else { + /* Channel conversion required. TODO: Add support for channel maps here. */ + ma_channel_map_apply_f32(pRunningFramesOut, NULL, channelsOut, pWorkingBuffer, NULL, channelsIn, framesJustProcessedOut, ma_channel_mix_mode_simple, pEngineNode->monoExpansionMode); + + /* If we're using smoothing, the volume will have already been applied. */ + if (!isVolumeSmoothingEnabled) { + ma_apply_volume_factor_f32(pRunningFramesOut, framesJustProcessedOut * channelsOut, volume); + } + } + } + + /* At this point we can guarantee that the output buffer contains valid data. We can process everything in place now. */ + + /* Panning. */ + if (isPanningEnabled) { + ma_panner_process_pcm_frames(&pEngineNode->panner, pRunningFramesOut, pRunningFramesOut, framesJustProcessedOut); /* In-place processing. */ + } + + /* We're done for this chunk. */ + totalFramesProcessedIn += framesJustProcessedIn; + totalFramesProcessedOut += framesJustProcessedOut; + + /* If we didn't process any output frames this iteration it means we've either run out of input data, or run out of room in the output buffer. */ + if (framesJustProcessedOut == 0) { + break; + } + } + + /* At this point we're done processing. */ + *pFrameCountIn = totalFramesProcessedIn; + *pFrameCountOut = totalFramesProcessedOut; +} + +static void ma_engine_node_process_pcm_frames__sound(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + /* For sounds, we need to first read from the data source. Then we need to apply the engine effects (pan, pitch, fades, etc.). 
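+    The effective per-sound chain, in the order applied by ma_engine_node_process_pcm_frames__general(), is:
+    resample (pitch), then fade, then the volume gainer (only when smoothing is enabled), then the spatializer (or
+    plain channel conversion plus volume when spatialization is disabled), and finally pan.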
*/ + ma_result result = MA_SUCCESS; + ma_sound* pSound = (ma_sound*)pNode; + ma_uint32 frameCount = *pFrameCountOut; + ma_uint32 totalFramesRead = 0; + ma_format dataSourceFormat; + ma_uint32 dataSourceChannels; + ma_uint8 temp[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 tempCapInFrames; + ma_uint64 seekTarget; + + /* This is a data source node which means no input buses. */ + (void)ppFramesIn; + (void)pFrameCountIn; + + /* If we're marked at the end we need to stop the sound and do nothing. */ + if (ma_sound_at_end(pSound)) { + ma_sound_stop(pSound); + *pFrameCountOut = 0; + return; + } + + /* If we're seeking, do so now before reading. */ + seekTarget = ma_atomic_load_64(&pSound->seekTarget); + if (seekTarget != MA_SEEK_TARGET_NONE) { + ma_data_source_seek_to_pcm_frame(pSound->pDataSource, seekTarget); + + /* Any time-dependant effects need to have their times updated. */ + ma_node_set_time(pSound, seekTarget); + + ma_atomic_exchange_64(&pSound->seekTarget, MA_SEEK_TARGET_NONE); + } + + /* + We want to update the pitch once. For sounds, this can be either at the start or at the end. If + we don't force this to only ever be updating once, we could end up in a situation where + retrieving the required input frame count ends up being different to what we actually retrieve. + What could happen is that the required input frame count is calculated, the pitch is update, + and then this processing function is called resulting in a different number of input frames + being processed. Do not call this in ma_engine_node_process_pcm_frames__general() or else + you'll hit the aforementioned bug. + */ + ma_engine_node_update_pitch_if_required(&pSound->engineNode); + + /* + For the convenience of the caller, we're doing to allow data sources to use non-floating-point formats and channel counts that differ + from the main engine. + */ + result = ma_data_source_get_data_format(pSound->pDataSource, &dataSourceFormat, &dataSourceChannels, NULL, NULL, 0); + if (result == MA_SUCCESS) { + tempCapInFrames = sizeof(temp) / ma_get_bytes_per_frame(dataSourceFormat, dataSourceChannels); + + /* Keep reading until we've read as much as was requested or we reach the end of the data source. */ + while (totalFramesRead < frameCount) { + ma_uint32 framesRemaining = frameCount - totalFramesRead; + ma_uint32 framesToRead; + ma_uint64 framesJustRead; + ma_uint32 frameCountIn; + ma_uint32 frameCountOut; + const float* pRunningFramesIn; + float* pRunningFramesOut; + + /* + The first thing we need to do is read into the temporary buffer. We can calculate exactly + how many input frames we'll need after resampling. + */ + framesToRead = (ma_uint32)ma_engine_node_get_required_input_frame_count(&pSound->engineNode, framesRemaining); + if (framesToRead > tempCapInFrames) { + framesToRead = tempCapInFrames; + } + + result = ma_data_source_read_pcm_frames(pSound->pDataSource, temp, framesToRead, &framesJustRead); + + /* If we reached the end of the sound we'll want to mark it as at the end and stop it. This should never be returned for looping sounds. */ + if (result == MA_AT_END) { + ma_sound_set_at_end(pSound, MA_TRUE); /* This will be set to false in ma_sound_start(). */ + } + + pRunningFramesOut = ma_offset_pcm_frames_ptr_f32(ppFramesOut[0], totalFramesRead, ma_engine_get_channels(ma_sound_get_engine(pSound))); + + frameCountIn = (ma_uint32)framesJustRead; + frameCountOut = framesRemaining; + + /* Convert if necessary. */ + if (dataSourceFormat == ma_format_f32) { + /* Fast path. No data conversion necessary. 
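+    Sounds loaded through the engine's internally created resource manager will always take this path because
+    ma_engine_init() sets the decoded format to ma_format_f32 (see further down in this file). The conversion branch
+    below only matters for custom data sources, or for an externally supplied resource manager configured with a
+    different decoded format.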
*/ + pRunningFramesIn = (float*)temp; + ma_engine_node_process_pcm_frames__general(&pSound->engineNode, &pRunningFramesIn, &frameCountIn, &pRunningFramesOut, &frameCountOut); + } else { + /* Slow path. Need to do sample format conversion to f32. If we give the f32 buffer the same count as the first temp buffer, we're guaranteed it'll be large enough. */ + float tempf32[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* Do not do `MA_DATA_CONVERTER_STACK_BUFFER_SIZE/sizeof(float)` here like we've done in other places. */ + ma_convert_pcm_frames_format(tempf32, ma_format_f32, temp, dataSourceFormat, framesJustRead, dataSourceChannels, ma_dither_mode_none); + + /* Now that we have our samples in f32 format we can process like normal. */ + pRunningFramesIn = tempf32; + ma_engine_node_process_pcm_frames__general(&pSound->engineNode, &pRunningFramesIn, &frameCountIn, &pRunningFramesOut, &frameCountOut); + } + + /* We should have processed all of our input frames since we calculated the required number of input frames at the top. */ + MA_ASSERT(frameCountIn == framesJustRead); + totalFramesRead += (ma_uint32)frameCountOut; /* Safe cast. */ + + if (result != MA_SUCCESS || ma_sound_at_end(pSound)) { + break; /* Might have reached the end. */ + } + } + } + + *pFrameCountOut = totalFramesRead; +} + +static void ma_engine_node_process_pcm_frames__group(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + /* + Make sure the pitch is updated before trying to read anything. It's important that this is done + only once and not in ma_engine_node_process_pcm_frames__general(). The reason for this is that + ma_engine_node_process_pcm_frames__general() will call ma_engine_node_get_required_input_frame_count(), + and if another thread modifies the pitch just after that call it can result in a glitch due to + the input rate changing. + */ + ma_engine_node_update_pitch_if_required((ma_engine_node*)pNode); + + /* For groups, the input data has already been read and we just need to apply the effect. */ + ma_engine_node_process_pcm_frames__general((ma_engine_node*)pNode, ppFramesIn, pFrameCountIn, ppFramesOut, pFrameCountOut); +} + +static ma_result ma_engine_node_get_required_input_frame_count__group(ma_node* pNode, ma_uint32 outputFrameCount, ma_uint32* pInputFrameCount) +{ + ma_uint64 inputFrameCount; + + MA_ASSERT(pInputFrameCount != NULL); + + /* Our pitch will affect this calculation. We need to update it. */ + ma_engine_node_update_pitch_if_required((ma_engine_node*)pNode); + + inputFrameCount = ma_engine_node_get_required_input_frame_count((ma_engine_node*)pNode, outputFrameCount); + if (inputFrameCount > 0xFFFFFFFF) { + inputFrameCount = 0xFFFFFFFF; /* Will never happen because miniaudio will only ever process in relatively small chunks. */ + } + + *pInputFrameCount = (ma_uint32)inputFrameCount; + + return MA_SUCCESS; +} + + +static ma_node_vtable g_ma_engine_node_vtable__sound = +{ + ma_engine_node_process_pcm_frames__sound, + NULL, /* onGetRequiredInputFrameCount */ + 0, /* Sounds are data source nodes which means they have zero inputs (their input is drawn from the data source itself). */ + 1, /* Sounds have one output bus. */ + 0 /* Default flags. */ +}; + +static ma_node_vtable g_ma_engine_node_vtable__group = +{ + ma_engine_node_process_pcm_frames__group, + ma_engine_node_get_required_input_frame_count__group, + 1, /* Groups have one input bus. */ + 1, /* Groups have one output bus. 
*/ + MA_NODE_FLAG_DIFFERENT_PROCESSING_RATES /* The engine node does resampling so should let miniaudio know about it. */ +}; + + + +static ma_node_config ma_engine_node_base_node_config_init(const ma_engine_node_config* pConfig) +{ + ma_node_config baseNodeConfig; + + if (pConfig->type == ma_engine_node_type_sound) { + /* Sound. */ + baseNodeConfig = ma_node_config_init(); + baseNodeConfig.vtable = &g_ma_engine_node_vtable__sound; + baseNodeConfig.initialState = ma_node_state_stopped; /* Sounds are stopped by default. */ + } else { + /* Group. */ + baseNodeConfig = ma_node_config_init(); + baseNodeConfig.vtable = &g_ma_engine_node_vtable__group; + baseNodeConfig.initialState = ma_node_state_started; /* Groups are started by default. */ + } + + return baseNodeConfig; +} + +static ma_spatializer_config ma_engine_node_spatializer_config_init(const ma_node_config* pBaseNodeConfig) +{ + return ma_spatializer_config_init(pBaseNodeConfig->pInputChannels[0], pBaseNodeConfig->pOutputChannels[0]); +} + +typedef struct +{ + size_t sizeInBytes; + size_t baseNodeOffset; + size_t resamplerOffset; + size_t spatializerOffset; + size_t gainerOffset; +} ma_engine_node_heap_layout; + +static ma_result ma_engine_node_get_heap_layout(const ma_engine_node_config* pConfig, ma_engine_node_heap_layout* pHeapLayout) +{ + ma_result result; + size_t tempHeapSize; + ma_node_config baseNodeConfig; + ma_linear_resampler_config resamplerConfig; + ma_spatializer_config spatializerConfig; + ma_gainer_config gainerConfig; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_channel defaultStereoChannelMap[2] = {MA_CHANNEL_SIDE_LEFT, MA_CHANNEL_SIDE_RIGHT}; /* <-- Consistent with the default channel map of a stereo listener. Means channel conversion can run on a fast path. */ + + MA_ASSERT(pHeapLayout); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->pEngine == NULL) { + return MA_INVALID_ARGS; /* An engine must be specified. */ + } + + pHeapLayout->sizeInBytes = 0; + + channelsIn = (pConfig->channelsIn != 0) ? pConfig->channelsIn : ma_engine_get_channels(pConfig->pEngine); + channelsOut = (pConfig->channelsOut != 0) ? pConfig->channelsOut : ma_engine_get_channels(pConfig->pEngine); + + + /* Base node. */ + baseNodeConfig = ma_engine_node_base_node_config_init(pConfig); + baseNodeConfig.pInputChannels = &channelsIn; + baseNodeConfig.pOutputChannels = &channelsOut; + + result = ma_node_get_heap_size(ma_engine_get_node_graph(pConfig->pEngine), &baseNodeConfig, &tempHeapSize); + if (result != MA_SUCCESS) { + return result; /* Failed to retrieve the size of the heap for the base node. */ + } + + pHeapLayout->baseNodeOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(tempHeapSize); + + + /* Resmapler. */ + resamplerConfig = ma_linear_resampler_config_init(ma_format_f32, channelsIn, 1, 1); /* Input and output sample rates don't affect the calculation of the heap size. */ + resamplerConfig.lpfOrder = 0; + + result = ma_linear_resampler_get_heap_size(&resamplerConfig, &tempHeapSize); + if (result != MA_SUCCESS) { + return result; /* Failed to retrieve the size of the heap for the resampler. */ + } + + pHeapLayout->resamplerOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(tempHeapSize); + + + /* Spatializer. 
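+    The layout pattern is the same for every sub-object: record the current running size as that object's offset,
+    then advance the running size by ma_align_64() of the heap size the object reports. That keeps each
+    sub-allocation 64-byte aligned within the single shared allocation.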
*/ + spatializerConfig = ma_engine_node_spatializer_config_init(&baseNodeConfig); + + if (spatializerConfig.channelsIn == 2) { + spatializerConfig.pChannelMapIn = defaultStereoChannelMap; + } + + result = ma_spatializer_get_heap_size(&spatializerConfig, &tempHeapSize); + if (result != MA_SUCCESS) { + return result; /* Failed to retrieve the size of the heap for the spatializer. */ + } + + pHeapLayout->spatializerOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(tempHeapSize); + + + /* Gainer. Will not be used if we are not using smoothing. */ + if (pConfig->volumeSmoothTimeInPCMFrames > 0) { + gainerConfig = ma_gainer_config_init(channelsIn, pConfig->volumeSmoothTimeInPCMFrames); + + result = ma_gainer_get_heap_size(&gainerConfig, &tempHeapSize); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->gainerOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(tempHeapSize); + } + + + return MA_SUCCESS; +} + +MA_API ma_result ma_engine_node_get_heap_size(const ma_engine_node_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_engine_node_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_engine_node_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_engine_node_init_preallocated(const ma_engine_node_config* pConfig, void* pHeap, ma_engine_node* pEngineNode) +{ + ma_result result; + ma_engine_node_heap_layout heapLayout; + ma_node_config baseNodeConfig; + ma_linear_resampler_config resamplerConfig; + ma_fader_config faderConfig; + ma_spatializer_config spatializerConfig; + ma_panner_config pannerConfig; + ma_gainer_config gainerConfig; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_channel defaultStereoChannelMap[2] = {MA_CHANNEL_SIDE_LEFT, MA_CHANNEL_SIDE_RIGHT}; /* <-- Consistent with the default channel map of a stereo listener. Means channel conversion can run on a fast path. */ + + if (pEngineNode == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pEngineNode); + + result = ma_engine_node_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + if (pConfig->pinnedListenerIndex != MA_LISTENER_INDEX_CLOSEST && pConfig->pinnedListenerIndex >= ma_engine_get_listener_count(pConfig->pEngine)) { + return MA_INVALID_ARGS; /* Invalid listener. */ + } + + pEngineNode->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pEngineNode->pEngine = pConfig->pEngine; + pEngineNode->sampleRate = (pConfig->sampleRate > 0) ? 
pConfig->sampleRate : ma_engine_get_sample_rate(pEngineNode->pEngine); + pEngineNode->volumeSmoothTimeInPCMFrames = pConfig->volumeSmoothTimeInPCMFrames; + pEngineNode->monoExpansionMode = pConfig->monoExpansionMode; + ma_atomic_float_set(&pEngineNode->volume, 1); + pEngineNode->pitch = 1; + pEngineNode->oldPitch = 1; + pEngineNode->oldDopplerPitch = 1; + pEngineNode->isPitchDisabled = pConfig->isPitchDisabled; + pEngineNode->isSpatializationDisabled = pConfig->isSpatializationDisabled; + pEngineNode->pinnedListenerIndex = pConfig->pinnedListenerIndex; + ma_atomic_float_set(&pEngineNode->fadeSettings.volumeBeg, 1); + ma_atomic_float_set(&pEngineNode->fadeSettings.volumeEnd, 1); + ma_atomic_uint64_set(&pEngineNode->fadeSettings.fadeLengthInFrames, (~(ma_uint64)0)); + ma_atomic_uint64_set(&pEngineNode->fadeSettings.absoluteGlobalTimeInFrames, (~(ma_uint64)0)); /* <-- Indicates that the fade should start immediately. */ + + channelsIn = (pConfig->channelsIn != 0) ? pConfig->channelsIn : ma_engine_get_channels(pConfig->pEngine); + channelsOut = (pConfig->channelsOut != 0) ? pConfig->channelsOut : ma_engine_get_channels(pConfig->pEngine); + + /* + If the sample rate of the sound is different to the engine, make sure pitching is enabled so that the resampler + is activated. Not doing this will result in the sound not being resampled if MA_SOUND_FLAG_NO_PITCH is used. + */ + if (pEngineNode->sampleRate != ma_engine_get_sample_rate(pEngineNode->pEngine)) { + pEngineNode->isPitchDisabled = MA_FALSE; + } + + + /* Base node. */ + baseNodeConfig = ma_engine_node_base_node_config_init(pConfig); + baseNodeConfig.pInputChannels = &channelsIn; + baseNodeConfig.pOutputChannels = &channelsOut; + + result = ma_node_init_preallocated(&pConfig->pEngine->nodeGraph, &baseNodeConfig, ma_offset_ptr(pHeap, heapLayout.baseNodeOffset), &pEngineNode->baseNode); + if (result != MA_SUCCESS) { + goto error0; + } + + + /* + We can now initialize the effects we need in order to implement the engine node. There's a + defined order of operations here, mainly centered around when we convert our channels from the + data source's native channel count to the engine's channel count. As a rule, we want to do as + much computation as possible before spatialization because there's a chance that will increase + the channel count, thereby increasing the amount of work needing to be done to process. + */ + + /* We'll always do resampling first. */ + resamplerConfig = ma_linear_resampler_config_init(ma_format_f32, baseNodeConfig.pInputChannels[0], pEngineNode->sampleRate, ma_engine_get_sample_rate(pEngineNode->pEngine)); + resamplerConfig.lpfOrder = 0; /* <-- Need to disable low-pass filtering for pitch shifting for now because there's cases where the biquads are becoming unstable. Need to figure out a better fix for this. */ + + result = ma_linear_resampler_init_preallocated(&resamplerConfig, ma_offset_ptr(pHeap, heapLayout.resamplerOffset), &pEngineNode->resampler); + if (result != MA_SUCCESS) { + goto error1; + } + + + /* After resampling will come the fader. */ + faderConfig = ma_fader_config_init(ma_format_f32, baseNodeConfig.pInputChannels[0], ma_engine_get_sample_rate(pEngineNode->pEngine)); + + result = ma_fader_init(&faderConfig, &pEngineNode->fader); + if (result != MA_SUCCESS) { + goto error2; + } + + + /* + Spatialization comes next. We spatialize based ont he node's output channel count. It's up the caller to + ensure channels counts link up correctly in the node graph. 
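+    In practice channelsIn comes from pConfig->channelsIn (for sounds this is typically the data source's channel
+    count) and channelsOut defaults to the engine's channel count, so a mono sound attached to a stereo endpoint is
+    expanded to stereo here by the spatializer rather than earlier in the chain.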
+ */ + spatializerConfig = ma_engine_node_spatializer_config_init(&baseNodeConfig); + spatializerConfig.gainSmoothTimeInFrames = pEngineNode->pEngine->gainSmoothTimeInFrames; + + if (spatializerConfig.channelsIn == 2) { + spatializerConfig.pChannelMapIn = defaultStereoChannelMap; + } + + result = ma_spatializer_init_preallocated(&spatializerConfig, ma_offset_ptr(pHeap, heapLayout.spatializerOffset), &pEngineNode->spatializer); + if (result != MA_SUCCESS) { + goto error2; + } + + + /* + After spatialization comes panning. We need to do this after spatialization because otherwise we wouldn't + be able to pan mono sounds. + */ + pannerConfig = ma_panner_config_init(ma_format_f32, baseNodeConfig.pOutputChannels[0]); + + result = ma_panner_init(&pannerConfig, &pEngineNode->panner); + if (result != MA_SUCCESS) { + goto error3; + } + + + /* We'll need a gainer for smoothing out volume changes if we have a non-zero smooth time. We apply this before converting to the output channel count. */ + if (pConfig->volumeSmoothTimeInPCMFrames > 0) { + gainerConfig = ma_gainer_config_init(channelsIn, pConfig->volumeSmoothTimeInPCMFrames); + + result = ma_gainer_init_preallocated(&gainerConfig, ma_offset_ptr(pHeap, heapLayout.gainerOffset), &pEngineNode->volumeGainer); + if (result != MA_SUCCESS) { + goto error3; + } + } + + + return MA_SUCCESS; + + /* No need for allocation callbacks here because we use a preallocated heap. */ +error3: ma_spatializer_uninit(&pEngineNode->spatializer, NULL); +error2: ma_linear_resampler_uninit(&pEngineNode->resampler, NULL); +error1: ma_node_uninit(&pEngineNode->baseNode, NULL); +error0: return result; +} + +MA_API ma_result ma_engine_node_init(const ma_engine_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_engine_node* pEngineNode) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_engine_node_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_engine_node_init_preallocated(pConfig, pHeap, pEngineNode); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pEngineNode->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_engine_node_uninit(ma_engine_node* pEngineNode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + /* + The base node always needs to be uninitialized first to ensure it's detached from the graph completely before we + destroy anything that might be in the middle of being used by the processing function. + */ + ma_node_uninit(&pEngineNode->baseNode, pAllocationCallbacks); + + /* Now that the node has been uninitialized we can safely uninitialize the rest. */ + if (pEngineNode->volumeSmoothTimeInPCMFrames > 0) { + ma_gainer_uninit(&pEngineNode->volumeGainer, pAllocationCallbacks); + } + + ma_spatializer_uninit(&pEngineNode->spatializer, pAllocationCallbacks); + ma_linear_resampler_uninit(&pEngineNode->resampler, pAllocationCallbacks); + + /* Free the heap last. 
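+    The heap is only freed when the node owns it, i.e. when it was allocated internally by ma_engine_node_init().
+    Nodes set up with ma_engine_node_init_preallocated() leave ownership of the heap with the caller.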
*/ + if (pEngineNode->_ownsHeap) { + ma_free(pEngineNode->_pHeap, pAllocationCallbacks); + } +} + + +MA_API ma_sound_config ma_sound_config_init(void) +{ + return ma_sound_config_init_2(NULL); +} + +MA_API ma_sound_config ma_sound_config_init_2(ma_engine* pEngine) +{ + ma_sound_config config; + + MA_ZERO_OBJECT(&config); + + if (pEngine != NULL) { + config.monoExpansionMode = pEngine->monoExpansionMode; + } else { + config.monoExpansionMode = ma_mono_expansion_mode_default; + } + + config.rangeEndInPCMFrames = ~((ma_uint64)0); + config.loopPointEndInPCMFrames = ~((ma_uint64)0); + + return config; +} + +MA_API ma_sound_group_config ma_sound_group_config_init(void) +{ + return ma_sound_group_config_init_2(NULL); +} + +MA_API ma_sound_group_config ma_sound_group_config_init_2(ma_engine* pEngine) +{ + ma_sound_group_config config; + + MA_ZERO_OBJECT(&config); + + if (pEngine != NULL) { + config.monoExpansionMode = pEngine->monoExpansionMode; + } else { + config.monoExpansionMode = ma_mono_expansion_mode_default; + } + + return config; +} + + +MA_API ma_engine_config ma_engine_config_init(void) +{ + ma_engine_config config; + + MA_ZERO_OBJECT(&config); + config.listenerCount = 1; /* Always want at least one listener. */ + config.monoExpansionMode = ma_mono_expansion_mode_default; + + return config; +} + + +#if !defined(MA_NO_DEVICE_IO) +static void ma_engine_data_callback_internal(ma_device* pDevice, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount) +{ + ma_engine* pEngine = (ma_engine*)pDevice->pUserData; + + (void)pFramesIn; + + /* + Experiment: Try processing a resource manager job if we're on the Emscripten build. + + This serves two purposes: + + 1) It ensures jobs are actually processed at some point since we cannot guarantee that the + caller is doing the right thing and calling ma_resource_manager_process_next_job(); and + + 2) It's an attempt at working around an issue where processing jobs on the Emscripten main + loop doesn't work as well as it should. When trying to load sounds without the `DECODE` + flag or with the `ASYNC` flag, the sound data is just not able to be loaded in time + before the callback is processed. I think it's got something to do with the single- + threaded nature of Web, but I'm not entirely sure. + */ +#if !defined(MA_NO_RESOURCE_MANAGER) && defined(MA_EMSCRIPTEN) + { + if (pEngine->pResourceManager != NULL) { + if ((pEngine->pResourceManager->config.flags & MA_RESOURCE_MANAGER_FLAG_NO_THREADING) != 0) { + ma_resource_manager_process_next_job(pEngine->pResourceManager); + } + } + } +#endif + + ma_engine_read_pcm_frames(pEngine, pFramesOut, frameCount, NULL); +} +#endif + +MA_API ma_result ma_engine_init(const ma_engine_config* pConfig, ma_engine* pEngine) +{ + ma_result result; + ma_node_graph_config nodeGraphConfig; + ma_engine_config engineConfig; + ma_spatializer_listener_config listenerConfig; + ma_uint32 iListener; + + if (pEngine == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pEngine); + + /* The config is allowed to be NULL in which case we use defaults for everything. 
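+    A minimal sketch of the simplest possible setup, relying entirely on those defaults (the file name is
+    illustrative only, and ma_engine_uninit() should be called once playback is no longer needed):
+
+        ma_engine engine;
+        if (ma_engine_init(NULL, &engine) != MA_SUCCESS) {
+            return -1;
+        }
+        ma_engine_play_sound(&engine, "my_sound.wav", NULL);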
*/ + if (pConfig != NULL) { + engineConfig = *pConfig; + } else { + engineConfig = ma_engine_config_init(); + } + + pEngine->monoExpansionMode = engineConfig.monoExpansionMode; + pEngine->defaultVolumeSmoothTimeInPCMFrames = engineConfig.defaultVolumeSmoothTimeInPCMFrames; + pEngine->onProcess = engineConfig.onProcess; + pEngine->pProcessUserData = engineConfig.pProcessUserData; + ma_allocation_callbacks_init_copy(&pEngine->allocationCallbacks, &engineConfig.allocationCallbacks); + +#if !defined(MA_NO_RESOURCE_MANAGER) + { + pEngine->pResourceManager = engineConfig.pResourceManager; + } +#endif + +#if !defined(MA_NO_DEVICE_IO) + { + pEngine->pDevice = engineConfig.pDevice; + + /* If we don't have a device, we need one. */ + if (pEngine->pDevice == NULL && engineConfig.noDevice == MA_FALSE) { + ma_device_config deviceConfig; + + pEngine->pDevice = (ma_device*)ma_malloc(sizeof(*pEngine->pDevice), &pEngine->allocationCallbacks); + if (pEngine->pDevice == NULL) { + return MA_OUT_OF_MEMORY; + } + + deviceConfig = ma_device_config_init(ma_device_type_playback); + deviceConfig.playback.pDeviceID = engineConfig.pPlaybackDeviceID; + deviceConfig.playback.format = ma_format_f32; + deviceConfig.playback.channels = engineConfig.channels; + deviceConfig.sampleRate = engineConfig.sampleRate; + deviceConfig.dataCallback = (engineConfig.dataCallback != NULL) ? engineConfig.dataCallback : ma_engine_data_callback_internal; + deviceConfig.pUserData = pEngine; + deviceConfig.notificationCallback = engineConfig.notificationCallback; + deviceConfig.periodSizeInFrames = engineConfig.periodSizeInFrames; + deviceConfig.periodSizeInMilliseconds = engineConfig.periodSizeInMilliseconds; + deviceConfig.noPreSilencedOutputBuffer = MA_TRUE; /* We'll always be outputting to every frame in the callback so there's no need for a pre-silenced buffer. */ + deviceConfig.noClip = MA_TRUE; /* The engine will do clipping itself. */ + + if (engineConfig.pContext == NULL) { + ma_context_config contextConfig = ma_context_config_init(); + contextConfig.allocationCallbacks = pEngine->allocationCallbacks; + contextConfig.pLog = engineConfig.pLog; + + /* If the engine config does not specify a log, use the resource manager's if we have one. */ +#ifndef MA_NO_RESOURCE_MANAGER + { + if (contextConfig.pLog == NULL && engineConfig.pResourceManager != NULL) { + contextConfig.pLog = ma_resource_manager_get_log(engineConfig.pResourceManager); + } + } +#endif + + result = ma_device_init_ex(NULL, 0, &contextConfig, &deviceConfig, pEngine->pDevice); + } else { + result = ma_device_init(engineConfig.pContext, &deviceConfig, pEngine->pDevice); + } + + if (result != MA_SUCCESS) { + ma_free(pEngine->pDevice, &pEngine->allocationCallbacks); + pEngine->pDevice = NULL; + return result; + } + + pEngine->ownsDevice = MA_TRUE; + } + + /* Update the channel count and sample rate of the engine config so we can reference it below. */ + if (pEngine->pDevice != NULL) { + engineConfig.channels = pEngine->pDevice->playback.channels; + engineConfig.sampleRate = pEngine->pDevice->sampleRate; + } + } +#endif + + if (engineConfig.channels == 0 || engineConfig.sampleRate == 0) { + return MA_INVALID_ARGS; + } + + pEngine->sampleRate = engineConfig.sampleRate; + + /* The engine always uses either the log that was passed into the config, or the context's log is available. 
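+    If logging needs to go somewhere specific, the usual approach is to initialize a ma_log, register a callback on
+    it and pass it in via the config before calling ma_engine_init(). A sketch, assuming the standard ma_log API and
+    a user-supplied my_log_callback function:
+
+        ma_log log;
+        ma_log_init(NULL, &log);
+        ma_log_register_callback(&log, ma_log_callback_init(my_log_callback, NULL));
+        engineConfig.pLog = &log;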
*/ + if (engineConfig.pLog != NULL) { + pEngine->pLog = engineConfig.pLog; + } else { +#if !defined(MA_NO_DEVICE_IO) + { + pEngine->pLog = ma_device_get_log(pEngine->pDevice); + } +#else + { + pEngine->pLog = NULL; + } +#endif + } + + + /* The engine is a node graph. This needs to be initialized after we have the device so we can can determine the channel count. */ + nodeGraphConfig = ma_node_graph_config_init(engineConfig.channels); + nodeGraphConfig.nodeCacheCapInFrames = (engineConfig.periodSizeInFrames > 0xFFFF) ? 0xFFFF : (ma_uint16)engineConfig.periodSizeInFrames; + + result = ma_node_graph_init(&nodeGraphConfig, &pEngine->allocationCallbacks, &pEngine->nodeGraph); + if (result != MA_SUCCESS) { + goto on_error_1; + } + + + /* We need at least one listener. */ + if (engineConfig.listenerCount == 0) { + engineConfig.listenerCount = 1; + } + + if (engineConfig.listenerCount > MA_ENGINE_MAX_LISTENERS) { + result = MA_INVALID_ARGS; /* Too many listeners. */ + goto on_error_1; + } + + for (iListener = 0; iListener < engineConfig.listenerCount; iListener += 1) { + listenerConfig = ma_spatializer_listener_config_init(ma_node_graph_get_channels(&pEngine->nodeGraph)); + + /* + If we're using a device, use the device's channel map for the listener. Otherwise just use + miniaudio's default channel map. + */ +#if !defined(MA_NO_DEVICE_IO) + { + if (pEngine->pDevice != NULL) { + /* + Temporarily disabled. There is a subtle bug here where front-left and front-right + will be used by the device's channel map, but this is not what we want to use for + spatialization. Instead we want to use side-left and side-right. I need to figure + out a better solution for this. For now, disabling the use of device channel maps. + */ + /*listenerConfig.pChannelMapOut = pEngine->pDevice->playback.channelMap;*/ + } + } +#endif + + result = ma_spatializer_listener_init(&listenerConfig, &pEngine->allocationCallbacks, &pEngine->listeners[iListener]); /* TODO: Change this to a pre-allocated heap. */ + if (result != MA_SUCCESS) { + goto on_error_2; + } + + pEngine->listenerCount += 1; + } + + + /* Gain smoothing for spatialized sounds. */ + pEngine->gainSmoothTimeInFrames = engineConfig.gainSmoothTimeInFrames; + if (pEngine->gainSmoothTimeInFrames == 0) { + ma_uint32 gainSmoothTimeInMilliseconds = engineConfig.gainSmoothTimeInMilliseconds; + if (gainSmoothTimeInMilliseconds == 0) { + gainSmoothTimeInMilliseconds = 8; + } + + pEngine->gainSmoothTimeInFrames = (gainSmoothTimeInMilliseconds * ma_engine_get_sample_rate(pEngine)) / 1000; /* 8ms by default. */ + } + + + /* We need a resource manager. */ +#ifndef MA_NO_RESOURCE_MANAGER + { + if (pEngine->pResourceManager == NULL) { + ma_resource_manager_config resourceManagerConfig; + + pEngine->pResourceManager = (ma_resource_manager*)ma_malloc(sizeof(*pEngine->pResourceManager), &pEngine->allocationCallbacks); + if (pEngine->pResourceManager == NULL) { + result = MA_OUT_OF_MEMORY; + goto on_error_2; + } + + resourceManagerConfig = ma_resource_manager_config_init(); + resourceManagerConfig.pLog = pEngine->pLog; /* Always use the engine's log for internally-managed resource managers. */ + resourceManagerConfig.decodedFormat = ma_format_f32; + resourceManagerConfig.decodedChannels = 0; /* Leave the decoded channel count as 0 so we can get good spatialization. 
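+    A channel count of 0 here means the data source's native channel count is kept. A mono file therefore stays mono
+    all the way up to the engine node, where spatialization performs the expansion to the output channel count;
+    forcing a channel count at decode time would bake that conversion in before spatialization and hurt positioning.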
*/ + resourceManagerConfig.decodedSampleRate = ma_engine_get_sample_rate(pEngine); + ma_allocation_callbacks_init_copy(&resourceManagerConfig.allocationCallbacks, &pEngine->allocationCallbacks); + resourceManagerConfig.pVFS = engineConfig.pResourceManagerVFS; + + /* The Emscripten build cannot use threads. */ +#if defined(MA_EMSCRIPTEN) + { + resourceManagerConfig.jobThreadCount = 0; + resourceManagerConfig.flags |= MA_RESOURCE_MANAGER_FLAG_NO_THREADING; + } +#endif + + result = ma_resource_manager_init(&resourceManagerConfig, pEngine->pResourceManager); + if (result != MA_SUCCESS) { + goto on_error_3; + } + + pEngine->ownsResourceManager = MA_TRUE; + } + } +#endif + + /* Setup some stuff for inlined sounds. That is sounds played with ma_engine_play_sound(). */ + pEngine->inlinedSoundLock = 0; + pEngine->pInlinedSoundHead = NULL; + + /* Start the engine if required. This should always be the last step. */ +#if !defined(MA_NO_DEVICE_IO) + { + if (engineConfig.noAutoStart == MA_FALSE && pEngine->pDevice != NULL) { + result = ma_engine_start(pEngine); + if (result != MA_SUCCESS) { + goto on_error_4; /* Failed to start the engine. */ + } + } + } +#endif + + return MA_SUCCESS; + +#if !defined(MA_NO_DEVICE_IO) + on_error_4: +#endif +#if !defined(MA_NO_RESOURCE_MANAGER) + on_error_3: + if (pEngine->ownsResourceManager) { + ma_free(pEngine->pResourceManager, &pEngine->allocationCallbacks); + } +#endif /* MA_NO_RESOURCE_MANAGER */ +on_error_2: + for (iListener = 0; iListener < pEngine->listenerCount; iListener += 1) { + ma_spatializer_listener_uninit(&pEngine->listeners[iListener], &pEngine->allocationCallbacks); + } + + ma_node_graph_uninit(&pEngine->nodeGraph, &pEngine->allocationCallbacks); +on_error_1: +#if !defined(MA_NO_DEVICE_IO) + { + if (pEngine->ownsDevice) { + ma_device_uninit(pEngine->pDevice); + ma_free(pEngine->pDevice, &pEngine->allocationCallbacks); + } + } +#endif + + return result; +} + +MA_API void ma_engine_uninit(ma_engine* pEngine) +{ + ma_uint32 iListener; + + if (pEngine == NULL) { + return; + } + + /* The device must be uninitialized before the node graph to ensure the audio thread doesn't try accessing it. */ +#if !defined(MA_NO_DEVICE_IO) + { + if (pEngine->ownsDevice) { + ma_device_uninit(pEngine->pDevice); + ma_free(pEngine->pDevice, &pEngine->allocationCallbacks); + } else { + if (pEngine->pDevice != NULL) { + ma_device_stop(pEngine->pDevice); + } + } + } +#endif + + /* + All inlined sounds need to be deleted. I'm going to use a lock here just to future proof in case + I want to do some kind of garbage collection later on. + */ + ma_spinlock_lock(&pEngine->inlinedSoundLock); + { + for (;;) { + ma_sound_inlined* pSoundToDelete = pEngine->pInlinedSoundHead; + if (pSoundToDelete == NULL) { + break; /* Done. */ + } + + pEngine->pInlinedSoundHead = pSoundToDelete->pNext; + + ma_sound_uninit(&pSoundToDelete->sound); + ma_free(pSoundToDelete, &pEngine->allocationCallbacks); + } + } + ma_spinlock_unlock(&pEngine->inlinedSoundLock); + + for (iListener = 0; iListener < pEngine->listenerCount; iListener += 1) { + ma_spatializer_listener_uninit(&pEngine->listeners[iListener], &pEngine->allocationCallbacks); + } + + /* Make sure the node graph is uninitialized after the audio thread has been shutdown to prevent accessing of the node graph after being uninitialized. */ + ma_node_graph_uninit(&pEngine->nodeGraph, &pEngine->allocationCallbacks); + + /* Uninitialize the resource manager last to ensure we don't have a thread still trying to access it. 
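+    By this point the device (and with it the audio thread) and the node graph have already been torn down, so
+    nothing can still be reading from a resource-manager-backed data source when ma_resource_manager_uninit() shuts
+    down the job threads and frees any remaining data.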
*/ +#ifndef MA_NO_RESOURCE_MANAGER + if (pEngine->ownsResourceManager) { + ma_resource_manager_uninit(pEngine->pResourceManager); + ma_free(pEngine->pResourceManager, &pEngine->allocationCallbacks); + } +#endif +} + +MA_API ma_result ma_engine_read_pcm_frames(ma_engine* pEngine, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_result result; + ma_uint64 framesRead = 0; + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + result = ma_node_graph_read_pcm_frames(&pEngine->nodeGraph, pFramesOut, frameCount, &framesRead); + if (result != MA_SUCCESS) { + return result; + } + + if (pFramesRead != NULL) { + *pFramesRead = framesRead; + } + + if (pEngine->onProcess) { + pEngine->onProcess(pEngine->pProcessUserData, (float*)pFramesOut, framesRead); /* Safe cast to float* because the engine always works on floating point samples. */ + } + + return MA_SUCCESS; +} + +MA_API ma_node_graph* ma_engine_get_node_graph(ma_engine* pEngine) +{ + if (pEngine == NULL) { + return NULL; + } + + return &pEngine->nodeGraph; +} + +#if !defined(MA_NO_RESOURCE_MANAGER) +MA_API ma_resource_manager* ma_engine_get_resource_manager(ma_engine* pEngine) +{ + if (pEngine == NULL) { + return NULL; + } + +#if !defined(MA_NO_RESOURCE_MANAGER) + { + return pEngine->pResourceManager; + } +#else + { + return NULL; + } +#endif +} +#endif + +MA_API ma_device* ma_engine_get_device(ma_engine* pEngine) +{ + if (pEngine == NULL) { + return NULL; + } + +#if !defined(MA_NO_DEVICE_IO) + { + return pEngine->pDevice; + } +#else + { + return NULL; + } +#endif +} + +MA_API ma_log* ma_engine_get_log(ma_engine* pEngine) +{ + if (pEngine == NULL) { + return NULL; + } + + if (pEngine->pLog != NULL) { + return pEngine->pLog; + } else { +#if !defined(MA_NO_DEVICE_IO) + { + return ma_device_get_log(ma_engine_get_device(pEngine)); + } +#else + { + return NULL; + } +#endif + } +} + +MA_API ma_node* ma_engine_get_endpoint(ma_engine* pEngine) +{ + return ma_node_graph_get_endpoint(&pEngine->nodeGraph); +} + +MA_API ma_uint64 ma_engine_get_time_in_pcm_frames(const ma_engine* pEngine) +{ + return ma_node_graph_get_time(&pEngine->nodeGraph); +} + +MA_API ma_uint64 ma_engine_get_time_in_milliseconds(const ma_engine* pEngine) +{ + return ma_engine_get_time_in_pcm_frames(pEngine) * 1000 / ma_engine_get_sample_rate(pEngine); +} + +MA_API ma_result ma_engine_set_time_in_pcm_frames(ma_engine* pEngine, ma_uint64 globalTime) +{ + return ma_node_graph_set_time(&pEngine->nodeGraph, globalTime); +} + +MA_API ma_result ma_engine_set_time_in_milliseconds(ma_engine* pEngine, ma_uint64 globalTime) +{ + return ma_engine_set_time_in_pcm_frames(pEngine, globalTime * ma_engine_get_sample_rate(pEngine) / 1000); +} + +MA_API ma_uint64 ma_engine_get_time(const ma_engine* pEngine) +{ + return ma_engine_get_time_in_pcm_frames(pEngine); +} + +MA_API ma_result ma_engine_set_time(ma_engine* pEngine, ma_uint64 globalTime) +{ + return ma_engine_set_time_in_pcm_frames(pEngine, globalTime); +} + +MA_API ma_uint32 ma_engine_get_channels(const ma_engine* pEngine) +{ + return ma_node_graph_get_channels(&pEngine->nodeGraph); +} + +MA_API ma_uint32 ma_engine_get_sample_rate(const ma_engine* pEngine) +{ + if (pEngine == NULL) { + return 0; + } + + return pEngine->sampleRate; +} + + +MA_API ma_result ma_engine_start(ma_engine* pEngine) +{ + ma_result result; + + if (pEngine == NULL) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_NO_DEVICE_IO) + { + if (pEngine->pDevice != NULL) { + result = ma_device_start(pEngine->pDevice); + } else { + result = 
MA_INVALID_OPERATION; /* The engine is running without a device which means there's no real notion of "starting" the engine. */ + } + } +#else + { + result = MA_INVALID_OPERATION; /* Device IO is disabled, so there's no real notion of "starting" the engine. */ + } +#endif + + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_engine_stop(ma_engine* pEngine) +{ + ma_result result; + + if (pEngine == NULL) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_NO_DEVICE_IO) + { + if (pEngine->pDevice != NULL) { + result = ma_device_stop(pEngine->pDevice); + } else { + result = MA_INVALID_OPERATION; /* The engine is running without a device which means there's no real notion of "stopping" the engine. */ + } + } +#else + { + result = MA_INVALID_OPERATION; /* Device IO is disabled, so there's no real notion of "stopping" the engine. */ + } +#endif + + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_engine_set_volume(ma_engine* pEngine, float volume) +{ + if (pEngine == NULL) { + return MA_INVALID_ARGS; + } + + return ma_node_set_output_bus_volume(ma_node_graph_get_endpoint(&pEngine->nodeGraph), 0, volume); +} + +MA_API float ma_engine_get_volume(ma_engine* pEngine) +{ + if (pEngine == NULL) { + return 0; + } + + return ma_node_get_output_bus_volume(ma_node_graph_get_endpoint(&pEngine->nodeGraph), 0); +} + +MA_API ma_result ma_engine_set_gain_db(ma_engine* pEngine, float gainDB) +{ + return ma_engine_set_volume(pEngine, ma_volume_db_to_linear(gainDB)); +} + +MA_API float ma_engine_get_gain_db(ma_engine* pEngine) +{ + return ma_volume_linear_to_db(ma_engine_get_volume(pEngine)); +} + + +MA_API ma_uint32 ma_engine_get_listener_count(const ma_engine* pEngine) +{ + if (pEngine == NULL) { + return 0; + } + + return pEngine->listenerCount; +} + +MA_API ma_uint32 ma_engine_find_closest_listener(const ma_engine* pEngine, float absolutePosX, float absolutePosY, float absolutePosZ) +{ + ma_uint32 iListener; + ma_uint32 iListenerClosest; + float closestLen2 = MA_FLT_MAX; + + if (pEngine == NULL || pEngine->listenerCount == 1) { + return 0; + } + + iListenerClosest = 0; + for (iListener = 0; iListener < pEngine->listenerCount; iListener += 1) { + if (ma_engine_listener_is_enabled(pEngine, iListener)) { + float len2 = ma_vec3f_len2(ma_vec3f_sub(ma_spatializer_listener_get_position(&pEngine->listeners[iListener]), ma_vec3f_init_3f(absolutePosX, absolutePosY, absolutePosZ))); + if (closestLen2 > len2) { + closestLen2 = len2; + iListenerClosest = iListener; + } + } + } + + MA_ASSERT(iListenerClosest < 255); + return iListenerClosest; +} + +MA_API void ma_engine_listener_set_position(ma_engine* pEngine, ma_uint32 listenerIndex, float x, float y, float z) +{ + if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) { + return; + } + + ma_spatializer_listener_set_position(&pEngine->listeners[listenerIndex], x, y, z); +} + +MA_API ma_vec3f ma_engine_listener_get_position(const ma_engine* pEngine, ma_uint32 listenerIndex) +{ + if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) { + return ma_vec3f_init_3f(0, 0, 0); + } + + return ma_spatializer_listener_get_position(&pEngine->listeners[listenerIndex]); +} + +MA_API void ma_engine_listener_set_direction(ma_engine* pEngine, ma_uint32 listenerIndex, float x, float y, float z) +{ + if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) { + return; + } + + ma_spatializer_listener_set_direction(&pEngine->listeners[listenerIndex], x, y, z); +} + +MA_API 
ma_vec3f ma_engine_listener_get_direction(const ma_engine* pEngine, ma_uint32 listenerIndex)
+{
+    if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) {
+        return ma_vec3f_init_3f(0, 0, -1);
+    }
+
+    return ma_spatializer_listener_get_direction(&pEngine->listeners[listenerIndex]);
+}
+
+MA_API void ma_engine_listener_set_velocity(ma_engine* pEngine, ma_uint32 listenerIndex, float x, float y, float z)
+{
+    if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) {
+        return;
+    }
+
+    ma_spatializer_listener_set_velocity(&pEngine->listeners[listenerIndex], x, y, z);
+}
+
+MA_API ma_vec3f ma_engine_listener_get_velocity(const ma_engine* pEngine, ma_uint32 listenerIndex)
+{
+    if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) {
+        return ma_vec3f_init_3f(0, 0, 0);
+    }
+
+    return ma_spatializer_listener_get_velocity(&pEngine->listeners[listenerIndex]);
+}
+
+MA_API void ma_engine_listener_set_cone(ma_engine* pEngine, ma_uint32 listenerIndex, float innerAngleInRadians, float outerAngleInRadians, float outerGain)
+{
+    if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) {
+        return;
+    }
+
+    ma_spatializer_listener_set_cone(&pEngine->listeners[listenerIndex], innerAngleInRadians, outerAngleInRadians, outerGain);
+}
+
+MA_API void ma_engine_listener_get_cone(const ma_engine* pEngine, ma_uint32 listenerIndex, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain)
+{
+    if (pInnerAngleInRadians != NULL) {
+        *pInnerAngleInRadians = 0;
+    }
+
+    if (pOuterAngleInRadians != NULL) {
+        *pOuterAngleInRadians = 0;
+    }
+
+    if (pOuterGain != NULL) {
+        *pOuterGain = 0;
+    }
+
+    ma_spatializer_listener_get_cone(&pEngine->listeners[listenerIndex], pInnerAngleInRadians, pOuterAngleInRadians, pOuterGain);
+}
+
+MA_API void ma_engine_listener_set_world_up(ma_engine* pEngine, ma_uint32 listenerIndex, float x, float y, float z)
+{
+    if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) {
+        return;
+    }
+
+    ma_spatializer_listener_set_world_up(&pEngine->listeners[listenerIndex], x, y, z);
+}
+
+MA_API ma_vec3f ma_engine_listener_get_world_up(const ma_engine* pEngine, ma_uint32 listenerIndex)
+{
+    if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) {
+        return ma_vec3f_init_3f(0, 1, 0);
+    }
+
+    return ma_spatializer_listener_get_world_up(&pEngine->listeners[listenerIndex]);
+}
+
+MA_API void ma_engine_listener_set_enabled(ma_engine* pEngine, ma_uint32 listenerIndex, ma_bool32 isEnabled)
+{
+    if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) {
+        return;
+    }
+
+    ma_spatializer_listener_set_enabled(&pEngine->listeners[listenerIndex], isEnabled);
+}
+
+MA_API ma_bool32 ma_engine_listener_is_enabled(const ma_engine* pEngine, ma_uint32 listenerIndex)
+{
+    if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) {
+        return MA_FALSE;
+    }
+
+    return ma_spatializer_listener_is_enabled(&pEngine->listeners[listenerIndex]);
+}
+
+
+#ifndef MA_NO_RESOURCE_MANAGER
+MA_API ma_result ma_engine_play_sound_ex(ma_engine* pEngine, const char* pFilePath, ma_node* pNode, ma_uint32 nodeInputBusIndex)
+{
+    ma_result result = MA_SUCCESS;
+    ma_sound_inlined* pSound = NULL;
+    ma_sound_inlined* pNextSound = NULL;
+
+    if (pEngine == NULL || pFilePath == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    /* Attach to the endpoint node if nothing is specified. */
+    if (pNode == NULL) {
+        pNode = ma_node_graph_get_endpoint(&pEngine->nodeGraph);
+        nodeInputBusIndex = 0;
+    }
+
+    /*
+    We want to check if we can recycle an already-allocated inlined sound.
Since this is just a + helper I'm not *too* concerned about performance here and I'm happy to use a lock to keep + the implementation simple. Maybe this can be optimized later if there's enough demand, but + if this function is being used it probably means the caller doesn't really care too much. + + What we do is check the atEnd flag. When this is true, we can recycle the sound. Otherwise + we just keep iterating. If we reach the end without finding a sound to recycle we just + allocate a new one. This doesn't scale well for a massive number of sounds being played + simultaneously as we don't ever actually free the sound objects. Some kind of garbage + collection routine might be valuable for this which I'll think about. + */ + ma_spinlock_lock(&pEngine->inlinedSoundLock); + { + ma_uint32 soundFlags = 0; + + for (pNextSound = pEngine->pInlinedSoundHead; pNextSound != NULL; pNextSound = pNextSound->pNext) { + if (ma_sound_at_end(&pNextSound->sound)) { + /* + The sound is at the end which means it's available for recycling. All we need to do + is uninitialize it and reinitialize it. All we're doing is recycling memory. + */ + pSound = pNextSound; + ma_atomic_fetch_sub_32(&pEngine->inlinedSoundCount, 1); + break; + } + } + + if (pSound != NULL) { + /* + We actually want to detach the sound from the list here. The reason is because we want the sound + to be in a consistent state at the non-recycled case to simplify the logic below. + */ + if (pEngine->pInlinedSoundHead == pSound) { + pEngine->pInlinedSoundHead = pSound->pNext; + } + + if (pSound->pPrev != NULL) { + pSound->pPrev->pNext = pSound->pNext; + } + if (pSound->pNext != NULL) { + pSound->pNext->pPrev = pSound->pPrev; + } + + /* Now the previous sound needs to be uninitialized. */ + ma_sound_uninit(&pNextSound->sound); + } else { + /* No sound available for recycling. Allocate one now. */ + pSound = (ma_sound_inlined*)ma_malloc(sizeof(*pSound), &pEngine->allocationCallbacks); + } + + if (pSound != NULL) { /* Safety check for the allocation above. */ + /* + At this point we should have memory allocated for the inlined sound. We just need + to initialize it like a normal sound now. + */ + soundFlags |= MA_SOUND_FLAG_ASYNC; /* For inlined sounds we don't want to be sitting around waiting for stuff to load so force an async load. */ + soundFlags |= MA_SOUND_FLAG_NO_DEFAULT_ATTACHMENT; /* We want specific control over where the sound is attached in the graph. We'll attach it manually just before playing the sound. */ + soundFlags |= MA_SOUND_FLAG_NO_PITCH; /* Pitching isn't usable with inlined sounds, so disable it to save on speed. */ + soundFlags |= MA_SOUND_FLAG_NO_SPATIALIZATION; /* Not currently doing spatialization with inlined sounds, but this might actually change later. For now disable spatialization. Will be removed if we ever add support for spatialization here. */ + + result = ma_sound_init_from_file(pEngine, pFilePath, soundFlags, NULL, NULL, &pSound->sound); + if (result == MA_SUCCESS) { + /* Now attach the sound to the graph. */ + result = ma_node_attach_output_bus(pSound, 0, pNode, nodeInputBusIndex); + if (result == MA_SUCCESS) { + /* At this point the sound should be loaded and we can go ahead and add it to the list. The new item becomes the new head. */ + pSound->pNext = pEngine->pInlinedSoundHead; + pSound->pPrev = NULL; + + pEngine->pInlinedSoundHead = pSound; /* <-- This is what attaches the sound to the list. 
*/ + if (pSound->pNext != NULL) { + pSound->pNext->pPrev = pSound; + } + } else { + ma_free(pSound, &pEngine->allocationCallbacks); + } + } else { + ma_free(pSound, &pEngine->allocationCallbacks); + } + } else { + result = MA_OUT_OF_MEMORY; + } + } + ma_spinlock_unlock(&pEngine->inlinedSoundLock); + + if (result != MA_SUCCESS) { + return result; + } + + /* Finally we can start playing the sound. */ + result = ma_sound_start(&pSound->sound); + if (result != MA_SUCCESS) { + /* Failed to start the sound. We need to mark it for recycling and return an error. */ + ma_atomic_exchange_32(&pSound->sound.atEnd, MA_TRUE); + return result; + } + + ma_atomic_fetch_add_32(&pEngine->inlinedSoundCount, 1); + return result; +} + +MA_API ma_result ma_engine_play_sound(ma_engine* pEngine, const char* pFilePath, ma_sound_group* pGroup) +{ + return ma_engine_play_sound_ex(pEngine, pFilePath, pGroup, 0); +} +#endif + + +static ma_result ma_sound_preinit(ma_engine* pEngine, ma_sound* pSound) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pSound); + pSound->seekTarget = MA_SEEK_TARGET_NONE; + + if (pEngine == NULL) { + return MA_INVALID_ARGS; + } + + return MA_SUCCESS; +} + +static ma_result ma_sound_init_from_data_source_internal(ma_engine* pEngine, const ma_sound_config* pConfig, ma_sound* pSound) +{ + ma_result result; + ma_engine_node_config engineNodeConfig; + ma_engine_node_type type; /* Will be set to ma_engine_node_type_group if no data source is specified. */ + + /* Do not clear pSound to zero here - that's done at a higher level with ma_sound_preinit(). */ + MA_ASSERT(pEngine != NULL); + MA_ASSERT(pSound != NULL); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + pSound->pDataSource = pConfig->pDataSource; + + if (pConfig->pDataSource != NULL) { + type = ma_engine_node_type_sound; + } else { + type = ma_engine_node_type_group; + } + + /* + Sounds are engine nodes. Before we can initialize this we need to determine the channel count. + If we can't do this we need to abort. It's up to the caller to ensure they're using a data + source that provides this information upfront. + */ + engineNodeConfig = ma_engine_node_config_init(pEngine, type, pConfig->flags); + engineNodeConfig.channelsIn = pConfig->channelsIn; + engineNodeConfig.channelsOut = pConfig->channelsOut; + engineNodeConfig.volumeSmoothTimeInPCMFrames = pConfig->volumeSmoothTimeInPCMFrames; + engineNodeConfig.monoExpansionMode = pConfig->monoExpansionMode; + + if (engineNodeConfig.volumeSmoothTimeInPCMFrames == 0) { + engineNodeConfig.volumeSmoothTimeInPCMFrames = pEngine->defaultVolumeSmoothTimeInPCMFrames; + } + + /* If we're loading from a data source the input channel count needs to be the data source's native channel count. */ + if (pConfig->pDataSource != NULL) { + result = ma_data_source_get_data_format(pConfig->pDataSource, NULL, &engineNodeConfig.channelsIn, &engineNodeConfig.sampleRate, NULL, 0); + if (result != MA_SUCCESS) { + return result; /* Failed to retrieve the channel count. */ + } + + if (engineNodeConfig.channelsIn == 0) { + return MA_INVALID_OPERATION; /* Invalid channel count. */ + } + + if (engineNodeConfig.channelsOut == MA_SOUND_SOURCE_CHANNEL_COUNT) { + engineNodeConfig.channelsOut = engineNodeConfig.channelsIn; + } + } + + + /* Getting here means we should have a valid channel count and we can initialize the engine node. 
*/ + result = ma_engine_node_init(&engineNodeConfig, &pEngine->allocationCallbacks, &pSound->engineNode); + if (result != MA_SUCCESS) { + return result; + } + + /* If no attachment is specified, attach the sound straight to the endpoint. */ + if (pConfig->pInitialAttachment == NULL) { + /* No group. Attach straight to the endpoint by default, unless the caller has requested that it not. */ + if ((pConfig->flags & MA_SOUND_FLAG_NO_DEFAULT_ATTACHMENT) == 0) { + result = ma_node_attach_output_bus(pSound, 0, ma_node_graph_get_endpoint(&pEngine->nodeGraph), 0); + } + } else { + /* An attachment is specified. Attach to it by default. The sound has only a single output bus, and the config will specify which input bus to attach to. */ + result = ma_node_attach_output_bus(pSound, 0, pConfig->pInitialAttachment, pConfig->initialAttachmentInputBusIndex); + } + + if (result != MA_SUCCESS) { + ma_engine_node_uninit(&pSound->engineNode, &pEngine->allocationCallbacks); + return result; + } + + + /* Apply initial range and looping state to the data source if applicable. */ + if (pConfig->rangeBegInPCMFrames != 0 || pConfig->rangeEndInPCMFrames != ~((ma_uint64)0)) { + ma_data_source_set_range_in_pcm_frames(ma_sound_get_data_source(pSound), pConfig->rangeBegInPCMFrames, pConfig->rangeEndInPCMFrames); + } + + if (pConfig->loopPointBegInPCMFrames != 0 || pConfig->loopPointEndInPCMFrames != ~((ma_uint64)0)) { + ma_data_source_set_range_in_pcm_frames(ma_sound_get_data_source(pSound), pConfig->loopPointBegInPCMFrames, pConfig->loopPointEndInPCMFrames); + } + + ma_sound_set_looping(pSound, pConfig->isLooping); + + return MA_SUCCESS; +} + +#ifndef MA_NO_RESOURCE_MANAGER +MA_API ma_result ma_sound_init_from_file_internal(ma_engine* pEngine, const ma_sound_config* pConfig, ma_sound* pSound) +{ + ma_result result = MA_SUCCESS; + ma_uint32 flags; + ma_sound_config config; + ma_resource_manager_pipeline_notifications notifications; + + /* + The engine requires knowledge of the channel count of the underlying data source before it can + initialize the sound. Therefore, we need to make the resource manager wait until initialization + of the underlying data source to be initialized so we can get access to the channel count. To + do this, the MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT is forced. + + Because we're initializing the data source before the sound, there's a chance the notification + will get triggered before this function returns. This is OK, so long as the caller is aware of + it and can avoid accessing the sound from within the notification. + */ + flags = pConfig->flags | MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT; + + pSound->pResourceManagerDataSource = (ma_resource_manager_data_source*)ma_malloc(sizeof(*pSound->pResourceManagerDataSource), &pEngine->allocationCallbacks); + if (pSound->pResourceManagerDataSource == NULL) { + return MA_OUT_OF_MEMORY; + } + + /* Removed in 0.12. Set pDoneFence on the notifications. */ + notifications = pConfig->initNotifications; + if (pConfig->pDoneFence != NULL && notifications.done.pFence == NULL) { + notifications.done.pFence = pConfig->pDoneFence; + } + + /* + We must wrap everything around the fence if one was specified. This ensures ma_fence_wait() does + not return prematurely before the sound has finished initializing. 
+ */ + if (notifications.done.pFence) { ma_fence_acquire(notifications.done.pFence); } + { + ma_resource_manager_data_source_config resourceManagerDataSourceConfig = ma_resource_manager_data_source_config_init(); + resourceManagerDataSourceConfig.pFilePath = pConfig->pFilePath; + resourceManagerDataSourceConfig.pFilePathW = pConfig->pFilePathW; + resourceManagerDataSourceConfig.flags = flags; + resourceManagerDataSourceConfig.pNotifications = ¬ifications; + resourceManagerDataSourceConfig.initialSeekPointInPCMFrames = pConfig->initialSeekPointInPCMFrames; + resourceManagerDataSourceConfig.rangeBegInPCMFrames = pConfig->rangeBegInPCMFrames; + resourceManagerDataSourceConfig.rangeEndInPCMFrames = pConfig->rangeEndInPCMFrames; + resourceManagerDataSourceConfig.loopPointBegInPCMFrames = pConfig->loopPointBegInPCMFrames; + resourceManagerDataSourceConfig.loopPointEndInPCMFrames = pConfig->loopPointEndInPCMFrames; + resourceManagerDataSourceConfig.isLooping = pConfig->isLooping; + + result = ma_resource_manager_data_source_init_ex(pEngine->pResourceManager, &resourceManagerDataSourceConfig, pSound->pResourceManagerDataSource); + if (result != MA_SUCCESS) { + goto done; + } + + pSound->ownsDataSource = MA_TRUE; /* <-- Important. Not setting this will result in the resource manager data source never getting uninitialized. */ + + /* We need to use a slightly customized version of the config so we'll need to make a copy. */ + config = *pConfig; + config.pFilePath = NULL; + config.pFilePathW = NULL; + config.pDataSource = pSound->pResourceManagerDataSource; + + result = ma_sound_init_from_data_source_internal(pEngine, &config, pSound); + if (result != MA_SUCCESS) { + ma_resource_manager_data_source_uninit(pSound->pResourceManagerDataSource); + ma_free(pSound->pResourceManagerDataSource, &pEngine->allocationCallbacks); + MA_ZERO_OBJECT(pSound); + goto done; + } + } +done: + if (notifications.done.pFence) { ma_fence_release(notifications.done.pFence); } + return result; +} + +MA_API ma_result ma_sound_init_from_file(ma_engine* pEngine, const char* pFilePath, ma_uint32 flags, ma_sound_group* pGroup, ma_fence* pDoneFence, ma_sound* pSound) +{ + ma_sound_config config; + + if (pFilePath == NULL) { + return MA_INVALID_ARGS; + } + + config = ma_sound_config_init_2(pEngine); + config.pFilePath = pFilePath; + config.flags = flags; + config.pInitialAttachment = pGroup; + config.pDoneFence = pDoneFence; + + return ma_sound_init_ex(pEngine, &config, pSound); +} + +MA_API ma_result ma_sound_init_from_file_w(ma_engine* pEngine, const wchar_t* pFilePath, ma_uint32 flags, ma_sound_group* pGroup, ma_fence* pDoneFence, ma_sound* pSound) +{ + ma_sound_config config; + + if (pFilePath == NULL) { + return MA_INVALID_ARGS; + } + + config = ma_sound_config_init_2(pEngine); + config.pFilePathW = pFilePath; + config.flags = flags; + config.pInitialAttachment = pGroup; + config.pDoneFence = pDoneFence; + + return ma_sound_init_ex(pEngine, &config, pSound); +} + +MA_API ma_result ma_sound_init_copy(ma_engine* pEngine, const ma_sound* pExistingSound, ma_uint32 flags, ma_sound_group* pGroup, ma_sound* pSound) +{ + ma_result result; + ma_sound_config config; + + result = ma_sound_preinit(pEngine, pSound); + if (result != MA_SUCCESS) { + return result; + } + + if (pExistingSound == NULL) { + return MA_INVALID_ARGS; + } + + /* Cloning only works for data buffers (not streams) that are loaded from the resource manager. 
    */
+    if (pExistingSound->pResourceManagerDataSource == NULL) {
+        return MA_INVALID_OPERATION;
+    }
+
+    /*
+    We need to make a clone of the data source. If the data source is not a data buffer (i.e. a stream)
+    this will fail.
+    */
+    pSound->pResourceManagerDataSource = (ma_resource_manager_data_source*)ma_malloc(sizeof(*pSound->pResourceManagerDataSource), &pEngine->allocationCallbacks);
+    if (pSound->pResourceManagerDataSource == NULL) {
+        return MA_OUT_OF_MEMORY;
+    }
+
+    result = ma_resource_manager_data_source_init_copy(pEngine->pResourceManager, pExistingSound->pResourceManagerDataSource, pSound->pResourceManagerDataSource);
+    if (result != MA_SUCCESS) {
+        ma_free(pSound->pResourceManagerDataSource, &pEngine->allocationCallbacks);
+        return result;
+    }
+
+    config = ma_sound_config_init_2(pEngine);
+    config.pDataSource = pSound->pResourceManagerDataSource;
+    config.flags = flags;
+    config.pInitialAttachment = pGroup;
+    config.monoExpansionMode = pExistingSound->engineNode.monoExpansionMode;
+    config.volumeSmoothTimeInPCMFrames = pExistingSound->engineNode.volumeSmoothTimeInPCMFrames;
+
+    result = ma_sound_init_from_data_source_internal(pEngine, &config, pSound);
+    if (result != MA_SUCCESS) {
+        ma_resource_manager_data_source_uninit(pSound->pResourceManagerDataSource);
+        ma_free(pSound->pResourceManagerDataSource, &pEngine->allocationCallbacks);
+        MA_ZERO_OBJECT(pSound);
+        return result;
+    }
+
+    /* Make sure the sound is marked as the owner of the data source or else it will never get uninitialized. */
+    pSound->ownsDataSource = MA_TRUE;
+
+    return MA_SUCCESS;
+}
+#endif
+
+MA_API ma_result ma_sound_init_from_data_source(ma_engine* pEngine, ma_data_source* pDataSource, ma_uint32 flags, ma_sound_group* pGroup, ma_sound* pSound)
+{
+    ma_sound_config config = ma_sound_config_init_2(pEngine);
+    config.pDataSource = pDataSource;
+    config.flags = flags;
+    config.pInitialAttachment = pGroup;
+    return ma_sound_init_ex(pEngine, &config, pSound);
+}
+
+MA_API ma_result ma_sound_init_ex(ma_engine* pEngine, const ma_sound_config* pConfig, ma_sound* pSound)
+{
+    ma_result result;
+
+    result = ma_sound_preinit(pEngine, pSound);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    if (pConfig == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    pSound->endCallback = pConfig->endCallback;
+    pSound->pEndCallbackUserData = pConfig->pEndCallbackUserData;
+
+    /* We need to load the sound differently depending on whether or not we're loading from a file. */
+#ifndef MA_NO_RESOURCE_MANAGER
+    if (pConfig->pFilePath != NULL || pConfig->pFilePathW != NULL) {
+        return ma_sound_init_from_file_internal(pEngine, pConfig, pSound);
+    } else
+#endif
+    {
+        /*
+        Getting here means we're not loading from a file. We may be loading from an already-initialized
+        data source, or none at all. If we aren't specifying any data source, we'll be initializing the
+        equivalent of a group. ma_sound_init_from_data_source_internal() will deal with this for us, so
+        no special treatment is required here.
+        */
+        return ma_sound_init_from_data_source_internal(pEngine, pConfig, pSound);
+    }
+}
+
+MA_API void ma_sound_uninit(ma_sound* pSound)
+{
+    if (pSound == NULL) {
+        return;
+    }
+
+    /*
+    Always uninitialize the node first. This ensures it's detached from the graph and does not return until it has done
+    so, which makes thread safety beyond this point trivial.
+ */ + ma_engine_node_uninit(&pSound->engineNode, &pSound->engineNode.pEngine->allocationCallbacks); + + /* Once the sound is detached from the group we can guarantee that it won't be referenced by the mixer thread which means it's safe for us to destroy the data source. */ +#ifndef MA_NO_RESOURCE_MANAGER + if (pSound->ownsDataSource) { + ma_resource_manager_data_source_uninit(pSound->pResourceManagerDataSource); + ma_free(pSound->pResourceManagerDataSource, &pSound->engineNode.pEngine->allocationCallbacks); + pSound->pDataSource = NULL; + } +#else + MA_ASSERT(pSound->ownsDataSource == MA_FALSE); +#endif +} + +MA_API ma_engine* ma_sound_get_engine(const ma_sound* pSound) +{ + if (pSound == NULL) { + return NULL; + } + + return pSound->engineNode.pEngine; +} + +MA_API ma_data_source* ma_sound_get_data_source(const ma_sound* pSound) +{ + if (pSound == NULL) { + return NULL; + } + + return pSound->pDataSource; +} + +MA_API ma_result ma_sound_start(ma_sound* pSound) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* If the sound is already playing, do nothing. */ + if (ma_sound_is_playing(pSound)) { + return MA_SUCCESS; + } + + /* If the sound is at the end it means we want to start from the start again. */ + if (ma_sound_at_end(pSound)) { + ma_result result = ma_data_source_seek_to_pcm_frame(pSound->pDataSource, 0); + if (result != MA_SUCCESS && result != MA_NOT_IMPLEMENTED) { + return result; /* Failed to seek back to the start. */ + } + + /* Make sure we clear the end indicator. */ + ma_atomic_exchange_32(&pSound->atEnd, MA_FALSE); + } + + /* Make sure the sound is started. If there's a start delay, the sound won't actually start until the start time is reached. */ + ma_node_set_state(pSound, ma_node_state_started); + + return MA_SUCCESS; +} + +MA_API ma_result ma_sound_stop(ma_sound* pSound) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* This will stop the sound immediately. Use ma_sound_set_stop_time() to stop the sound at a specific time. */ + ma_node_set_state(pSound, ma_node_state_stopped); + + return MA_SUCCESS; +} + +MA_API ma_result ma_sound_stop_with_fade_in_pcm_frames(ma_sound* pSound, ma_uint64 fadeLengthInFrames) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* Stopping with a fade out requires us to schedule the stop into the future by the fade length. 
*/ + ma_sound_set_stop_time_with_fade_in_pcm_frames(pSound, ma_engine_get_time(ma_sound_get_engine(pSound)) + fadeLengthInFrames, fadeLengthInFrames); + + return MA_SUCCESS; +} + +MA_API ma_result ma_sound_stop_with_fade_in_milliseconds(ma_sound* pSound, ma_uint64 fadeLengthInMilliseconds) +{ + ma_uint64 sampleRate; + + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + sampleRate = ma_engine_get_sample_rate(ma_sound_get_engine(pSound)); + + return ma_sound_stop_with_fade_in_pcm_frames(pSound, (fadeLengthInMilliseconds * sampleRate) / 1000); +} + +MA_API void ma_sound_set_volume(ma_sound* pSound, float volume) +{ + if (pSound == NULL) { + return; + } + + ma_engine_node_set_volume(&pSound->engineNode, volume); +} + +MA_API float ma_sound_get_volume(const ma_sound* pSound) +{ + float volume = 0; + + if (pSound == NULL) { + return 0; + } + + ma_engine_node_get_volume(&pSound->engineNode, &volume); + + return volume; +} + +MA_API void ma_sound_set_pan(ma_sound* pSound, float pan) +{ + if (pSound == NULL) { + return; + } + + ma_panner_set_pan(&pSound->engineNode.panner, pan); +} + +MA_API float ma_sound_get_pan(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 0; + } + + return ma_panner_get_pan(&pSound->engineNode.panner); +} + +MA_API void ma_sound_set_pan_mode(ma_sound* pSound, ma_pan_mode panMode) +{ + if (pSound == NULL) { + return; + } + + ma_panner_set_mode(&pSound->engineNode.panner, panMode); +} + +MA_API ma_pan_mode ma_sound_get_pan_mode(const ma_sound* pSound) +{ + if (pSound == NULL) { + return ma_pan_mode_balance; + } + + return ma_panner_get_mode(&pSound->engineNode.panner); +} + +MA_API void ma_sound_set_pitch(ma_sound* pSound, float pitch) +{ + if (pSound == NULL) { + return; + } + + if (pitch <= 0) { + return; + } + + ma_atomic_exchange_explicit_f32(&pSound->engineNode.pitch, pitch, ma_atomic_memory_order_release); +} + +MA_API float ma_sound_get_pitch(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 0; + } + + return ma_atomic_load_f32(&pSound->engineNode.pitch); /* Naughty const-cast for this. 
*/ +} + +MA_API void ma_sound_set_spatialization_enabled(ma_sound* pSound, ma_bool32 enabled) +{ + if (pSound == NULL) { + return; + } + + ma_atomic_exchange_explicit_32(&pSound->engineNode.isSpatializationDisabled, !enabled, ma_atomic_memory_order_release); +} + +MA_API ma_bool32 ma_sound_is_spatialization_enabled(const ma_sound* pSound) +{ + if (pSound == NULL) { + return MA_FALSE; + } + + return ma_engine_node_is_spatialization_enabled(&pSound->engineNode); +} + +MA_API void ma_sound_set_pinned_listener_index(ma_sound* pSound, ma_uint32 listenerIndex) +{ + if (pSound == NULL || listenerIndex >= ma_engine_get_listener_count(ma_sound_get_engine(pSound))) { + return; + } + + ma_atomic_exchange_explicit_32(&pSound->engineNode.pinnedListenerIndex, listenerIndex, ma_atomic_memory_order_release); +} + +MA_API ma_uint32 ma_sound_get_pinned_listener_index(const ma_sound* pSound) +{ + if (pSound == NULL) { + return MA_LISTENER_INDEX_CLOSEST; + } + + return ma_atomic_load_explicit_32(&pSound->engineNode.pinnedListenerIndex, ma_atomic_memory_order_acquire); +} + +MA_API ma_uint32 ma_sound_get_listener_index(const ma_sound* pSound) +{ + ma_uint32 listenerIndex; + + if (pSound == NULL) { + return 0; + } + + listenerIndex = ma_sound_get_pinned_listener_index(pSound); + if (listenerIndex == MA_LISTENER_INDEX_CLOSEST) { + ma_vec3f position = ma_sound_get_position(pSound); + return ma_engine_find_closest_listener(ma_sound_get_engine(pSound), position.x, position.y, position.z); + } + + return listenerIndex; +} + +MA_API ma_vec3f ma_sound_get_direction_to_listener(const ma_sound* pSound) +{ + ma_vec3f relativePos; + ma_engine* pEngine; + + if (pSound == NULL) { + return ma_vec3f_init_3f(0, 0, -1); + } + + pEngine = ma_sound_get_engine(pSound); + if (pEngine == NULL) { + return ma_vec3f_init_3f(0, 0, -1); + } + + ma_spatializer_get_relative_position_and_direction(&pSound->engineNode.spatializer, &pEngine->listeners[ma_sound_get_listener_index(pSound)], &relativePos, NULL); + + return ma_vec3f_normalize(ma_vec3f_neg(relativePos)); +} + +MA_API void ma_sound_set_position(ma_sound* pSound, float x, float y, float z) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_position(&pSound->engineNode.spatializer, x, y, z); +} + +MA_API ma_vec3f ma_sound_get_position(const ma_sound* pSound) +{ + if (pSound == NULL) { + return ma_vec3f_init_3f(0, 0, 0); + } + + return ma_spatializer_get_position(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_direction(ma_sound* pSound, float x, float y, float z) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_direction(&pSound->engineNode.spatializer, x, y, z); +} + +MA_API ma_vec3f ma_sound_get_direction(const ma_sound* pSound) +{ + if (pSound == NULL) { + return ma_vec3f_init_3f(0, 0, 0); + } + + return ma_spatializer_get_direction(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_velocity(ma_sound* pSound, float x, float y, float z) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_velocity(&pSound->engineNode.spatializer, x, y, z); +} + +MA_API ma_vec3f ma_sound_get_velocity(const ma_sound* pSound) +{ + if (pSound == NULL) { + return ma_vec3f_init_3f(0, 0, 0); + } + + return ma_spatializer_get_velocity(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_attenuation_model(ma_sound* pSound, ma_attenuation_model attenuationModel) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_attenuation_model(&pSound->engineNode.spatializer, attenuationModel); +} + +MA_API ma_attenuation_model 
ma_sound_get_attenuation_model(const ma_sound* pSound) +{ + if (pSound == NULL) { + return ma_attenuation_model_none; + } + + return ma_spatializer_get_attenuation_model(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_positioning(ma_sound* pSound, ma_positioning positioning) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_positioning(&pSound->engineNode.spatializer, positioning); +} + +MA_API ma_positioning ma_sound_get_positioning(const ma_sound* pSound) +{ + if (pSound == NULL) { + return ma_positioning_absolute; + } + + return ma_spatializer_get_positioning(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_rolloff(ma_sound* pSound, float rolloff) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_rolloff(&pSound->engineNode.spatializer, rolloff); +} + +MA_API float ma_sound_get_rolloff(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 0; + } + + return ma_spatializer_get_rolloff(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_min_gain(ma_sound* pSound, float minGain) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_min_gain(&pSound->engineNode.spatializer, minGain); +} + +MA_API float ma_sound_get_min_gain(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 0; + } + + return ma_spatializer_get_min_gain(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_max_gain(ma_sound* pSound, float maxGain) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_max_gain(&pSound->engineNode.spatializer, maxGain); +} + +MA_API float ma_sound_get_max_gain(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 0; + } + + return ma_spatializer_get_max_gain(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_min_distance(ma_sound* pSound, float minDistance) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_min_distance(&pSound->engineNode.spatializer, minDistance); +} + +MA_API float ma_sound_get_min_distance(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 0; + } + + return ma_spatializer_get_min_distance(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_max_distance(ma_sound* pSound, float maxDistance) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_max_distance(&pSound->engineNode.spatializer, maxDistance); +} + +MA_API float ma_sound_get_max_distance(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 0; + } + + return ma_spatializer_get_max_distance(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_cone(ma_sound* pSound, float innerAngleInRadians, float outerAngleInRadians, float outerGain) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_cone(&pSound->engineNode.spatializer, innerAngleInRadians, outerAngleInRadians, outerGain); +} + +MA_API void ma_sound_get_cone(const ma_sound* pSound, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain) +{ + if (pInnerAngleInRadians != NULL) { + *pInnerAngleInRadians = 0; + } + + if (pOuterAngleInRadians != NULL) { + *pOuterAngleInRadians = 0; + } + + if (pOuterGain != NULL) { + *pOuterGain = 0; + } + + ma_spatializer_get_cone(&pSound->engineNode.spatializer, pInnerAngleInRadians, pOuterAngleInRadians, pOuterGain); +} + +MA_API void ma_sound_set_doppler_factor(ma_sound* pSound, float dopplerFactor) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_doppler_factor(&pSound->engineNode.spatializer, dopplerFactor); +} + +MA_API float ma_sound_get_doppler_factor(const ma_sound* pSound) +{ + 
if (pSound == NULL) { + return 0; + } + + return ma_spatializer_get_doppler_factor(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_directional_attenuation_factor(ma_sound* pSound, float directionalAttenuationFactor) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_directional_attenuation_factor(&pSound->engineNode.spatializer, directionalAttenuationFactor); +} + +MA_API float ma_sound_get_directional_attenuation_factor(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 1; + } + + return ma_spatializer_get_directional_attenuation_factor(&pSound->engineNode.spatializer); +} + + +MA_API void ma_sound_set_fade_in_pcm_frames(ma_sound* pSound, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInFrames) +{ + if (pSound == NULL) { + return; + } + + ma_sound_set_fade_start_in_pcm_frames(pSound, volumeBeg, volumeEnd, fadeLengthInFrames, (~(ma_uint64)0)); +} + +MA_API void ma_sound_set_fade_in_milliseconds(ma_sound* pSound, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInMilliseconds) +{ + if (pSound == NULL) { + return; + } + + ma_sound_set_fade_in_pcm_frames(pSound, volumeBeg, volumeEnd, (fadeLengthInMilliseconds * pSound->engineNode.fader.config.sampleRate) / 1000); +} + +MA_API void ma_sound_set_fade_start_in_pcm_frames(ma_sound* pSound, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInFrames, ma_uint64 absoluteGlobalTimeInFrames) +{ + if (pSound == NULL) { + return; + } + + /* + We don't want to update the fader at this point because we need to use the engine's current time + to derive the fader's start offset. The timer is being updated on the audio thread so in order to + do this as accurately as possible we'll need to defer this to the audio thread. + */ + ma_atomic_float_set(&pSound->engineNode.fadeSettings.volumeBeg, volumeBeg); + ma_atomic_float_set(&pSound->engineNode.fadeSettings.volumeEnd, volumeEnd); + ma_atomic_uint64_set(&pSound->engineNode.fadeSettings.fadeLengthInFrames, fadeLengthInFrames); + ma_atomic_uint64_set(&pSound->engineNode.fadeSettings.absoluteGlobalTimeInFrames, absoluteGlobalTimeInFrames); +} + +MA_API void ma_sound_set_fade_start_in_milliseconds(ma_sound* pSound, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInMilliseconds, ma_uint64 absoluteGlobalTimeInMilliseconds) +{ + ma_uint32 sampleRate; + + if (pSound == NULL) { + return; + } + + sampleRate = ma_engine_get_sample_rate(ma_sound_get_engine(pSound)); + + ma_sound_set_fade_start_in_pcm_frames(pSound, volumeBeg, volumeEnd, (fadeLengthInMilliseconds * sampleRate) / 1000, (absoluteGlobalTimeInMilliseconds * sampleRate) / 1000); +} + +MA_API float ma_sound_get_current_fade_volume(const ma_sound* pSound) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + return ma_fader_get_current_volume(&pSound->engineNode.fader); +} + +MA_API void ma_sound_set_start_time_in_pcm_frames(ma_sound* pSound, ma_uint64 absoluteGlobalTimeInFrames) +{ + if (pSound == NULL) { + return; + } + + ma_node_set_state_time(pSound, ma_node_state_started, absoluteGlobalTimeInFrames); +} + +MA_API void ma_sound_set_start_time_in_milliseconds(ma_sound* pSound, ma_uint64 absoluteGlobalTimeInMilliseconds) +{ + if (pSound == NULL) { + return; + } + + ma_sound_set_start_time_in_pcm_frames(pSound, absoluteGlobalTimeInMilliseconds * ma_engine_get_sample_rate(ma_sound_get_engine(pSound)) / 1000); +} + +MA_API void ma_sound_set_stop_time_in_pcm_frames(ma_sound* pSound, ma_uint64 absoluteGlobalTimeInFrames) +{ + if (pSound == NULL) { + return; + } + + 
ma_sound_set_stop_time_with_fade_in_pcm_frames(pSound, absoluteGlobalTimeInFrames, 0); +} + +MA_API void ma_sound_set_stop_time_in_milliseconds(ma_sound* pSound, ma_uint64 absoluteGlobalTimeInMilliseconds) +{ + if (pSound == NULL) { + return; + } + + ma_sound_set_stop_time_in_pcm_frames(pSound, absoluteGlobalTimeInMilliseconds * ma_engine_get_sample_rate(ma_sound_get_engine(pSound)) / 1000); +} + +MA_API void ma_sound_set_stop_time_with_fade_in_pcm_frames(ma_sound* pSound, ma_uint64 stopAbsoluteGlobalTimeInFrames, ma_uint64 fadeLengthInFrames) +{ + if (pSound == NULL) { + return; + } + + if (fadeLengthInFrames > 0) { + if (fadeLengthInFrames > stopAbsoluteGlobalTimeInFrames) { + fadeLengthInFrames = stopAbsoluteGlobalTimeInFrames; + } + + ma_sound_set_fade_start_in_pcm_frames(pSound, -1, 0, fadeLengthInFrames, stopAbsoluteGlobalTimeInFrames - fadeLengthInFrames); + } + + ma_node_set_state_time(pSound, ma_node_state_stopped, stopAbsoluteGlobalTimeInFrames); +} + +MA_API void ma_sound_set_stop_time_with_fade_in_milliseconds(ma_sound* pSound, ma_uint64 stopAbsoluteGlobalTimeInMilliseconds, ma_uint64 fadeLengthInMilliseconds) +{ + ma_uint32 sampleRate; + + if (pSound == NULL) { + return; + } + + sampleRate = ma_engine_get_sample_rate(ma_sound_get_engine(pSound)); + + ma_sound_set_stop_time_with_fade_in_pcm_frames(pSound, (stopAbsoluteGlobalTimeInMilliseconds * sampleRate) / 1000, (fadeLengthInMilliseconds * sampleRate) / 1000); +} + +MA_API ma_bool32 ma_sound_is_playing(const ma_sound* pSound) +{ + if (pSound == NULL) { + return MA_FALSE; + } + + return ma_node_get_state_by_time(pSound, ma_engine_get_time_in_pcm_frames(ma_sound_get_engine(pSound))) == ma_node_state_started; +} + +MA_API ma_uint64 ma_sound_get_time_in_pcm_frames(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 0; + } + + return ma_node_get_time(pSound); +} + +MA_API ma_uint64 ma_sound_get_time_in_milliseconds(const ma_sound* pSound) +{ + return ma_sound_get_time_in_pcm_frames(pSound) * 1000 / ma_engine_get_sample_rate(ma_sound_get_engine(pSound)); +} + +MA_API void ma_sound_set_looping(ma_sound* pSound, ma_bool32 isLooping) +{ + if (pSound == NULL) { + return; + } + + /* Looping is only a valid concept if the sound is backed by a data source. */ + if (pSound->pDataSource == NULL) { + return; + } + + /* The looping state needs to be applied to the data source in order for any looping to actually happen. */ + ma_data_source_set_looping(pSound->pDataSource, isLooping); +} + +MA_API ma_bool32 ma_sound_is_looping(const ma_sound* pSound) +{ + if (pSound == NULL) { + return MA_FALSE; + } + + /* There is no notion of looping for sounds that are not backed by a data source. */ + if (pSound->pDataSource == NULL) { + return MA_FALSE; + } + + return ma_data_source_is_looping(pSound->pDataSource); +} + +MA_API ma_bool32 ma_sound_at_end(const ma_sound* pSound) +{ + if (pSound == NULL) { + return MA_FALSE; + } + + /* There is no notion of an end of a sound if it's not backed by a data source. */ + if (pSound->pDataSource == NULL) { + return MA_FALSE; + } + + return ma_sound_get_at_end(pSound); +} + +MA_API ma_result ma_sound_seek_to_pcm_frame(ma_sound* pSound, ma_uint64 frameIndex) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* Seeking is only valid for sounds that are backed by a data source. */ + if (pSound->pDataSource == NULL) { + return MA_INVALID_OPERATION; + } + + /* We can't be seeking while reading at the same time. We just set the seek target and get the mixing thread to do the actual seek. 
*/ + ma_atomic_exchange_64(&pSound->seekTarget, frameIndex); + + return MA_SUCCESS; +} + +MA_API ma_result ma_sound_get_data_format(ma_sound* pSound, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* The data format is retrieved directly from the data source if the sound is backed by one. Otherwise we pull it from the node. */ + if (pSound->pDataSource == NULL) { + ma_uint32 channels; + + if (pFormat != NULL) { + *pFormat = ma_format_f32; + } + + channels = ma_node_get_input_channels(&pSound->engineNode, 0); + if (pChannels != NULL) { + *pChannels = channels; + } + + if (pSampleRate != NULL) { + *pSampleRate = pSound->engineNode.resampler.config.sampleRateIn; + } + + if (pChannelMap != NULL) { + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, channels); + } + + return MA_SUCCESS; + } else { + return ma_data_source_get_data_format(pSound->pDataSource, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); + } +} + +MA_API ma_result ma_sound_get_cursor_in_pcm_frames(ma_sound* pSound, ma_uint64* pCursor) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* The notion of a cursor is only valid for sounds that are backed by a data source. */ + if (pSound->pDataSource == NULL) { + return MA_INVALID_OPERATION; + } + + return ma_data_source_get_cursor_in_pcm_frames(pSound->pDataSource, pCursor); +} + +MA_API ma_result ma_sound_get_length_in_pcm_frames(ma_sound* pSound, ma_uint64* pLength) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* The notion of a sound length is only valid for sounds that are backed by a data source. */ + if (pSound->pDataSource == NULL) { + return MA_INVALID_OPERATION; + } + + return ma_data_source_get_length_in_pcm_frames(pSound->pDataSource, pLength); +} + +MA_API ma_result ma_sound_get_cursor_in_seconds(ma_sound* pSound, float* pCursor) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* The notion of a cursor is only valid for sounds that are backed by a data source. */ + if (pSound->pDataSource == NULL) { + return MA_INVALID_OPERATION; + } + + return ma_data_source_get_cursor_in_seconds(pSound->pDataSource, pCursor); +} + +MA_API ma_result ma_sound_get_length_in_seconds(ma_sound* pSound, float* pLength) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* The notion of a sound length is only valid for sounds that are backed by a data source. */ + if (pSound->pDataSource == NULL) { + return MA_INVALID_OPERATION; + } + + return ma_data_source_get_length_in_seconds(pSound->pDataSource, pLength); +} + +MA_API ma_result ma_sound_set_end_callback(ma_sound* pSound, ma_sound_end_proc callback, void* pUserData) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* The notion of an end is only valid for sounds that are backed by a data source. 
*/ + if (pSound->pDataSource == NULL) { + return MA_INVALID_OPERATION; + } + + pSound->endCallback = callback; + pSound->pEndCallbackUserData = pUserData; + + return MA_SUCCESS; +} + + +MA_API ma_result ma_sound_group_init(ma_engine* pEngine, ma_uint32 flags, ma_sound_group* pParentGroup, ma_sound_group* pGroup) +{ + ma_sound_group_config config = ma_sound_group_config_init_2(pEngine); + config.flags = flags; + config.pInitialAttachment = pParentGroup; + return ma_sound_group_init_ex(pEngine, &config, pGroup); +} + +MA_API ma_result ma_sound_group_init_ex(ma_engine* pEngine, const ma_sound_group_config* pConfig, ma_sound_group* pGroup) +{ + ma_sound_config soundConfig; + + if (pGroup == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pGroup); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* A sound group is just a sound without a data source. */ + soundConfig = *pConfig; + soundConfig.pFilePath = NULL; + soundConfig.pFilePathW = NULL; + soundConfig.pDataSource = NULL; + + /* + Groups need to have spatialization disabled by default because I think it'll be pretty rare + that programs will want to spatialize groups (but not unheard of). Certainly it feels like + disabling this by default feels like the right option. Spatialization can be enabled with a + call to ma_sound_group_set_spatialization_enabled(). + */ + soundConfig.flags |= MA_SOUND_FLAG_NO_SPATIALIZATION; + + return ma_sound_init_ex(pEngine, &soundConfig, pGroup); +} + +MA_API void ma_sound_group_uninit(ma_sound_group* pGroup) +{ + ma_sound_uninit(pGroup); +} + +MA_API ma_engine* ma_sound_group_get_engine(const ma_sound_group* pGroup) +{ + return ma_sound_get_engine(pGroup); +} + +MA_API ma_result ma_sound_group_start(ma_sound_group* pGroup) +{ + return ma_sound_start(pGroup); +} + +MA_API ma_result ma_sound_group_stop(ma_sound_group* pGroup) +{ + return ma_sound_stop(pGroup); +} + +MA_API void ma_sound_group_set_volume(ma_sound_group* pGroup, float volume) +{ + ma_sound_set_volume(pGroup, volume); +} + +MA_API float ma_sound_group_get_volume(const ma_sound_group* pGroup) +{ + return ma_sound_get_volume(pGroup); +} + +MA_API void ma_sound_group_set_pan(ma_sound_group* pGroup, float pan) +{ + ma_sound_set_pan(pGroup, pan); +} + +MA_API float ma_sound_group_get_pan(const ma_sound_group* pGroup) +{ + return ma_sound_get_pan(pGroup); +} + +MA_API void ma_sound_group_set_pan_mode(ma_sound_group* pGroup, ma_pan_mode panMode) +{ + ma_sound_set_pan_mode(pGroup, panMode); +} + +MA_API ma_pan_mode ma_sound_group_get_pan_mode(const ma_sound_group* pGroup) +{ + return ma_sound_get_pan_mode(pGroup); +} + +MA_API void ma_sound_group_set_pitch(ma_sound_group* pGroup, float pitch) +{ + ma_sound_set_pitch(pGroup, pitch); +} + +MA_API float ma_sound_group_get_pitch(const ma_sound_group* pGroup) +{ + return ma_sound_get_pitch(pGroup); +} + +MA_API void ma_sound_group_set_spatialization_enabled(ma_sound_group* pGroup, ma_bool32 enabled) +{ + ma_sound_set_spatialization_enabled(pGroup, enabled); +} + +MA_API ma_bool32 ma_sound_group_is_spatialization_enabled(const ma_sound_group* pGroup) +{ + return ma_sound_is_spatialization_enabled(pGroup); +} + +MA_API void ma_sound_group_set_pinned_listener_index(ma_sound_group* pGroup, ma_uint32 listenerIndex) +{ + ma_sound_set_pinned_listener_index(pGroup, listenerIndex); +} + +MA_API ma_uint32 ma_sound_group_get_pinned_listener_index(const ma_sound_group* pGroup) +{ + return ma_sound_get_pinned_listener_index(pGroup); +} + +MA_API ma_uint32 ma_sound_group_get_listener_index(const 
ma_sound_group* pGroup) +{ + return ma_sound_get_listener_index(pGroup); +} + +MA_API ma_vec3f ma_sound_group_get_direction_to_listener(const ma_sound_group* pGroup) +{ + return ma_sound_get_direction_to_listener(pGroup); +} + +MA_API void ma_sound_group_set_position(ma_sound_group* pGroup, float x, float y, float z) +{ + ma_sound_set_position(pGroup, x, y, z); +} + +MA_API ma_vec3f ma_sound_group_get_position(const ma_sound_group* pGroup) +{ + return ma_sound_get_position(pGroup); +} + +MA_API void ma_sound_group_set_direction(ma_sound_group* pGroup, float x, float y, float z) +{ + ma_sound_set_direction(pGroup, x, y, z); +} + +MA_API ma_vec3f ma_sound_group_get_direction(const ma_sound_group* pGroup) +{ + return ma_sound_get_direction(pGroup); +} + +MA_API void ma_sound_group_set_velocity(ma_sound_group* pGroup, float x, float y, float z) +{ + ma_sound_set_velocity(pGroup, x, y, z); +} + +MA_API ma_vec3f ma_sound_group_get_velocity(const ma_sound_group* pGroup) +{ + return ma_sound_get_velocity(pGroup); +} + +MA_API void ma_sound_group_set_attenuation_model(ma_sound_group* pGroup, ma_attenuation_model attenuationModel) +{ + ma_sound_set_attenuation_model(pGroup, attenuationModel); +} + +MA_API ma_attenuation_model ma_sound_group_get_attenuation_model(const ma_sound_group* pGroup) +{ + return ma_sound_get_attenuation_model(pGroup); +} + +MA_API void ma_sound_group_set_positioning(ma_sound_group* pGroup, ma_positioning positioning) +{ + ma_sound_set_positioning(pGroup, positioning); +} + +MA_API ma_positioning ma_sound_group_get_positioning(const ma_sound_group* pGroup) +{ + return ma_sound_get_positioning(pGroup); +} + +MA_API void ma_sound_group_set_rolloff(ma_sound_group* pGroup, float rolloff) +{ + ma_sound_set_rolloff(pGroup, rolloff); +} + +MA_API float ma_sound_group_get_rolloff(const ma_sound_group* pGroup) +{ + return ma_sound_get_rolloff(pGroup); +} + +MA_API void ma_sound_group_set_min_gain(ma_sound_group* pGroup, float minGain) +{ + ma_sound_set_min_gain(pGroup, minGain); +} + +MA_API float ma_sound_group_get_min_gain(const ma_sound_group* pGroup) +{ + return ma_sound_get_min_gain(pGroup); +} + +MA_API void ma_sound_group_set_max_gain(ma_sound_group* pGroup, float maxGain) +{ + ma_sound_set_max_gain(pGroup, maxGain); +} + +MA_API float ma_sound_group_get_max_gain(const ma_sound_group* pGroup) +{ + return ma_sound_get_max_gain(pGroup); +} + +MA_API void ma_sound_group_set_min_distance(ma_sound_group* pGroup, float minDistance) +{ + ma_sound_set_min_distance(pGroup, minDistance); +} + +MA_API float ma_sound_group_get_min_distance(const ma_sound_group* pGroup) +{ + return ma_sound_get_min_distance(pGroup); +} + +MA_API void ma_sound_group_set_max_distance(ma_sound_group* pGroup, float maxDistance) +{ + ma_sound_set_max_distance(pGroup, maxDistance); +} + +MA_API float ma_sound_group_get_max_distance(const ma_sound_group* pGroup) +{ + return ma_sound_get_max_distance(pGroup); +} + +MA_API void ma_sound_group_set_cone(ma_sound_group* pGroup, float innerAngleInRadians, float outerAngleInRadians, float outerGain) +{ + ma_sound_set_cone(pGroup, innerAngleInRadians, outerAngleInRadians, outerGain); +} + +MA_API void ma_sound_group_get_cone(const ma_sound_group* pGroup, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain) +{ + ma_sound_get_cone(pGroup, pInnerAngleInRadians, pOuterAngleInRadians, pOuterGain); +} + +MA_API void ma_sound_group_set_doppler_factor(ma_sound_group* pGroup, float dopplerFactor) +{ + ma_sound_set_doppler_factor(pGroup, dopplerFactor); +} + 
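+/*
+A minimal usage sketch of the group API in this section: the ma_sound_group_* functions are thin
+wrappers around their ma_sound_* counterparts, because a group is just a sound with no data source.
+Routing a sound through a group lets one volume call scale every sound attached to that group. The
+sketch assumes the library is included as "miniaudio.h" and that a file named "sfx.wav" exists; both
+names are placeholders, not taken from this repository.
+
+    #include "miniaudio.h"
+    #include <stdio.h>
+
+    int main(void)
+    {
+        ma_engine engine;
+        ma_sound_group sfxGroup;
+        ma_sound sound;
+
+        if (ma_engine_init(NULL, &engine) != MA_SUCCESS) {
+            return -1;
+        }
+
+        if (ma_sound_group_init(&engine, 0, NULL, &sfxGroup) != MA_SUCCESS) {
+            ma_engine_uninit(&engine);
+            return -1;
+        }
+
+        if (ma_sound_init_from_file(&engine, "sfx.wav", 0, &sfxGroup, NULL, &sound) != MA_SUCCESS) {
+            ma_sound_group_uninit(&sfxGroup);
+            ma_engine_uninit(&engine);
+            return -1;
+        }
+
+        ma_sound_group_set_volume(&sfxGroup, 0.5f);
+        ma_sound_start(&sound);
+
+        printf("Press Enter to quit...\n");
+        getchar();
+
+        ma_sound_uninit(&sound);
+        ma_sound_group_uninit(&sfxGroup);
+        ma_engine_uninit(&engine);
+        return 0;
+    }
+*/
+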
+MA_API float ma_sound_group_get_doppler_factor(const ma_sound_group* pGroup) +{ + return ma_sound_get_doppler_factor(pGroup); +} + +MA_API void ma_sound_group_set_directional_attenuation_factor(ma_sound_group* pGroup, float directionalAttenuationFactor) +{ + ma_sound_set_directional_attenuation_factor(pGroup, directionalAttenuationFactor); +} + +MA_API float ma_sound_group_get_directional_attenuation_factor(const ma_sound_group* pGroup) +{ + return ma_sound_get_directional_attenuation_factor(pGroup); +} + +MA_API void ma_sound_group_set_fade_in_pcm_frames(ma_sound_group* pGroup, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInFrames) +{ + ma_sound_set_fade_in_pcm_frames(pGroup, volumeBeg, volumeEnd, fadeLengthInFrames); +} + +MA_API void ma_sound_group_set_fade_in_milliseconds(ma_sound_group* pGroup, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInMilliseconds) +{ + ma_sound_set_fade_in_milliseconds(pGroup, volumeBeg, volumeEnd, fadeLengthInMilliseconds); +} + +MA_API float ma_sound_group_get_current_fade_volume(ma_sound_group* pGroup) +{ + return ma_sound_get_current_fade_volume(pGroup); +} + +MA_API void ma_sound_group_set_start_time_in_pcm_frames(ma_sound_group* pGroup, ma_uint64 absoluteGlobalTimeInFrames) +{ + ma_sound_set_start_time_in_pcm_frames(pGroup, absoluteGlobalTimeInFrames); +} + +MA_API void ma_sound_group_set_start_time_in_milliseconds(ma_sound_group* pGroup, ma_uint64 absoluteGlobalTimeInMilliseconds) +{ + ma_sound_set_start_time_in_milliseconds(pGroup, absoluteGlobalTimeInMilliseconds); +} + +MA_API void ma_sound_group_set_stop_time_in_pcm_frames(ma_sound_group* pGroup, ma_uint64 absoluteGlobalTimeInFrames) +{ + ma_sound_set_stop_time_in_pcm_frames(pGroup, absoluteGlobalTimeInFrames); +} + +MA_API void ma_sound_group_set_stop_time_in_milliseconds(ma_sound_group* pGroup, ma_uint64 absoluteGlobalTimeInMilliseconds) +{ + ma_sound_set_stop_time_in_milliseconds(pGroup, absoluteGlobalTimeInMilliseconds); +} + +MA_API ma_bool32 ma_sound_group_is_playing(const ma_sound_group* pGroup) +{ + return ma_sound_is_playing(pGroup); +} + +MA_API ma_uint64 ma_sound_group_get_time_in_pcm_frames(const ma_sound_group* pGroup) +{ + return ma_sound_get_time_in_pcm_frames(pGroup); +} +#endif /* MA_NO_ENGINE */ +/* END SECTION: miniaudio_engine.c */ + + + +/************************************************************************************************************************************************************** +*************************************************************************************************************************************************************** + +Auto Generated +============== +All code below is auto-generated from a tool. This mostly consists of decoding backend implementations such as ma_dr_wav, ma_dr_flac, etc. If you find a bug in the +code below please report the bug to the respective repository for the relevant project (probably dr_libs). + +*************************************************************************************************************************************************************** +**************************************************************************************************************************************************************/ +#if !defined(MA_NO_WAV) && (!defined(MA_NO_DECODING) || !defined(MA_NO_ENCODING)) +#if !defined(MA_DR_WAV_IMPLEMENTATION) && !defined(MA_DR_WAV_IMPLEMENTATION) /* For backwards compatibility. Will be removed in version 0.11 for cleanliness. 
*/ +/* dr_wav_c begin */ +#ifndef ma_dr_wav_c +#define ma_dr_wav_c +#ifdef __MRC__ +#pragma options opt off +#endif +#include <stdlib.h> +#include <string.h> +#include <limits.h> +#ifndef MA_DR_WAV_NO_STDIO +#include <stdio.h> +#ifndef MA_DR_WAV_NO_WCHAR +#include <wchar.h> +#endif +#endif +#ifndef MA_DR_WAV_ASSERT +#include <assert.h> +#define MA_DR_WAV_ASSERT(expression) assert(expression) +#endif +#ifndef MA_DR_WAV_MALLOC +#define MA_DR_WAV_MALLOC(sz) malloc((sz)) +#endif +#ifndef MA_DR_WAV_REALLOC +#define MA_DR_WAV_REALLOC(p, sz) realloc((p), (sz)) +#endif +#ifndef MA_DR_WAV_FREE +#define MA_DR_WAV_FREE(p) free((p)) +#endif +#ifndef MA_DR_WAV_COPY_MEMORY +#define MA_DR_WAV_COPY_MEMORY(dst, src, sz) memcpy((dst), (src), (sz)) +#endif +#ifndef MA_DR_WAV_ZERO_MEMORY +#define MA_DR_WAV_ZERO_MEMORY(p, sz) memset((p), 0, (sz)) +#endif +#ifndef MA_DR_WAV_ZERO_OBJECT +#define MA_DR_WAV_ZERO_OBJECT(p) MA_DR_WAV_ZERO_MEMORY((p), sizeof(*p)) +#endif +#define ma_dr_wav_countof(x) (sizeof(x) / sizeof(x[0])) +#define ma_dr_wav_align(x, a) ((((x) + (a) - 1) / (a)) * (a)) +#define ma_dr_wav_min(a, b) (((a) < (b)) ? (a) : (b)) +#define ma_dr_wav_max(a, b) (((a) > (b)) ? (a) : (b)) +#define ma_dr_wav_clamp(x, lo, hi) (ma_dr_wav_max((lo), ma_dr_wav_min((hi), (x)))) +#define ma_dr_wav_offset_ptr(p, offset) (((ma_uint8*)(p)) + (offset)) +#define MA_DR_WAV_MAX_SIMD_VECTOR_SIZE 32 +#define MA_DR_WAV_INT64_MIN ((ma_int64)0x80000000 << 32) +#define MA_DR_WAV_INT64_MAX ((((ma_int64)0x7FFFFFFF) << 32) | 0xFFFFFFFF) +#if defined(_MSC_VER) && _MSC_VER >= 1400 +#define MA_DR_WAV_HAS_BYTESWAP16_INTRINSIC +#define MA_DR_WAV_HAS_BYTESWAP32_INTRINSIC +#define MA_DR_WAV_HAS_BYTESWAP64_INTRINSIC +#elif defined(__clang__) +#if defined(__has_builtin) +#if __has_builtin(__builtin_bswap16) +#define MA_DR_WAV_HAS_BYTESWAP16_INTRINSIC +#endif +#if __has_builtin(__builtin_bswap32) +#define MA_DR_WAV_HAS_BYTESWAP32_INTRINSIC +#endif +#if __has_builtin(__builtin_bswap64) +#define MA_DR_WAV_HAS_BYTESWAP64_INTRINSIC +#endif +#endif +#elif defined(__GNUC__) +#if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) +#define MA_DR_WAV_HAS_BYTESWAP32_INTRINSIC +#define MA_DR_WAV_HAS_BYTESWAP64_INTRINSIC +#endif +#if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) +#define MA_DR_WAV_HAS_BYTESWAP16_INTRINSIC +#endif +#endif +MA_API void ma_dr_wav_version(ma_uint32* pMajor, ma_uint32* pMinor, ma_uint32* pRevision) +{ + if (pMajor) { + *pMajor = MA_DR_WAV_VERSION_MAJOR; + } + if (pMinor) { + *pMinor = MA_DR_WAV_VERSION_MINOR; + } + if (pRevision) { + *pRevision = MA_DR_WAV_VERSION_REVISION; + } +} +MA_API const char* ma_dr_wav_version_string(void) +{ + return MA_DR_WAV_VERSION_STRING; +} +#ifndef MA_DR_WAV_MAX_SAMPLE_RATE +#define MA_DR_WAV_MAX_SAMPLE_RATE 384000 +#endif +#ifndef MA_DR_WAV_MAX_CHANNELS +#define MA_DR_WAV_MAX_CHANNELS 256 +#endif +#ifndef MA_DR_WAV_MAX_BITS_PER_SAMPLE +#define MA_DR_WAV_MAX_BITS_PER_SAMPLE 64 +#endif +static const ma_uint8 ma_dr_wavGUID_W64_RIFF[16] = {0x72,0x69,0x66,0x66, 0x2E,0x91, 0xCF,0x11, 0xA5,0xD6, 0x28,0xDB,0x04,0xC1,0x00,0x00}; +static const ma_uint8 ma_dr_wavGUID_W64_WAVE[16] = {0x77,0x61,0x76,0x65, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A}; +static const ma_uint8 ma_dr_wavGUID_W64_FMT [16] = {0x66,0x6D,0x74,0x20, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A}; +static const ma_uint8 ma_dr_wavGUID_W64_FACT[16] = {0x66,0x61,0x63,0x74, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A}; +static const ma_uint8 ma_dr_wavGUID_W64_DATA[16] = {0x64,0x61,0x74,0x61, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 
0x00,0xC0,0x4F,0x8E,0xDB,0x8A}; +static MA_INLINE int ma_dr_wav__is_little_endian(void) +{ +#if defined(MA_X86) || defined(MA_X64) + return MA_TRUE; +#elif defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && __BYTE_ORDER == __LITTLE_ENDIAN + return MA_TRUE; +#else + int n = 1; + return (*(char*)&n) == 1; +#endif +} +static MA_INLINE void ma_dr_wav_bytes_to_guid(const ma_uint8* data, ma_uint8* guid) +{ + int i; + for (i = 0; i < 16; ++i) { + guid[i] = data[i]; + } +} +static MA_INLINE ma_uint16 ma_dr_wav__bswap16(ma_uint16 n) +{ +#ifdef MA_DR_WAV_HAS_BYTESWAP16_INTRINSIC +#if defined(_MSC_VER) + return _byteswap_ushort(n); +#elif defined(__GNUC__) || defined(__clang__) + return __builtin_bswap16(n); +#else +#error "This compiler does not support the byte swap intrinsic." +#endif +#else + return ((n & 0xFF00) >> 8) | + ((n & 0x00FF) << 8); +#endif +} +static MA_INLINE ma_uint32 ma_dr_wav__bswap32(ma_uint32 n) +{ +#ifdef MA_DR_WAV_HAS_BYTESWAP32_INTRINSIC +#if defined(_MSC_VER) + return _byteswap_ulong(n); +#elif defined(__GNUC__) || defined(__clang__) +#if defined(MA_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 6) && !defined(MA_64BIT) + ma_uint32 r; + __asm__ __volatile__ ( +#if defined(MA_64BIT) + "rev %w[out], %w[in]" : [out]"=r"(r) : [in]"r"(n) +#else + "rev %[out], %[in]" : [out]"=r"(r) : [in]"r"(n) +#endif + ); + return r; +#else + return __builtin_bswap32(n); +#endif +#else +#error "This compiler does not support the byte swap intrinsic." +#endif +#else + return ((n & 0xFF000000) >> 24) | + ((n & 0x00FF0000) >> 8) | + ((n & 0x0000FF00) << 8) | + ((n & 0x000000FF) << 24); +#endif +} +static MA_INLINE ma_uint64 ma_dr_wav__bswap64(ma_uint64 n) +{ +#ifdef MA_DR_WAV_HAS_BYTESWAP64_INTRINSIC +#if defined(_MSC_VER) + return _byteswap_uint64(n); +#elif defined(__GNUC__) || defined(__clang__) + return __builtin_bswap64(n); +#else +#error "This compiler does not support the byte swap intrinsic." 
+#endif +#else + return ((n & ((ma_uint64)0xFF000000 << 32)) >> 56) | + ((n & ((ma_uint64)0x00FF0000 << 32)) >> 40) | + ((n & ((ma_uint64)0x0000FF00 << 32)) >> 24) | + ((n & ((ma_uint64)0x000000FF << 32)) >> 8) | + ((n & ((ma_uint64)0xFF000000 )) << 8) | + ((n & ((ma_uint64)0x00FF0000 )) << 24) | + ((n & ((ma_uint64)0x0000FF00 )) << 40) | + ((n & ((ma_uint64)0x000000FF )) << 56); +#endif +} +static MA_INLINE ma_int16 ma_dr_wav__bswap_s16(ma_int16 n) +{ + return (ma_int16)ma_dr_wav__bswap16((ma_uint16)n); +} +static MA_INLINE void ma_dr_wav__bswap_samples_s16(ma_int16* pSamples, ma_uint64 sampleCount) +{ + ma_uint64 iSample; + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamples[iSample] = ma_dr_wav__bswap_s16(pSamples[iSample]); + } +} +static MA_INLINE void ma_dr_wav__bswap_s24(ma_uint8* p) +{ + ma_uint8 t; + t = p[0]; + p[0] = p[2]; + p[2] = t; +} +static MA_INLINE void ma_dr_wav__bswap_samples_s24(ma_uint8* pSamples, ma_uint64 sampleCount) +{ + ma_uint64 iSample; + for (iSample = 0; iSample < sampleCount; iSample += 1) { + ma_uint8* pSample = pSamples + (iSample*3); + ma_dr_wav__bswap_s24(pSample); + } +} +static MA_INLINE ma_int32 ma_dr_wav__bswap_s32(ma_int32 n) +{ + return (ma_int32)ma_dr_wav__bswap32((ma_uint32)n); +} +static MA_INLINE void ma_dr_wav__bswap_samples_s32(ma_int32* pSamples, ma_uint64 sampleCount) +{ + ma_uint64 iSample; + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamples[iSample] = ma_dr_wav__bswap_s32(pSamples[iSample]); + } +} +static MA_INLINE ma_int64 ma_dr_wav__bswap_s64(ma_int64 n) +{ + return (ma_int64)ma_dr_wav__bswap64((ma_uint64)n); +} +static MA_INLINE void ma_dr_wav__bswap_samples_s64(ma_int64* pSamples, ma_uint64 sampleCount) +{ + ma_uint64 iSample; + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamples[iSample] = ma_dr_wav__bswap_s64(pSamples[iSample]); + } +} +static MA_INLINE float ma_dr_wav__bswap_f32(float n) +{ + union { + ma_uint32 i; + float f; + } x; + x.f = n; + x.i = ma_dr_wav__bswap32(x.i); + return x.f; +} +static MA_INLINE void ma_dr_wav__bswap_samples_f32(float* pSamples, ma_uint64 sampleCount) +{ + ma_uint64 iSample; + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamples[iSample] = ma_dr_wav__bswap_f32(pSamples[iSample]); + } +} +static MA_INLINE void ma_dr_wav__bswap_samples(void* pSamples, ma_uint64 sampleCount, ma_uint32 bytesPerSample) +{ + switch (bytesPerSample) + { + case 1: + { + } break; + case 2: + { + ma_dr_wav__bswap_samples_s16((ma_int16*)pSamples, sampleCount); + } break; + case 3: + { + ma_dr_wav__bswap_samples_s24((ma_uint8*)pSamples, sampleCount); + } break; + case 4: + { + ma_dr_wav__bswap_samples_s32((ma_int32*)pSamples, sampleCount); + } break; + case 8: + { + ma_dr_wav__bswap_samples_s64((ma_int64*)pSamples, sampleCount); + } break; + default: + { + MA_DR_WAV_ASSERT(MA_FALSE); + } break; + } +} +MA_PRIVATE MA_INLINE ma_bool32 ma_dr_wav_is_container_be(ma_dr_wav_container container) +{ + if (container == ma_dr_wav_container_rifx || container == ma_dr_wav_container_aiff) { + return MA_TRUE; + } else { + return MA_FALSE; + } +} +MA_PRIVATE MA_INLINE ma_uint16 ma_dr_wav_bytes_to_u16_le(const ma_uint8* data) +{ + return ((ma_uint16)data[0] << 0) | ((ma_uint16)data[1] << 8); +} +MA_PRIVATE MA_INLINE ma_uint16 ma_dr_wav_bytes_to_u16_be(const ma_uint8* data) +{ + return ((ma_uint16)data[1] << 0) | ((ma_uint16)data[0] << 8); +} +MA_PRIVATE MA_INLINE ma_uint16 ma_dr_wav_bytes_to_u16_ex(const ma_uint8* data, ma_dr_wav_container container) +{ + if 
(ma_dr_wav_is_container_be(container)) { + return ma_dr_wav_bytes_to_u16_be(data); + } else { + return ma_dr_wav_bytes_to_u16_le(data); + } +} +MA_PRIVATE MA_INLINE ma_uint32 ma_dr_wav_bytes_to_u32_le(const ma_uint8* data) +{ + return ((ma_uint32)data[0] << 0) | ((ma_uint32)data[1] << 8) | ((ma_uint32)data[2] << 16) | ((ma_uint32)data[3] << 24); +} +MA_PRIVATE MA_INLINE ma_uint32 ma_dr_wav_bytes_to_u32_be(const ma_uint8* data) +{ + return ((ma_uint32)data[3] << 0) | ((ma_uint32)data[2] << 8) | ((ma_uint32)data[1] << 16) | ((ma_uint32)data[0] << 24); +} +MA_PRIVATE MA_INLINE ma_uint32 ma_dr_wav_bytes_to_u32_ex(const ma_uint8* data, ma_dr_wav_container container) +{ + if (ma_dr_wav_is_container_be(container)) { + return ma_dr_wav_bytes_to_u32_be(data); + } else { + return ma_dr_wav_bytes_to_u32_le(data); + } +} +MA_PRIVATE ma_int64 ma_dr_wav_aiff_extented_to_s64(const ma_uint8* data) +{ + ma_uint32 exponent = ((ma_uint32)data[0] << 8) | data[1]; + ma_uint64 hi = ((ma_uint64)data[2] << 24) | ((ma_uint64)data[3] << 16) | ((ma_uint64)data[4] << 8) | ((ma_uint64)data[5] << 0); + ma_uint64 lo = ((ma_uint64)data[6] << 24) | ((ma_uint64)data[7] << 16) | ((ma_uint64)data[8] << 8) | ((ma_uint64)data[9] << 0); + ma_uint64 significand = (hi << 32) | lo; + int sign = exponent >> 15; + exponent &= 0x7FFF; + if (exponent == 0 && significand == 0) { + return 0; + } else if (exponent == 0x7FFF) { + return sign ? MA_DR_WAV_INT64_MIN : MA_DR_WAV_INT64_MAX; + } + exponent -= 16383; + if (exponent > 63) { + return sign ? MA_DR_WAV_INT64_MIN : MA_DR_WAV_INT64_MAX; + } else if (exponent < 1) { + return 0; + } + significand >>= (63 - exponent); + if (sign) { + return -(ma_int64)significand; + } else { + return (ma_int64)significand; + } +} +MA_PRIVATE void* ma_dr_wav__malloc_default(size_t sz, void* pUserData) +{ + (void)pUserData; + return MA_DR_WAV_MALLOC(sz); +} +MA_PRIVATE void* ma_dr_wav__realloc_default(void* p, size_t sz, void* pUserData) +{ + (void)pUserData; + return MA_DR_WAV_REALLOC(p, sz); +} +MA_PRIVATE void ma_dr_wav__free_default(void* p, void* pUserData) +{ + (void)pUserData; + MA_DR_WAV_FREE(p); +} +MA_PRIVATE void* ma_dr_wav__malloc_from_callbacks(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + if (pAllocationCallbacks->onMalloc != NULL) { + return pAllocationCallbacks->onMalloc(sz, pAllocationCallbacks->pUserData); + } + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(NULL, sz, pAllocationCallbacks->pUserData); + } + return NULL; +} +MA_PRIVATE void* ma_dr_wav__realloc_from_callbacks(void* p, size_t szNew, size_t szOld, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(p, szNew, pAllocationCallbacks->pUserData); + } + if (pAllocationCallbacks->onMalloc != NULL && pAllocationCallbacks->onFree != NULL) { + void* p2; + p2 = pAllocationCallbacks->onMalloc(szNew, pAllocationCallbacks->pUserData); + if (p2 == NULL) { + return NULL; + } + if (p != NULL) { + MA_DR_WAV_COPY_MEMORY(p2, p, szOld); + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } + return p2; + } + return NULL; +} +MA_PRIVATE void ma_dr_wav__free_from_callbacks(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (p == NULL || pAllocationCallbacks == NULL) { + return; + } + if (pAllocationCallbacks->onFree != NULL) { + 
pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } +} +MA_PRIVATE ma_allocation_callbacks ma_dr_wav_copy_allocation_callbacks_or_defaults(const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + return *pAllocationCallbacks; + } else { + ma_allocation_callbacks allocationCallbacks; + allocationCallbacks.pUserData = NULL; + allocationCallbacks.onMalloc = ma_dr_wav__malloc_default; + allocationCallbacks.onRealloc = ma_dr_wav__realloc_default; + allocationCallbacks.onFree = ma_dr_wav__free_default; + return allocationCallbacks; + } +} +static MA_INLINE ma_bool32 ma_dr_wav__is_compressed_format_tag(ma_uint16 formatTag) +{ + return + formatTag == MA_DR_WAVE_FORMAT_ADPCM || + formatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM; +} +MA_PRIVATE unsigned int ma_dr_wav__chunk_padding_size_riff(ma_uint64 chunkSize) +{ + return (unsigned int)(chunkSize % 2); +} +MA_PRIVATE unsigned int ma_dr_wav__chunk_padding_size_w64(ma_uint64 chunkSize) +{ + return (unsigned int)(chunkSize % 8); +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s16__msadpcm(ma_dr_wav* pWav, ma_uint64 samplesToRead, ma_int16* pBufferOut); +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s16__ima(ma_dr_wav* pWav, ma_uint64 samplesToRead, ma_int16* pBufferOut); +MA_PRIVATE ma_bool32 ma_dr_wav_init_write__internal(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount); +MA_PRIVATE ma_result ma_dr_wav__read_chunk_header(ma_dr_wav_read_proc onRead, void* pUserData, ma_dr_wav_container container, ma_uint64* pRunningBytesReadOut, ma_dr_wav_chunk_header* pHeaderOut) +{ + if (container == ma_dr_wav_container_riff || container == ma_dr_wav_container_rifx || container == ma_dr_wav_container_rf64 || container == ma_dr_wav_container_aiff) { + ma_uint8 sizeInBytes[4]; + if (onRead(pUserData, pHeaderOut->id.fourcc, 4) != 4) { + return MA_AT_END; + } + if (onRead(pUserData, sizeInBytes, 4) != 4) { + return MA_INVALID_FILE; + } + pHeaderOut->sizeInBytes = ma_dr_wav_bytes_to_u32_ex(sizeInBytes, container); + pHeaderOut->paddingSize = ma_dr_wav__chunk_padding_size_riff(pHeaderOut->sizeInBytes); + *pRunningBytesReadOut += 8; + } else if (container == ma_dr_wav_container_w64) { + ma_uint8 sizeInBytes[8]; + if (onRead(pUserData, pHeaderOut->id.guid, 16) != 16) { + return MA_AT_END; + } + if (onRead(pUserData, sizeInBytes, 8) != 8) { + return MA_INVALID_FILE; + } + pHeaderOut->sizeInBytes = ma_dr_wav_bytes_to_u64(sizeInBytes) - 24; + pHeaderOut->paddingSize = ma_dr_wav__chunk_padding_size_w64(pHeaderOut->sizeInBytes); + *pRunningBytesReadOut += 24; + } else { + return MA_INVALID_FILE; + } + return MA_SUCCESS; +} +MA_PRIVATE ma_bool32 ma_dr_wav__seek_forward(ma_dr_wav_seek_proc onSeek, ma_uint64 offset, void* pUserData) +{ + ma_uint64 bytesRemainingToSeek = offset; + while (bytesRemainingToSeek > 0) { + if (bytesRemainingToSeek > 0x7FFFFFFF) { + if (!onSeek(pUserData, 0x7FFFFFFF, ma_dr_wav_seek_origin_current)) { + return MA_FALSE; + } + bytesRemainingToSeek -= 0x7FFFFFFF; + } else { + if (!onSeek(pUserData, (int)bytesRemainingToSeek, ma_dr_wav_seek_origin_current)) { + return MA_FALSE; + } + bytesRemainingToSeek = 0; + } + } + return MA_TRUE; +} +MA_PRIVATE ma_bool32 ma_dr_wav__seek_from_start(ma_dr_wav_seek_proc onSeek, ma_uint64 offset, void* pUserData) +{ + if (offset <= 0x7FFFFFFF) { + return onSeek(pUserData, (int)offset, ma_dr_wav_seek_origin_start); + } + if (!onSeek(pUserData, 0x7FFFFFFF, ma_dr_wav_seek_origin_start)) { + return MA_FALSE; + } + offset -= 0x7FFFFFFF; + for 
(;;) { + if (offset <= 0x7FFFFFFF) { + return onSeek(pUserData, (int)offset, ma_dr_wav_seek_origin_current); + } + if (!onSeek(pUserData, 0x7FFFFFFF, ma_dr_wav_seek_origin_current)) { + return MA_FALSE; + } + offset -= 0x7FFFFFFF; + } +} +MA_PRIVATE size_t ma_dr_wav__on_read(ma_dr_wav_read_proc onRead, void* pUserData, void* pBufferOut, size_t bytesToRead, ma_uint64* pCursor) +{ + size_t bytesRead; + MA_DR_WAV_ASSERT(onRead != NULL); + MA_DR_WAV_ASSERT(pCursor != NULL); + bytesRead = onRead(pUserData, pBufferOut, bytesToRead); + *pCursor += bytesRead; + return bytesRead; +} +#if 0 +MA_PRIVATE ma_bool32 ma_dr_wav__on_seek(ma_dr_wav_seek_proc onSeek, void* pUserData, int offset, ma_dr_wav_seek_origin origin, ma_uint64* pCursor) +{ + MA_DR_WAV_ASSERT(onSeek != NULL); + MA_DR_WAV_ASSERT(pCursor != NULL); + if (!onSeek(pUserData, offset, origin)) { + return MA_FALSE; + } + if (origin == ma_dr_wav_seek_origin_start) { + *pCursor = offset; + } else { + *pCursor += offset; + } + return MA_TRUE; +} +#endif +#define MA_DR_WAV_SMPL_BYTES 36 +#define MA_DR_WAV_SMPL_LOOP_BYTES 24 +#define MA_DR_WAV_INST_BYTES 7 +#define MA_DR_WAV_ACID_BYTES 24 +#define MA_DR_WAV_CUE_BYTES 4 +#define MA_DR_WAV_BEXT_BYTES 602 +#define MA_DR_WAV_BEXT_DESCRIPTION_BYTES 256 +#define MA_DR_WAV_BEXT_ORIGINATOR_NAME_BYTES 32 +#define MA_DR_WAV_BEXT_ORIGINATOR_REF_BYTES 32 +#define MA_DR_WAV_BEXT_RESERVED_BYTES 180 +#define MA_DR_WAV_BEXT_UMID_BYTES 64 +#define MA_DR_WAV_CUE_POINT_BYTES 24 +#define MA_DR_WAV_LIST_LABEL_OR_NOTE_BYTES 4 +#define MA_DR_WAV_LIST_LABELLED_TEXT_BYTES 20 +#define MA_DR_WAV_METADATA_ALIGNMENT 8 +typedef enum +{ + ma_dr_wav__metadata_parser_stage_count, + ma_dr_wav__metadata_parser_stage_read +} ma_dr_wav__metadata_parser_stage; +typedef struct +{ + ma_dr_wav_read_proc onRead; + ma_dr_wav_seek_proc onSeek; + void *pReadSeekUserData; + ma_dr_wav__metadata_parser_stage stage; + ma_dr_wav_metadata *pMetadata; + ma_uint32 metadataCount; + ma_uint8 *pData; + ma_uint8 *pDataCursor; + ma_uint64 metadataCursor; + ma_uint64 extraCapacity; +} ma_dr_wav__metadata_parser; +MA_PRIVATE size_t ma_dr_wav__metadata_memory_capacity(ma_dr_wav__metadata_parser* pParser) +{ + ma_uint64 cap = sizeof(ma_dr_wav_metadata) * (ma_uint64)pParser->metadataCount + pParser->extraCapacity; + if (cap > MA_SIZE_MAX) { + return 0; + } + return (size_t)cap; +} +MA_PRIVATE ma_uint8* ma_dr_wav__metadata_get_memory(ma_dr_wav__metadata_parser* pParser, size_t size, size_t align) +{ + ma_uint8* pResult; + if (align) { + ma_uintptr modulo = (ma_uintptr)pParser->pDataCursor % align; + if (modulo != 0) { + pParser->pDataCursor += align - modulo; + } + } + pResult = pParser->pDataCursor; + MA_DR_WAV_ASSERT((pResult + size) <= (pParser->pData + ma_dr_wav__metadata_memory_capacity(pParser))); + pParser->pDataCursor += size; + return pResult; +} +MA_PRIVATE void ma_dr_wav__metadata_request_extra_memory_for_stage_2(ma_dr_wav__metadata_parser* pParser, size_t bytes, size_t align) +{ + size_t extra = bytes + (align ? 
(align - 1) : 0); + pParser->extraCapacity += extra; +} +MA_PRIVATE ma_result ma_dr_wav__metadata_alloc(ma_dr_wav__metadata_parser* pParser, ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pParser->extraCapacity != 0 || pParser->metadataCount != 0) { + pAllocationCallbacks->onFree(pParser->pData, pAllocationCallbacks->pUserData); + pParser->pData = (ma_uint8*)pAllocationCallbacks->onMalloc(ma_dr_wav__metadata_memory_capacity(pParser), pAllocationCallbacks->pUserData); + pParser->pDataCursor = pParser->pData; + if (pParser->pData == NULL) { + return MA_OUT_OF_MEMORY; + } + pParser->pMetadata = (ma_dr_wav_metadata*)ma_dr_wav__metadata_get_memory(pParser, sizeof(ma_dr_wav_metadata) * pParser->metadataCount, 1); + pParser->metadataCursor = 0; + } + return MA_SUCCESS; +} +MA_PRIVATE size_t ma_dr_wav__metadata_parser_read(ma_dr_wav__metadata_parser* pParser, void* pBufferOut, size_t bytesToRead, ma_uint64* pCursor) +{ + if (pCursor != NULL) { + return ma_dr_wav__on_read(pParser->onRead, pParser->pReadSeekUserData, pBufferOut, bytesToRead, pCursor); + } else { + return pParser->onRead(pParser->pReadSeekUserData, pBufferOut, bytesToRead); + } +} +MA_PRIVATE ma_uint64 ma_dr_wav__read_smpl_to_metadata_obj(ma_dr_wav__metadata_parser* pParser, const ma_dr_wav_chunk_header* pChunkHeader, ma_dr_wav_metadata* pMetadata) +{ + ma_uint8 smplHeaderData[MA_DR_WAV_SMPL_BYTES]; + ma_uint64 totalBytesRead = 0; + size_t bytesJustRead; + if (pMetadata == NULL) { + return 0; + } + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, smplHeaderData, sizeof(smplHeaderData), &totalBytesRead); + MA_DR_WAV_ASSERT(pParser->stage == ma_dr_wav__metadata_parser_stage_read); + MA_DR_WAV_ASSERT(pChunkHeader != NULL); + if (pMetadata != NULL && bytesJustRead == sizeof(smplHeaderData)) { + ma_uint32 iSampleLoop; + pMetadata->type = ma_dr_wav_metadata_type_smpl; + pMetadata->data.smpl.manufacturerId = ma_dr_wav_bytes_to_u32(smplHeaderData + 0); + pMetadata->data.smpl.productId = ma_dr_wav_bytes_to_u32(smplHeaderData + 4); + pMetadata->data.smpl.samplePeriodNanoseconds = ma_dr_wav_bytes_to_u32(smplHeaderData + 8); + pMetadata->data.smpl.midiUnityNote = ma_dr_wav_bytes_to_u32(smplHeaderData + 12); + pMetadata->data.smpl.midiPitchFraction = ma_dr_wav_bytes_to_u32(smplHeaderData + 16); + pMetadata->data.smpl.smpteFormat = ma_dr_wav_bytes_to_u32(smplHeaderData + 20); + pMetadata->data.smpl.smpteOffset = ma_dr_wav_bytes_to_u32(smplHeaderData + 24); + pMetadata->data.smpl.sampleLoopCount = ma_dr_wav_bytes_to_u32(smplHeaderData + 28); + pMetadata->data.smpl.samplerSpecificDataSizeInBytes = ma_dr_wav_bytes_to_u32(smplHeaderData + 32); + if (pMetadata->data.smpl.sampleLoopCount == (pChunkHeader->sizeInBytes - MA_DR_WAV_SMPL_BYTES) / MA_DR_WAV_SMPL_LOOP_BYTES) { + pMetadata->data.smpl.pLoops = (ma_dr_wav_smpl_loop*)ma_dr_wav__metadata_get_memory(pParser, sizeof(ma_dr_wav_smpl_loop) * pMetadata->data.smpl.sampleLoopCount, MA_DR_WAV_METADATA_ALIGNMENT); + for (iSampleLoop = 0; iSampleLoop < pMetadata->data.smpl.sampleLoopCount; ++iSampleLoop) { + ma_uint8 smplLoopData[MA_DR_WAV_SMPL_LOOP_BYTES]; + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, smplLoopData, sizeof(smplLoopData), &totalBytesRead); + if (bytesJustRead == sizeof(smplLoopData)) { + pMetadata->data.smpl.pLoops[iSampleLoop].cuePointId = ma_dr_wav_bytes_to_u32(smplLoopData + 0); + pMetadata->data.smpl.pLoops[iSampleLoop].type = ma_dr_wav_bytes_to_u32(smplLoopData + 4); + pMetadata->data.smpl.pLoops[iSampleLoop].firstSampleByteOffset = 
ma_dr_wav_bytes_to_u32(smplLoopData + 8); + pMetadata->data.smpl.pLoops[iSampleLoop].lastSampleByteOffset = ma_dr_wav_bytes_to_u32(smplLoopData + 12); + pMetadata->data.smpl.pLoops[iSampleLoop].sampleFraction = ma_dr_wav_bytes_to_u32(smplLoopData + 16); + pMetadata->data.smpl.pLoops[iSampleLoop].playCount = ma_dr_wav_bytes_to_u32(smplLoopData + 20); + } else { + break; + } + } + if (pMetadata->data.smpl.samplerSpecificDataSizeInBytes > 0) { + pMetadata->data.smpl.pSamplerSpecificData = ma_dr_wav__metadata_get_memory(pParser, pMetadata->data.smpl.samplerSpecificDataSizeInBytes, 1); + MA_DR_WAV_ASSERT(pMetadata->data.smpl.pSamplerSpecificData != NULL); + ma_dr_wav__metadata_parser_read(pParser, pMetadata->data.smpl.pSamplerSpecificData, pMetadata->data.smpl.samplerSpecificDataSizeInBytes, &totalBytesRead); + } + } + } + return totalBytesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav__read_cue_to_metadata_obj(ma_dr_wav__metadata_parser* pParser, const ma_dr_wav_chunk_header* pChunkHeader, ma_dr_wav_metadata* pMetadata) +{ + ma_uint8 cueHeaderSectionData[MA_DR_WAV_CUE_BYTES]; + ma_uint64 totalBytesRead = 0; + size_t bytesJustRead; + if (pMetadata == NULL) { + return 0; + } + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, cueHeaderSectionData, sizeof(cueHeaderSectionData), &totalBytesRead); + MA_DR_WAV_ASSERT(pParser->stage == ma_dr_wav__metadata_parser_stage_read); + if (bytesJustRead == sizeof(cueHeaderSectionData)) { + pMetadata->type = ma_dr_wav_metadata_type_cue; + pMetadata->data.cue.cuePointCount = ma_dr_wav_bytes_to_u32(cueHeaderSectionData); + if (pMetadata->data.cue.cuePointCount == (pChunkHeader->sizeInBytes - MA_DR_WAV_CUE_BYTES) / MA_DR_WAV_CUE_POINT_BYTES) { + pMetadata->data.cue.pCuePoints = (ma_dr_wav_cue_point*)ma_dr_wav__metadata_get_memory(pParser, sizeof(ma_dr_wav_cue_point) * pMetadata->data.cue.cuePointCount, MA_DR_WAV_METADATA_ALIGNMENT); + MA_DR_WAV_ASSERT(pMetadata->data.cue.pCuePoints != NULL); + if (pMetadata->data.cue.cuePointCount > 0) { + ma_uint32 iCuePoint; + for (iCuePoint = 0; iCuePoint < pMetadata->data.cue.cuePointCount; ++iCuePoint) { + ma_uint8 cuePointData[MA_DR_WAV_CUE_POINT_BYTES]; + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, cuePointData, sizeof(cuePointData), &totalBytesRead); + if (bytesJustRead == sizeof(cuePointData)) { + pMetadata->data.cue.pCuePoints[iCuePoint].id = ma_dr_wav_bytes_to_u32(cuePointData + 0); + pMetadata->data.cue.pCuePoints[iCuePoint].playOrderPosition = ma_dr_wav_bytes_to_u32(cuePointData + 4); + pMetadata->data.cue.pCuePoints[iCuePoint].dataChunkId[0] = cuePointData[8]; + pMetadata->data.cue.pCuePoints[iCuePoint].dataChunkId[1] = cuePointData[9]; + pMetadata->data.cue.pCuePoints[iCuePoint].dataChunkId[2] = cuePointData[10]; + pMetadata->data.cue.pCuePoints[iCuePoint].dataChunkId[3] = cuePointData[11]; + pMetadata->data.cue.pCuePoints[iCuePoint].chunkStart = ma_dr_wav_bytes_to_u32(cuePointData + 12); + pMetadata->data.cue.pCuePoints[iCuePoint].blockStart = ma_dr_wav_bytes_to_u32(cuePointData + 16); + pMetadata->data.cue.pCuePoints[iCuePoint].sampleByteOffset = ma_dr_wav_bytes_to_u32(cuePointData + 20); + } else { + break; + } + } + } + } + } + return totalBytesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav__read_inst_to_metadata_obj(ma_dr_wav__metadata_parser* pParser, ma_dr_wav_metadata* pMetadata) +{ + ma_uint8 instData[MA_DR_WAV_INST_BYTES]; + ma_uint64 bytesRead; + if (pMetadata == NULL) { + return 0; + } + bytesRead = ma_dr_wav__metadata_parser_read(pParser, instData, sizeof(instData), NULL); + 
MA_DR_WAV_ASSERT(pParser->stage == ma_dr_wav__metadata_parser_stage_read); + if (bytesRead == sizeof(instData)) { + pMetadata->type = ma_dr_wav_metadata_type_inst; + pMetadata->data.inst.midiUnityNote = (ma_int8)instData[0]; + pMetadata->data.inst.fineTuneCents = (ma_int8)instData[1]; + pMetadata->data.inst.gainDecibels = (ma_int8)instData[2]; + pMetadata->data.inst.lowNote = (ma_int8)instData[3]; + pMetadata->data.inst.highNote = (ma_int8)instData[4]; + pMetadata->data.inst.lowVelocity = (ma_int8)instData[5]; + pMetadata->data.inst.highVelocity = (ma_int8)instData[6]; + } + return bytesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav__read_acid_to_metadata_obj(ma_dr_wav__metadata_parser* pParser, ma_dr_wav_metadata* pMetadata) +{ + ma_uint8 acidData[MA_DR_WAV_ACID_BYTES]; + ma_uint64 bytesRead; + if (pMetadata == NULL) { + return 0; + } + bytesRead = ma_dr_wav__metadata_parser_read(pParser, acidData, sizeof(acidData), NULL); + MA_DR_WAV_ASSERT(pParser->stage == ma_dr_wav__metadata_parser_stage_read); + if (bytesRead == sizeof(acidData)) { + pMetadata->type = ma_dr_wav_metadata_type_acid; + pMetadata->data.acid.flags = ma_dr_wav_bytes_to_u32(acidData + 0); + pMetadata->data.acid.midiUnityNote = ma_dr_wav_bytes_to_u16(acidData + 4); + pMetadata->data.acid.reserved1 = ma_dr_wav_bytes_to_u16(acidData + 6); + pMetadata->data.acid.reserved2 = ma_dr_wav_bytes_to_f32(acidData + 8); + pMetadata->data.acid.numBeats = ma_dr_wav_bytes_to_u32(acidData + 12); + pMetadata->data.acid.meterDenominator = ma_dr_wav_bytes_to_u16(acidData + 16); + pMetadata->data.acid.meterNumerator = ma_dr_wav_bytes_to_u16(acidData + 18); + pMetadata->data.acid.tempo = ma_dr_wav_bytes_to_f32(acidData + 20); + } + return bytesRead; +} +MA_PRIVATE size_t ma_dr_wav__strlen(const char* str) +{ + size_t result = 0; + while (*str++) { + result += 1; + } + return result; +} +MA_PRIVATE size_t ma_dr_wav__strlen_clamped(const char* str, size_t maxToRead) +{ + size_t result = 0; + while (*str++ && result < maxToRead) { + result += 1; + } + return result; +} +MA_PRIVATE char* ma_dr_wav__metadata_copy_string(ma_dr_wav__metadata_parser* pParser, const char* str, size_t maxToRead) +{ + size_t len = ma_dr_wav__strlen_clamped(str, maxToRead); + if (len) { + char* result = (char*)ma_dr_wav__metadata_get_memory(pParser, len + 1, 1); + MA_DR_WAV_ASSERT(result != NULL); + MA_DR_WAV_COPY_MEMORY(result, str, len); + result[len] = '\0'; + return result; + } else { + return NULL; + } +} +typedef struct +{ + const void* pBuffer; + size_t sizeInBytes; + size_t cursor; +} ma_dr_wav_buffer_reader; +MA_PRIVATE ma_result ma_dr_wav_buffer_reader_init(const void* pBuffer, size_t sizeInBytes, ma_dr_wav_buffer_reader* pReader) +{ + MA_DR_WAV_ASSERT(pBuffer != NULL); + MA_DR_WAV_ASSERT(pReader != NULL); + MA_DR_WAV_ZERO_OBJECT(pReader); + pReader->pBuffer = pBuffer; + pReader->sizeInBytes = sizeInBytes; + pReader->cursor = 0; + return MA_SUCCESS; +} +MA_PRIVATE const void* ma_dr_wav_buffer_reader_ptr(const ma_dr_wav_buffer_reader* pReader) +{ + MA_DR_WAV_ASSERT(pReader != NULL); + return ma_dr_wav_offset_ptr(pReader->pBuffer, pReader->cursor); +} +MA_PRIVATE ma_result ma_dr_wav_buffer_reader_seek(ma_dr_wav_buffer_reader* pReader, size_t bytesToSeek) +{ + MA_DR_WAV_ASSERT(pReader != NULL); + if (pReader->cursor + bytesToSeek > pReader->sizeInBytes) { + return MA_BAD_SEEK; + } + pReader->cursor += bytesToSeek; + return MA_SUCCESS; +} +MA_PRIVATE ma_result ma_dr_wav_buffer_reader_read(ma_dr_wav_buffer_reader* pReader, void* pDst, size_t bytesToRead, size_t* pBytesRead) 
+{ + ma_result result = MA_SUCCESS; + size_t bytesRemaining; + MA_DR_WAV_ASSERT(pReader != NULL); + if (pBytesRead != NULL) { + *pBytesRead = 0; + } + bytesRemaining = (pReader->sizeInBytes - pReader->cursor); + if (bytesToRead > bytesRemaining) { + bytesToRead = bytesRemaining; + } + if (pDst == NULL) { + result = ma_dr_wav_buffer_reader_seek(pReader, bytesToRead); + } else { + MA_DR_WAV_COPY_MEMORY(pDst, ma_dr_wav_buffer_reader_ptr(pReader), bytesToRead); + pReader->cursor += bytesToRead; + } + MA_DR_WAV_ASSERT(pReader->cursor <= pReader->sizeInBytes); + if (result == MA_SUCCESS) { + if (pBytesRead != NULL) { + *pBytesRead = bytesToRead; + } + } + return MA_SUCCESS; +} +MA_PRIVATE ma_result ma_dr_wav_buffer_reader_read_u16(ma_dr_wav_buffer_reader* pReader, ma_uint16* pDst) +{ + ma_result result; + size_t bytesRead; + ma_uint8 data[2]; + MA_DR_WAV_ASSERT(pReader != NULL); + MA_DR_WAV_ASSERT(pDst != NULL); + *pDst = 0; + result = ma_dr_wav_buffer_reader_read(pReader, data, sizeof(*pDst), &bytesRead); + if (result != MA_SUCCESS || bytesRead != sizeof(*pDst)) { + return result; + } + *pDst = ma_dr_wav_bytes_to_u16(data); + return MA_SUCCESS; +} +MA_PRIVATE ma_result ma_dr_wav_buffer_reader_read_u32(ma_dr_wav_buffer_reader* pReader, ma_uint32* pDst) +{ + ma_result result; + size_t bytesRead; + ma_uint8 data[4]; + MA_DR_WAV_ASSERT(pReader != NULL); + MA_DR_WAV_ASSERT(pDst != NULL); + *pDst = 0; + result = ma_dr_wav_buffer_reader_read(pReader, data, sizeof(*pDst), &bytesRead); + if (result != MA_SUCCESS || bytesRead != sizeof(*pDst)) { + return result; + } + *pDst = ma_dr_wav_bytes_to_u32(data); + return MA_SUCCESS; +} +MA_PRIVATE ma_uint64 ma_dr_wav__read_bext_to_metadata_obj(ma_dr_wav__metadata_parser* pParser, ma_dr_wav_metadata* pMetadata, ma_uint64 chunkSize) +{ + ma_uint8 bextData[MA_DR_WAV_BEXT_BYTES]; + size_t bytesRead = ma_dr_wav__metadata_parser_read(pParser, bextData, sizeof(bextData), NULL); + MA_DR_WAV_ASSERT(pParser->stage == ma_dr_wav__metadata_parser_stage_read); + if (bytesRead == sizeof(bextData)) { + ma_dr_wav_buffer_reader reader; + ma_uint32 timeReferenceLow; + ma_uint32 timeReferenceHigh; + size_t extraBytes; + pMetadata->type = ma_dr_wav_metadata_type_bext; + if (ma_dr_wav_buffer_reader_init(bextData, bytesRead, &reader) == MA_SUCCESS) { + pMetadata->data.bext.pDescription = ma_dr_wav__metadata_copy_string(pParser, (const char*)ma_dr_wav_buffer_reader_ptr(&reader), MA_DR_WAV_BEXT_DESCRIPTION_BYTES); + ma_dr_wav_buffer_reader_seek(&reader, MA_DR_WAV_BEXT_DESCRIPTION_BYTES); + pMetadata->data.bext.pOriginatorName = ma_dr_wav__metadata_copy_string(pParser, (const char*)ma_dr_wav_buffer_reader_ptr(&reader), MA_DR_WAV_BEXT_ORIGINATOR_NAME_BYTES); + ma_dr_wav_buffer_reader_seek(&reader, MA_DR_WAV_BEXT_ORIGINATOR_NAME_BYTES); + pMetadata->data.bext.pOriginatorReference = ma_dr_wav__metadata_copy_string(pParser, (const char*)ma_dr_wav_buffer_reader_ptr(&reader), MA_DR_WAV_BEXT_ORIGINATOR_REF_BYTES); + ma_dr_wav_buffer_reader_seek(&reader, MA_DR_WAV_BEXT_ORIGINATOR_REF_BYTES); + ma_dr_wav_buffer_reader_read(&reader, pMetadata->data.bext.pOriginationDate, sizeof(pMetadata->data.bext.pOriginationDate), NULL); + ma_dr_wav_buffer_reader_read(&reader, pMetadata->data.bext.pOriginationTime, sizeof(pMetadata->data.bext.pOriginationTime), NULL); + ma_dr_wav_buffer_reader_read_u32(&reader, &timeReferenceLow); + ma_dr_wav_buffer_reader_read_u32(&reader, &timeReferenceHigh); + pMetadata->data.bext.timeReference = ((ma_uint64)timeReferenceHigh << 32) + timeReferenceLow; + 
ma_dr_wav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.version); + pMetadata->data.bext.pUMID = ma_dr_wav__metadata_get_memory(pParser, MA_DR_WAV_BEXT_UMID_BYTES, 1); + ma_dr_wav_buffer_reader_read(&reader, pMetadata->data.bext.pUMID, MA_DR_WAV_BEXT_UMID_BYTES, NULL); + ma_dr_wav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.loudnessValue); + ma_dr_wav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.loudnessRange); + ma_dr_wav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.maxTruePeakLevel); + ma_dr_wav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.maxMomentaryLoudness); + ma_dr_wav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.maxShortTermLoudness); + MA_DR_WAV_ASSERT((ma_dr_wav_offset_ptr(ma_dr_wav_buffer_reader_ptr(&reader), MA_DR_WAV_BEXT_RESERVED_BYTES)) == (bextData + MA_DR_WAV_BEXT_BYTES)); + extraBytes = (size_t)(chunkSize - MA_DR_WAV_BEXT_BYTES); + if (extraBytes > 0) { + pMetadata->data.bext.pCodingHistory = (char*)ma_dr_wav__metadata_get_memory(pParser, extraBytes + 1, 1); + MA_DR_WAV_ASSERT(pMetadata->data.bext.pCodingHistory != NULL); + bytesRead += ma_dr_wav__metadata_parser_read(pParser, pMetadata->data.bext.pCodingHistory, extraBytes, NULL); + pMetadata->data.bext.codingHistorySize = (ma_uint32)ma_dr_wav__strlen(pMetadata->data.bext.pCodingHistory); + } else { + pMetadata->data.bext.pCodingHistory = NULL; + pMetadata->data.bext.codingHistorySize = 0; + } + } + } + return bytesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav__read_list_label_or_note_to_metadata_obj(ma_dr_wav__metadata_parser* pParser, ma_dr_wav_metadata* pMetadata, ma_uint64 chunkSize, ma_dr_wav_metadata_type type) +{ + ma_uint8 cueIDBuffer[MA_DR_WAV_LIST_LABEL_OR_NOTE_BYTES]; + ma_uint64 totalBytesRead = 0; + size_t bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, cueIDBuffer, sizeof(cueIDBuffer), &totalBytesRead); + MA_DR_WAV_ASSERT(pParser->stage == ma_dr_wav__metadata_parser_stage_read); + if (bytesJustRead == sizeof(cueIDBuffer)) { + ma_uint32 sizeIncludingNullTerminator; + pMetadata->type = type; + pMetadata->data.labelOrNote.cuePointId = ma_dr_wav_bytes_to_u32(cueIDBuffer); + sizeIncludingNullTerminator = (ma_uint32)chunkSize - MA_DR_WAV_LIST_LABEL_OR_NOTE_BYTES; + if (sizeIncludingNullTerminator > 0) { + pMetadata->data.labelOrNote.stringLength = sizeIncludingNullTerminator - 1; + pMetadata->data.labelOrNote.pString = (char*)ma_dr_wav__metadata_get_memory(pParser, sizeIncludingNullTerminator, 1); + MA_DR_WAV_ASSERT(pMetadata->data.labelOrNote.pString != NULL); + ma_dr_wav__metadata_parser_read(pParser, pMetadata->data.labelOrNote.pString, sizeIncludingNullTerminator, &totalBytesRead); + } else { + pMetadata->data.labelOrNote.stringLength = 0; + pMetadata->data.labelOrNote.pString = NULL; + } + } + return totalBytesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav__read_list_labelled_cue_region_to_metadata_obj(ma_dr_wav__metadata_parser* pParser, ma_dr_wav_metadata* pMetadata, ma_uint64 chunkSize) +{ + ma_uint8 buffer[MA_DR_WAV_LIST_LABELLED_TEXT_BYTES]; + ma_uint64 totalBytesRead = 0; + size_t bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, buffer, sizeof(buffer), &totalBytesRead); + MA_DR_WAV_ASSERT(pParser->stage == ma_dr_wav__metadata_parser_stage_read); + if (bytesJustRead == sizeof(buffer)) { + ma_uint32 sizeIncludingNullTerminator; + pMetadata->type = ma_dr_wav_metadata_type_list_labelled_cue_region; + pMetadata->data.labelledCueRegion.cuePointId = ma_dr_wav_bytes_to_u32(buffer + 0); + pMetadata->data.labelledCueRegion.sampleLength = 
ma_dr_wav_bytes_to_u32(buffer + 4); + pMetadata->data.labelledCueRegion.purposeId[0] = buffer[8]; + pMetadata->data.labelledCueRegion.purposeId[1] = buffer[9]; + pMetadata->data.labelledCueRegion.purposeId[2] = buffer[10]; + pMetadata->data.labelledCueRegion.purposeId[3] = buffer[11]; + pMetadata->data.labelledCueRegion.country = ma_dr_wav_bytes_to_u16(buffer + 12); + pMetadata->data.labelledCueRegion.language = ma_dr_wav_bytes_to_u16(buffer + 14); + pMetadata->data.labelledCueRegion.dialect = ma_dr_wav_bytes_to_u16(buffer + 16); + pMetadata->data.labelledCueRegion.codePage = ma_dr_wav_bytes_to_u16(buffer + 18); + sizeIncludingNullTerminator = (ma_uint32)chunkSize - MA_DR_WAV_LIST_LABELLED_TEXT_BYTES; + if (sizeIncludingNullTerminator > 0) { + pMetadata->data.labelledCueRegion.stringLength = sizeIncludingNullTerminator - 1; + pMetadata->data.labelledCueRegion.pString = (char*)ma_dr_wav__metadata_get_memory(pParser, sizeIncludingNullTerminator, 1); + MA_DR_WAV_ASSERT(pMetadata->data.labelledCueRegion.pString != NULL); + ma_dr_wav__metadata_parser_read(pParser, pMetadata->data.labelledCueRegion.pString, sizeIncludingNullTerminator, &totalBytesRead); + } else { + pMetadata->data.labelledCueRegion.stringLength = 0; + pMetadata->data.labelledCueRegion.pString = NULL; + } + } + return totalBytesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav__metadata_process_info_text_chunk(ma_dr_wav__metadata_parser* pParser, ma_uint64 chunkSize, ma_dr_wav_metadata_type type) +{ + ma_uint64 bytesRead = 0; + ma_uint32 stringSizeWithNullTerminator = (ma_uint32)chunkSize; + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + pParser->metadataCount += 1; + ma_dr_wav__metadata_request_extra_memory_for_stage_2(pParser, stringSizeWithNullTerminator, 1); + } else { + ma_dr_wav_metadata* pMetadata = &pParser->pMetadata[pParser->metadataCursor]; + pMetadata->type = type; + if (stringSizeWithNullTerminator > 0) { + pMetadata->data.infoText.stringLength = stringSizeWithNullTerminator - 1; + pMetadata->data.infoText.pString = (char*)ma_dr_wav__metadata_get_memory(pParser, stringSizeWithNullTerminator, 1); + MA_DR_WAV_ASSERT(pMetadata->data.infoText.pString != NULL); + bytesRead = ma_dr_wav__metadata_parser_read(pParser, pMetadata->data.infoText.pString, (size_t)stringSizeWithNullTerminator, NULL); + if (bytesRead == chunkSize) { + pParser->metadataCursor += 1; + } else { + } + } else { + pMetadata->data.infoText.stringLength = 0; + pMetadata->data.infoText.pString = NULL; + pParser->metadataCursor += 1; + } + } + return bytesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav__metadata_process_unknown_chunk(ma_dr_wav__metadata_parser* pParser, const ma_uint8* pChunkId, ma_uint64 chunkSize, ma_dr_wav_metadata_location location) +{ + ma_uint64 bytesRead = 0; + if (location == ma_dr_wav_metadata_location_invalid) { + return 0; + } + if (ma_dr_wav_fourcc_equal(pChunkId, "data") || ma_dr_wav_fourcc_equal(pChunkId, "fmt ") || ma_dr_wav_fourcc_equal(pChunkId, "fact")) { + return 0; + } + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + pParser->metadataCount += 1; + ma_dr_wav__metadata_request_extra_memory_for_stage_2(pParser, (size_t)chunkSize, 1); + } else { + ma_dr_wav_metadata* pMetadata = &pParser->pMetadata[pParser->metadataCursor]; + pMetadata->type = ma_dr_wav_metadata_type_unknown; + pMetadata->data.unknown.chunkLocation = location; + pMetadata->data.unknown.id[0] = pChunkId[0]; + pMetadata->data.unknown.id[1] = pChunkId[1]; + pMetadata->data.unknown.id[2] = pChunkId[2]; + pMetadata->data.unknown.id[3] = 
pChunkId[3]; + pMetadata->data.unknown.dataSizeInBytes = (ma_uint32)chunkSize; + pMetadata->data.unknown.pData = (ma_uint8 *)ma_dr_wav__metadata_get_memory(pParser, (size_t)chunkSize, 1); + MA_DR_WAV_ASSERT(pMetadata->data.unknown.pData != NULL); + bytesRead = ma_dr_wav__metadata_parser_read(pParser, pMetadata->data.unknown.pData, pMetadata->data.unknown.dataSizeInBytes, NULL); + if (bytesRead == pMetadata->data.unknown.dataSizeInBytes) { + pParser->metadataCursor += 1; + } else { + } + } + return bytesRead; +} +MA_PRIVATE ma_bool32 ma_dr_wav__chunk_matches(ma_dr_wav_metadata_type allowedMetadataTypes, const ma_uint8* pChunkID, ma_dr_wav_metadata_type type, const char* pID) +{ + return (allowedMetadataTypes & type) && ma_dr_wav_fourcc_equal(pChunkID, pID); +} +MA_PRIVATE ma_uint64 ma_dr_wav__metadata_process_chunk(ma_dr_wav__metadata_parser* pParser, const ma_dr_wav_chunk_header* pChunkHeader, ma_dr_wav_metadata_type allowedMetadataTypes) +{ + const ma_uint8 *pChunkID = pChunkHeader->id.fourcc; + ma_uint64 bytesRead = 0; + if (ma_dr_wav__chunk_matches(allowedMetadataTypes, pChunkID, ma_dr_wav_metadata_type_smpl, "smpl")) { + if (pChunkHeader->sizeInBytes >= MA_DR_WAV_SMPL_BYTES) { + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + ma_uint8 buffer[4]; + size_t bytesJustRead; + if (!pParser->onSeek(pParser->pReadSeekUserData, 28, ma_dr_wav_seek_origin_current)) { + return bytesRead; + } + bytesRead += 28; + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, buffer, sizeof(buffer), &bytesRead); + if (bytesJustRead == sizeof(buffer)) { + ma_uint32 loopCount = ma_dr_wav_bytes_to_u32(buffer); + ma_uint64 calculatedLoopCount; + calculatedLoopCount = (pChunkHeader->sizeInBytes - MA_DR_WAV_SMPL_BYTES) / MA_DR_WAV_SMPL_LOOP_BYTES; + if (calculatedLoopCount == loopCount) { + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, buffer, sizeof(buffer), &bytesRead); + if (bytesJustRead == sizeof(buffer)) { + ma_uint32 samplerSpecificDataSizeInBytes = ma_dr_wav_bytes_to_u32(buffer); + pParser->metadataCount += 1; + ma_dr_wav__metadata_request_extra_memory_for_stage_2(pParser, sizeof(ma_dr_wav_smpl_loop) * loopCount, MA_DR_WAV_METADATA_ALIGNMENT); + ma_dr_wav__metadata_request_extra_memory_for_stage_2(pParser, samplerSpecificDataSizeInBytes, 1); + } + } else { + } + } + } else { + bytesRead = ma_dr_wav__read_smpl_to_metadata_obj(pParser, pChunkHeader, &pParser->pMetadata[pParser->metadataCursor]); + if (bytesRead == pChunkHeader->sizeInBytes) { + pParser->metadataCursor += 1; + } else { + } + } + } else { + } + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, pChunkID, ma_dr_wav_metadata_type_inst, "inst")) { + if (pChunkHeader->sizeInBytes == MA_DR_WAV_INST_BYTES) { + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + pParser->metadataCount += 1; + } else { + bytesRead = ma_dr_wav__read_inst_to_metadata_obj(pParser, &pParser->pMetadata[pParser->metadataCursor]); + if (bytesRead == pChunkHeader->sizeInBytes) { + pParser->metadataCursor += 1; + } else { + } + } + } else { + } + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, pChunkID, ma_dr_wav_metadata_type_acid, "acid")) { + if (pChunkHeader->sizeInBytes == MA_DR_WAV_ACID_BYTES) { + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + pParser->metadataCount += 1; + } else { + bytesRead = ma_dr_wav__read_acid_to_metadata_obj(pParser, &pParser->pMetadata[pParser->metadataCursor]); + if (bytesRead == pChunkHeader->sizeInBytes) { + pParser->metadataCursor += 1; + } else { + } + } + } 
else { + } + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, pChunkID, ma_dr_wav_metadata_type_cue, "cue ")) { + if (pChunkHeader->sizeInBytes >= MA_DR_WAV_CUE_BYTES) { + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + size_t cueCount; + pParser->metadataCount += 1; + cueCount = (size_t)(pChunkHeader->sizeInBytes - MA_DR_WAV_CUE_BYTES) / MA_DR_WAV_CUE_POINT_BYTES; + ma_dr_wav__metadata_request_extra_memory_for_stage_2(pParser, sizeof(ma_dr_wav_cue_point) * cueCount, MA_DR_WAV_METADATA_ALIGNMENT); + } else { + bytesRead = ma_dr_wav__read_cue_to_metadata_obj(pParser, pChunkHeader, &pParser->pMetadata[pParser->metadataCursor]); + if (bytesRead == pChunkHeader->sizeInBytes) { + pParser->metadataCursor += 1; + } else { + } + } + } else { + } + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, pChunkID, ma_dr_wav_metadata_type_bext, "bext")) { + if (pChunkHeader->sizeInBytes >= MA_DR_WAV_BEXT_BYTES) { + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + char buffer[MA_DR_WAV_BEXT_DESCRIPTION_BYTES + 1]; + size_t allocSizeNeeded = MA_DR_WAV_BEXT_UMID_BYTES; + size_t bytesJustRead; + buffer[MA_DR_WAV_BEXT_DESCRIPTION_BYTES] = '\0'; + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, buffer, MA_DR_WAV_BEXT_DESCRIPTION_BYTES, &bytesRead); + if (bytesJustRead != MA_DR_WAV_BEXT_DESCRIPTION_BYTES) { + return bytesRead; + } + allocSizeNeeded += ma_dr_wav__strlen(buffer) + 1; + buffer[MA_DR_WAV_BEXT_ORIGINATOR_NAME_BYTES] = '\0'; + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, buffer, MA_DR_WAV_BEXT_ORIGINATOR_NAME_BYTES, &bytesRead); + if (bytesJustRead != MA_DR_WAV_BEXT_ORIGINATOR_NAME_BYTES) { + return bytesRead; + } + allocSizeNeeded += ma_dr_wav__strlen(buffer) + 1; + buffer[MA_DR_WAV_BEXT_ORIGINATOR_REF_BYTES] = '\0'; + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, buffer, MA_DR_WAV_BEXT_ORIGINATOR_REF_BYTES, &bytesRead); + if (bytesJustRead != MA_DR_WAV_BEXT_ORIGINATOR_REF_BYTES) { + return bytesRead; + } + allocSizeNeeded += ma_dr_wav__strlen(buffer) + 1; + allocSizeNeeded += (size_t)pChunkHeader->sizeInBytes - MA_DR_WAV_BEXT_BYTES; + ma_dr_wav__metadata_request_extra_memory_for_stage_2(pParser, allocSizeNeeded, 1); + pParser->metadataCount += 1; + } else { + bytesRead = ma_dr_wav__read_bext_to_metadata_obj(pParser, &pParser->pMetadata[pParser->metadataCursor], pChunkHeader->sizeInBytes); + if (bytesRead == pChunkHeader->sizeInBytes) { + pParser->metadataCursor += 1; + } else { + } + } + } else { + } + } else if (ma_dr_wav_fourcc_equal(pChunkID, "LIST") || ma_dr_wav_fourcc_equal(pChunkID, "list")) { + ma_dr_wav_metadata_location listType = ma_dr_wav_metadata_location_invalid; + while (bytesRead < pChunkHeader->sizeInBytes) { + ma_uint8 subchunkId[4]; + ma_uint8 subchunkSizeBuffer[4]; + ma_uint64 subchunkDataSize; + ma_uint64 subchunkBytesRead = 0; + ma_uint64 bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, subchunkId, sizeof(subchunkId), &bytesRead); + if (bytesJustRead != sizeof(subchunkId)) { + break; + } + if (ma_dr_wav_fourcc_equal(subchunkId, "adtl")) { + listType = ma_dr_wav_metadata_location_inside_adtl_list; + continue; + } else if (ma_dr_wav_fourcc_equal(subchunkId, "INFO")) { + listType = ma_dr_wav_metadata_location_inside_info_list; + continue; + } + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, subchunkSizeBuffer, sizeof(subchunkSizeBuffer), &bytesRead); + if (bytesJustRead != sizeof(subchunkSizeBuffer)) { + break; + } + subchunkDataSize = ma_dr_wav_bytes_to_u32(subchunkSizeBuffer); + 
if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_label, "labl") || ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_note, "note")) { + if (subchunkDataSize >= MA_DR_WAV_LIST_LABEL_OR_NOTE_BYTES) { + ma_uint64 stringSizeWithNullTerm = subchunkDataSize - MA_DR_WAV_LIST_LABEL_OR_NOTE_BYTES; + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + pParser->metadataCount += 1; + ma_dr_wav__metadata_request_extra_memory_for_stage_2(pParser, (size_t)stringSizeWithNullTerm, 1); + } else { + subchunkBytesRead = ma_dr_wav__read_list_label_or_note_to_metadata_obj(pParser, &pParser->pMetadata[pParser->metadataCursor], subchunkDataSize, ma_dr_wav_fourcc_equal(subchunkId, "labl") ? ma_dr_wav_metadata_type_list_label : ma_dr_wav_metadata_type_list_note); + if (subchunkBytesRead == subchunkDataSize) { + pParser->metadataCursor += 1; + } else { + } + } + } else { + } + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_labelled_cue_region, "ltxt")) { + if (subchunkDataSize >= MA_DR_WAV_LIST_LABELLED_TEXT_BYTES) { + ma_uint64 stringSizeWithNullTerminator = subchunkDataSize - MA_DR_WAV_LIST_LABELLED_TEXT_BYTES; + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + pParser->metadataCount += 1; + ma_dr_wav__metadata_request_extra_memory_for_stage_2(pParser, (size_t)stringSizeWithNullTerminator, 1); + } else { + subchunkBytesRead = ma_dr_wav__read_list_labelled_cue_region_to_metadata_obj(pParser, &pParser->pMetadata[pParser->metadataCursor], subchunkDataSize); + if (subchunkBytesRead == subchunkDataSize) { + pParser->metadataCursor += 1; + } else { + } + } + } else { + } + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_software, "ISFT")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, ma_dr_wav_metadata_type_list_info_software); + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_copyright, "ICOP")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, ma_dr_wav_metadata_type_list_info_copyright); + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_title, "INAM")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, ma_dr_wav_metadata_type_list_info_title); + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_artist, "IART")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, ma_dr_wav_metadata_type_list_info_artist); + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_comment, "ICMT")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, ma_dr_wav_metadata_type_list_info_comment); + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_date, "ICRD")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, ma_dr_wav_metadata_type_list_info_date); + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_genre, "IGNR")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, 
ma_dr_wav_metadata_type_list_info_genre); + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_album, "IPRD")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, ma_dr_wav_metadata_type_list_info_album); + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_tracknumber, "ITRK")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, ma_dr_wav_metadata_type_list_info_tracknumber); + } else if ((allowedMetadataTypes & ma_dr_wav_metadata_type_unknown) != 0) { + subchunkBytesRead = ma_dr_wav__metadata_process_unknown_chunk(pParser, subchunkId, subchunkDataSize, listType); + } + bytesRead += subchunkBytesRead; + MA_DR_WAV_ASSERT(subchunkBytesRead <= subchunkDataSize); + if (subchunkBytesRead < subchunkDataSize) { + ma_uint64 bytesToSeek = subchunkDataSize - subchunkBytesRead; + if (!pParser->onSeek(pParser->pReadSeekUserData, (int)bytesToSeek, ma_dr_wav_seek_origin_current)) { + break; + } + bytesRead += bytesToSeek; + } + if ((subchunkDataSize % 2) == 1) { + if (!pParser->onSeek(pParser->pReadSeekUserData, 1, ma_dr_wav_seek_origin_current)) { + break; + } + bytesRead += 1; + } + } + } else if ((allowedMetadataTypes & ma_dr_wav_metadata_type_unknown) != 0) { + bytesRead = ma_dr_wav__metadata_process_unknown_chunk(pParser, pChunkID, pChunkHeader->sizeInBytes, ma_dr_wav_metadata_location_top_level); + } + return bytesRead; +} +MA_PRIVATE ma_uint32 ma_dr_wav_get_bytes_per_pcm_frame(ma_dr_wav* pWav) +{ + ma_uint32 bytesPerFrame; + if ((pWav->bitsPerSample & 0x7) == 0) { + bytesPerFrame = (pWav->bitsPerSample * pWav->fmt.channels) >> 3; + } else { + bytesPerFrame = pWav->fmt.blockAlign; + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ALAW || pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_MULAW) { + if (bytesPerFrame != pWav->fmt.channels) { + return 0; + } + } + return bytesPerFrame; +} +MA_API ma_uint16 ma_dr_wav_fmt_get_format(const ma_dr_wav_fmt* pFMT) +{ + if (pFMT == NULL) { + return 0; + } + if (pFMT->formatTag != MA_DR_WAVE_FORMAT_EXTENSIBLE) { + return pFMT->formatTag; + } else { + return ma_dr_wav_bytes_to_u16(pFMT->subFormat); + } +} +MA_PRIVATE ma_bool32 ma_dr_wav_preinit(ma_dr_wav* pWav, ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pReadSeekUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pWav == NULL || onRead == NULL || onSeek == NULL) { + return MA_FALSE; + } + MA_DR_WAV_ZERO_MEMORY(pWav, sizeof(*pWav)); + pWav->onRead = onRead; + pWav->onSeek = onSeek; + pWav->pUserData = pReadSeekUserData; + pWav->allocationCallbacks = ma_dr_wav_copy_allocation_callbacks_or_defaults(pAllocationCallbacks); + if (pWav->allocationCallbacks.onFree == NULL || (pWav->allocationCallbacks.onMalloc == NULL && pWav->allocationCallbacks.onRealloc == NULL)) { + return MA_FALSE; + } + return MA_TRUE; +} +MA_PRIVATE ma_bool32 ma_dr_wav_init__internal(ma_dr_wav* pWav, ma_dr_wav_chunk_proc onChunk, void* pChunkUserData, ma_uint32 flags) +{ + ma_result result; + ma_uint64 cursor; + ma_bool32 sequential; + ma_uint8 riff[4]; + ma_dr_wav_fmt fmt; + unsigned short translatedFormatTag; + ma_uint64 dataChunkSize = 0; + ma_uint64 sampleCountFromFactChunk = 0; + ma_uint64 metadataStartPos; + ma_dr_wav__metadata_parser metadataParser; + ma_bool8 isProcessingMetadata = MA_FALSE; + ma_bool8 foundChunk_fmt = MA_FALSE; + ma_bool8 foundChunk_data = MA_FALSE; + ma_bool8 isAIFCFormType = MA_FALSE; 
+ ma_uint64 aiffFrameCount = 0; + cursor = 0; + sequential = (flags & MA_DR_WAV_SEQUENTIAL) != 0; + MA_DR_WAV_ZERO_OBJECT(&fmt); + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, riff, sizeof(riff), &cursor) != sizeof(riff)) { + return MA_FALSE; + } + if (ma_dr_wav_fourcc_equal(riff, "RIFF")) { + pWav->container = ma_dr_wav_container_riff; + } else if (ma_dr_wav_fourcc_equal(riff, "RIFX")) { + pWav->container = ma_dr_wav_container_rifx; + } else if (ma_dr_wav_fourcc_equal(riff, "riff")) { + int i; + ma_uint8 riff2[12]; + pWav->container = ma_dr_wav_container_w64; + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, riff2, sizeof(riff2), &cursor) != sizeof(riff2)) { + return MA_FALSE; + } + for (i = 0; i < 12; ++i) { + if (riff2[i] != ma_dr_wavGUID_W64_RIFF[i+4]) { + return MA_FALSE; + } + } + } else if (ma_dr_wav_fourcc_equal(riff, "RF64")) { + pWav->container = ma_dr_wav_container_rf64; + } else if (ma_dr_wav_fourcc_equal(riff, "FORM")) { + pWav->container = ma_dr_wav_container_aiff; + } else { + return MA_FALSE; + } + if (pWav->container == ma_dr_wav_container_riff || pWav->container == ma_dr_wav_container_rifx || pWav->container == ma_dr_wav_container_rf64) { + ma_uint8 chunkSizeBytes[4]; + ma_uint8 wave[4]; + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) { + return MA_FALSE; + } + if (pWav->container == ma_dr_wav_container_riff || pWav->container == ma_dr_wav_container_rifx) { + if (ma_dr_wav_bytes_to_u32_ex(chunkSizeBytes, pWav->container) < 36) { + return MA_FALSE; + } + } else if (pWav->container == ma_dr_wav_container_rf64) { + if (ma_dr_wav_bytes_to_u32_le(chunkSizeBytes) != 0xFFFFFFFF) { + return MA_FALSE; + } + } else { + return MA_FALSE; + } + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, wave, sizeof(wave), &cursor) != sizeof(wave)) { + return MA_FALSE; + } + if (!ma_dr_wav_fourcc_equal(wave, "WAVE")) { + return MA_FALSE; + } + } else if (pWav->container == ma_dr_wav_container_w64) { + ma_uint8 chunkSizeBytes[8]; + ma_uint8 wave[16]; + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) { + return MA_FALSE; + } + if (ma_dr_wav_bytes_to_u64(chunkSizeBytes) < 80) { + return MA_FALSE; + } + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, wave, sizeof(wave), &cursor) != sizeof(wave)) { + return MA_FALSE; + } + if (!ma_dr_wav_guid_equal(wave, ma_dr_wavGUID_W64_WAVE)) { + return MA_FALSE; + } + } else if (pWav->container == ma_dr_wav_container_aiff) { + ma_uint8 chunkSizeBytes[4]; + ma_uint8 aiff[4]; + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) { + return MA_FALSE; + } + if (ma_dr_wav_bytes_to_u32_be(chunkSizeBytes) < 18) { + return MA_FALSE; + } + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, aiff, sizeof(aiff), &cursor) != sizeof(aiff)) { + return MA_FALSE; + } + if (ma_dr_wav_fourcc_equal(aiff, "AIFF")) { + isAIFCFormType = MA_FALSE; + } else if (ma_dr_wav_fourcc_equal(aiff, "AIFC")) { + isAIFCFormType = MA_TRUE; + } else { + return MA_FALSE; + } + } else { + return MA_FALSE; + } + if (pWav->container == ma_dr_wav_container_rf64) { + ma_uint8 sizeBytes[8]; + ma_uint64 bytesRemainingInChunk; + ma_dr_wav_chunk_header header; + result = ma_dr_wav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header); + if (result != MA_SUCCESS) { + return MA_FALSE; + } + if 
(!ma_dr_wav_fourcc_equal(header.id.fourcc, "ds64")) { + return MA_FALSE; + } + bytesRemainingInChunk = header.sizeInBytes + header.paddingSize; + if (!ma_dr_wav__seek_forward(pWav->onSeek, 8, pWav->pUserData)) { + return MA_FALSE; + } + bytesRemainingInChunk -= 8; + cursor += 8; + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, sizeBytes, sizeof(sizeBytes), &cursor) != sizeof(sizeBytes)) { + return MA_FALSE; + } + bytesRemainingInChunk -= 8; + dataChunkSize = ma_dr_wav_bytes_to_u64(sizeBytes); + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, sizeBytes, sizeof(sizeBytes), &cursor) != sizeof(sizeBytes)) { + return MA_FALSE; + } + bytesRemainingInChunk -= 8; + sampleCountFromFactChunk = ma_dr_wav_bytes_to_u64(sizeBytes); + if (!ma_dr_wav__seek_forward(pWav->onSeek, bytesRemainingInChunk, pWav->pUserData)) { + return MA_FALSE; + } + cursor += bytesRemainingInChunk; + } + metadataStartPos = cursor; + isProcessingMetadata = !sequential && ((flags & MA_DR_WAV_WITH_METADATA) != 0); + if (pWav->container != ma_dr_wav_container_riff && pWav->container != ma_dr_wav_container_rf64) { + isProcessingMetadata = MA_FALSE; + } + MA_DR_WAV_ZERO_MEMORY(&metadataParser, sizeof(metadataParser)); + if (isProcessingMetadata) { + metadataParser.onRead = pWav->onRead; + metadataParser.onSeek = pWav->onSeek; + metadataParser.pReadSeekUserData = pWav->pUserData; + metadataParser.stage = ma_dr_wav__metadata_parser_stage_count; + } + for (;;) { + ma_dr_wav_chunk_header header; + ma_uint64 chunkSize; + result = ma_dr_wav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header); + if (result != MA_SUCCESS) { + break; + } + chunkSize = header.sizeInBytes; + if (!sequential && onChunk != NULL) { + ma_uint64 callbackBytesRead = onChunk(pChunkUserData, pWav->onRead, pWav->onSeek, pWav->pUserData, &header, pWav->container, &fmt); + if (callbackBytesRead > 0) { + if (ma_dr_wav__seek_from_start(pWav->onSeek, cursor, pWav->pUserData) == MA_FALSE) { + return MA_FALSE; + } + } + } + if (((pWav->container == ma_dr_wav_container_riff || pWav->container == ma_dr_wav_container_rifx || pWav->container == ma_dr_wav_container_rf64) && ma_dr_wav_fourcc_equal(header.id.fourcc, "fmt ")) || + ((pWav->container == ma_dr_wav_container_w64) && ma_dr_wav_guid_equal(header.id.guid, ma_dr_wavGUID_W64_FMT))) { + ma_uint8 fmtData[16]; + foundChunk_fmt = MA_TRUE; + if (pWav->onRead(pWav->pUserData, fmtData, sizeof(fmtData)) != sizeof(fmtData)) { + return MA_FALSE; + } + cursor += sizeof(fmtData); + fmt.formatTag = ma_dr_wav_bytes_to_u16_ex(fmtData + 0, pWav->container); + fmt.channels = ma_dr_wav_bytes_to_u16_ex(fmtData + 2, pWav->container); + fmt.sampleRate = ma_dr_wav_bytes_to_u32_ex(fmtData + 4, pWav->container); + fmt.avgBytesPerSec = ma_dr_wav_bytes_to_u32_ex(fmtData + 8, pWav->container); + fmt.blockAlign = ma_dr_wav_bytes_to_u16_ex(fmtData + 12, pWav->container); + fmt.bitsPerSample = ma_dr_wav_bytes_to_u16_ex(fmtData + 14, pWav->container); + fmt.extendedSize = 0; + fmt.validBitsPerSample = 0; + fmt.channelMask = 0; + MA_DR_WAV_ZERO_MEMORY(fmt.subFormat, sizeof(fmt.subFormat)); + if (header.sizeInBytes > 16) { + ma_uint8 fmt_cbSize[2]; + int bytesReadSoFar = 0; + if (pWav->onRead(pWav->pUserData, fmt_cbSize, sizeof(fmt_cbSize)) != sizeof(fmt_cbSize)) { + return MA_FALSE; + } + cursor += sizeof(fmt_cbSize); + bytesReadSoFar = 18; + fmt.extendedSize = ma_dr_wav_bytes_to_u16_ex(fmt_cbSize, pWav->container); + if (fmt.extendedSize > 0) { + if (fmt.formatTag == MA_DR_WAVE_FORMAT_EXTENSIBLE) { + if 
(fmt.extendedSize != 22) { + return MA_FALSE; + } + } + if (fmt.formatTag == MA_DR_WAVE_FORMAT_EXTENSIBLE) { + ma_uint8 fmtext[22]; + if (pWav->onRead(pWav->pUserData, fmtext, fmt.extendedSize) != fmt.extendedSize) { + return MA_FALSE; + } + fmt.validBitsPerSample = ma_dr_wav_bytes_to_u16_ex(fmtext + 0, pWav->container); + fmt.channelMask = ma_dr_wav_bytes_to_u32_ex(fmtext + 2, pWav->container); + ma_dr_wav_bytes_to_guid(fmtext + 6, fmt.subFormat); + } else { + if (pWav->onSeek(pWav->pUserData, fmt.extendedSize, ma_dr_wav_seek_origin_current) == MA_FALSE) { + return MA_FALSE; + } + } + cursor += fmt.extendedSize; + bytesReadSoFar += fmt.extendedSize; + } + if (pWav->onSeek(pWav->pUserData, (int)(header.sizeInBytes - bytesReadSoFar), ma_dr_wav_seek_origin_current) == MA_FALSE) { + return MA_FALSE; + } + cursor += (header.sizeInBytes - bytesReadSoFar); + } + if (header.paddingSize > 0) { + if (ma_dr_wav__seek_forward(pWav->onSeek, header.paddingSize, pWav->pUserData) == MA_FALSE) { + break; + } + cursor += header.paddingSize; + } + continue; + } + if (((pWav->container == ma_dr_wav_container_riff || pWav->container == ma_dr_wav_container_rifx || pWav->container == ma_dr_wav_container_rf64) && ma_dr_wav_fourcc_equal(header.id.fourcc, "data")) || + ((pWav->container == ma_dr_wav_container_w64) && ma_dr_wav_guid_equal(header.id.guid, ma_dr_wavGUID_W64_DATA))) { + foundChunk_data = MA_TRUE; + pWav->dataChunkDataPos = cursor; + if (pWav->container != ma_dr_wav_container_rf64) { + dataChunkSize = chunkSize; + } + if (sequential || !isProcessingMetadata) { + break; + } else { + chunkSize += header.paddingSize; + if (ma_dr_wav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData) == MA_FALSE) { + break; + } + cursor += chunkSize; + continue; + } + } + if (((pWav->container == ma_dr_wav_container_riff || pWav->container == ma_dr_wav_container_rifx || pWav->container == ma_dr_wav_container_rf64) && ma_dr_wav_fourcc_equal(header.id.fourcc, "fact")) || + ((pWav->container == ma_dr_wav_container_w64) && ma_dr_wav_guid_equal(header.id.guid, ma_dr_wavGUID_W64_FACT))) { + if (pWav->container == ma_dr_wav_container_riff || pWav->container == ma_dr_wav_container_rifx) { + ma_uint8 sampleCount[4]; + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, &sampleCount, 4, &cursor) != 4) { + return MA_FALSE; + } + chunkSize -= 4; + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ADPCM) { + sampleCountFromFactChunk = ma_dr_wav_bytes_to_u32_ex(sampleCount, pWav->container); + } else { + sampleCountFromFactChunk = 0; + } + } else if (pWav->container == ma_dr_wav_container_w64) { + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, &sampleCountFromFactChunk, 8, &cursor) != 8) { + return MA_FALSE; + } + chunkSize -= 8; + } else if (pWav->container == ma_dr_wav_container_rf64) { + } + chunkSize += header.paddingSize; + if (ma_dr_wav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData) == MA_FALSE) { + break; + } + cursor += chunkSize; + continue; + } + if (pWav->container == ma_dr_wav_container_aiff && ma_dr_wav_fourcc_equal(header.id.fourcc, "COMM")) { + ma_uint8 commData[24]; + ma_uint32 commDataBytesToRead; + ma_uint16 channels; + ma_uint32 frameCount; + ma_uint16 sampleSizeInBits; + ma_int64 sampleRate; + ma_uint16 compressionFormat; + foundChunk_fmt = MA_TRUE; + if (isAIFCFormType) { + commDataBytesToRead = 24; + if (header.sizeInBytes < commDataBytesToRead) { + return MA_FALSE; + } + } else { + commDataBytesToRead = 18; + if (header.sizeInBytes != commDataBytesToRead) { + return MA_FALSE; + } + } + if 
(ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, commData, commDataBytesToRead, &cursor) != commDataBytesToRead) { + return MA_FALSE; + } + channels = ma_dr_wav_bytes_to_u16_ex (commData + 0, pWav->container); + frameCount = ma_dr_wav_bytes_to_u32_ex (commData + 2, pWav->container); + sampleSizeInBits = ma_dr_wav_bytes_to_u16_ex (commData + 6, pWav->container); + sampleRate = ma_dr_wav_aiff_extented_to_s64(commData + 8); + if (sampleRate < 0 || sampleRate > 0xFFFFFFFF) { + return MA_FALSE; + } + if (isAIFCFormType) { + const ma_uint8* type = commData + 18; + if (ma_dr_wav_fourcc_equal(type, "NONE")) { + compressionFormat = MA_DR_WAVE_FORMAT_PCM; + } else if (ma_dr_wav_fourcc_equal(type, "raw ")) { + compressionFormat = MA_DR_WAVE_FORMAT_PCM; + if (sampleSizeInBits == 8) { + pWav->aiff.isUnsigned = MA_TRUE; + } + } else if (ma_dr_wav_fourcc_equal(type, "sowt")) { + compressionFormat = MA_DR_WAVE_FORMAT_PCM; + pWav->aiff.isLE = MA_TRUE; + } else if (ma_dr_wav_fourcc_equal(type, "fl32") || ma_dr_wav_fourcc_equal(type, "fl64") || ma_dr_wav_fourcc_equal(type, "FL32") || ma_dr_wav_fourcc_equal(type, "FL64")) { + compressionFormat = MA_DR_WAVE_FORMAT_IEEE_FLOAT; + } else if (ma_dr_wav_fourcc_equal(type, "alaw") || ma_dr_wav_fourcc_equal(type, "ALAW")) { + compressionFormat = MA_DR_WAVE_FORMAT_ALAW; + } else if (ma_dr_wav_fourcc_equal(type, "ulaw") || ma_dr_wav_fourcc_equal(type, "ULAW")) { + compressionFormat = MA_DR_WAVE_FORMAT_MULAW; + } else if (ma_dr_wav_fourcc_equal(type, "ima4")) { + compressionFormat = MA_DR_WAVE_FORMAT_DVI_ADPCM; + sampleSizeInBits = 4; + return MA_FALSE; + } else { + return MA_FALSE; + } + } else { + compressionFormat = MA_DR_WAVE_FORMAT_PCM; + } + aiffFrameCount = frameCount; + fmt.formatTag = compressionFormat; + fmt.channels = channels; + fmt.sampleRate = (ma_uint32)sampleRate; + fmt.bitsPerSample = sampleSizeInBits; + fmt.blockAlign = (ma_uint16)(fmt.channels * fmt.bitsPerSample / 8); + fmt.avgBytesPerSec = fmt.blockAlign * fmt.sampleRate; + if (fmt.blockAlign == 0 && compressionFormat == MA_DR_WAVE_FORMAT_DVI_ADPCM) { + fmt.blockAlign = 34 * fmt.channels; + } + if (compressionFormat == MA_DR_WAVE_FORMAT_ALAW || compressionFormat == MA_DR_WAVE_FORMAT_MULAW) { + if (fmt.bitsPerSample > 8) { + fmt.bitsPerSample = 8; + fmt.blockAlign = fmt.channels; + } + } + fmt.bitsPerSample += (fmt.bitsPerSample & 7); + if (isAIFCFormType) { + if (ma_dr_wav__seek_forward(pWav->onSeek, (chunkSize - commDataBytesToRead), pWav->pUserData) == MA_FALSE) { + return MA_FALSE; + } + cursor += (chunkSize - commDataBytesToRead); + } + continue; + } + if (pWav->container == ma_dr_wav_container_aiff && ma_dr_wav_fourcc_equal(header.id.fourcc, "SSND")) { + ma_uint8 offsetAndBlockSizeData[8]; + ma_uint32 offset; + foundChunk_data = MA_TRUE; + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, offsetAndBlockSizeData, sizeof(offsetAndBlockSizeData), &cursor) != sizeof(offsetAndBlockSizeData)) { + return MA_FALSE; + } + offset = ma_dr_wav_bytes_to_u32_ex(offsetAndBlockSizeData + 0, pWav->container); + if (ma_dr_wav__seek_forward(pWav->onSeek, offset, pWav->pUserData) == MA_FALSE) { + return MA_FALSE; + } + cursor += offset; + pWav->dataChunkDataPos = cursor; + dataChunkSize = chunkSize; + if (sequential || !isProcessingMetadata) { + break; + } else { + if (ma_dr_wav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData) == MA_FALSE) { + break; + } + cursor += chunkSize; + continue; + } + } + if (isProcessingMetadata) { + ma_uint64 metadataBytesRead; + metadataBytesRead = 
ma_dr_wav__metadata_process_chunk(&metadataParser, &header, ma_dr_wav_metadata_type_all_including_unknown); + MA_DR_WAV_ASSERT(metadataBytesRead <= header.sizeInBytes); + if (ma_dr_wav__seek_from_start(pWav->onSeek, cursor, pWav->pUserData) == MA_FALSE) { + break; + } + } + chunkSize += header.paddingSize; + if (ma_dr_wav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData) == MA_FALSE) { + break; + } + cursor += chunkSize; + } + if (!foundChunk_fmt || !foundChunk_data) { + return MA_FALSE; + } + if ((fmt.sampleRate == 0 || fmt.sampleRate > MA_DR_WAV_MAX_SAMPLE_RATE ) || + (fmt.channels == 0 || fmt.channels > MA_DR_WAV_MAX_CHANNELS ) || + (fmt.bitsPerSample == 0 || fmt.bitsPerSample > MA_DR_WAV_MAX_BITS_PER_SAMPLE) || + fmt.blockAlign == 0) { + return MA_FALSE; + } + translatedFormatTag = fmt.formatTag; + if (translatedFormatTag == MA_DR_WAVE_FORMAT_EXTENSIBLE) { + translatedFormatTag = ma_dr_wav_bytes_to_u16_ex(fmt.subFormat + 0, pWav->container); + } + if (!sequential) { + if (!ma_dr_wav__seek_from_start(pWav->onSeek, pWav->dataChunkDataPos, pWav->pUserData)) { + return MA_FALSE; + } + cursor = pWav->dataChunkDataPos; + } + if (isProcessingMetadata && metadataParser.metadataCount > 0) { + if (ma_dr_wav__seek_from_start(pWav->onSeek, metadataStartPos, pWav->pUserData) == MA_FALSE) { + return MA_FALSE; + } + result = ma_dr_wav__metadata_alloc(&metadataParser, &pWav->allocationCallbacks); + if (result != MA_SUCCESS) { + return MA_FALSE; + } + metadataParser.stage = ma_dr_wav__metadata_parser_stage_read; + for (;;) { + ma_dr_wav_chunk_header header; + ma_uint64 metadataBytesRead; + result = ma_dr_wav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header); + if (result != MA_SUCCESS) { + break; + } + metadataBytesRead = ma_dr_wav__metadata_process_chunk(&metadataParser, &header, ma_dr_wav_metadata_type_all_including_unknown); + if (ma_dr_wav__seek_forward(pWav->onSeek, (header.sizeInBytes + header.paddingSize) - metadataBytesRead, pWav->pUserData) == MA_FALSE) { + ma_dr_wav_free(metadataParser.pMetadata, &pWav->allocationCallbacks); + return MA_FALSE; + } + } + pWav->pMetadata = metadataParser.pMetadata; + pWav->metadataCount = metadataParser.metadataCount; + } + if (dataChunkSize == 0xFFFFFFFF && (pWav->container == ma_dr_wav_container_riff || pWav->container == ma_dr_wav_container_rifx) && pWav->isSequentialWrite == MA_FALSE) { + dataChunkSize = 0; + for (;;) { + ma_uint8 temp[4096]; + size_t bytesRead = pWav->onRead(pWav->pUserData, temp, sizeof(temp)); + dataChunkSize += bytesRead; + if (bytesRead < sizeof(temp)) { + break; + } + } + } + if (ma_dr_wav__seek_from_start(pWav->onSeek, pWav->dataChunkDataPos, pWav->pUserData) == MA_FALSE) { + ma_dr_wav_free(pWav->pMetadata, &pWav->allocationCallbacks); + return MA_FALSE; + } + pWav->fmt = fmt; + pWav->sampleRate = fmt.sampleRate; + pWav->channels = fmt.channels; + pWav->bitsPerSample = fmt.bitsPerSample; + pWav->bytesRemaining = dataChunkSize; + pWav->translatedFormatTag = translatedFormatTag; + pWav->dataChunkDataSize = dataChunkSize; + if (sampleCountFromFactChunk != 0) { + pWav->totalPCMFrameCount = sampleCountFromFactChunk; + } else if (aiffFrameCount != 0) { + pWav->totalPCMFrameCount = aiffFrameCount; + } else { + ma_uint32 bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + ma_dr_wav_free(pWav->pMetadata, &pWav->allocationCallbacks); + return MA_FALSE; + } + pWav->totalPCMFrameCount = dataChunkSize / bytesPerFrame; + if (pWav->translatedFormatTag == 
MA_DR_WAVE_FORMAT_ADPCM) { + ma_uint64 totalBlockHeaderSizeInBytes; + ma_uint64 blockCount = dataChunkSize / fmt.blockAlign; + if ((blockCount * fmt.blockAlign) < dataChunkSize) { + blockCount += 1; + } + totalBlockHeaderSizeInBytes = blockCount * (6*fmt.channels); + pWav->totalPCMFrameCount = ((dataChunkSize - totalBlockHeaderSizeInBytes) * 2) / fmt.channels; + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM) { + ma_uint64 totalBlockHeaderSizeInBytes; + ma_uint64 blockCount = dataChunkSize / fmt.blockAlign; + if ((blockCount * fmt.blockAlign) < dataChunkSize) { + blockCount += 1; + } + totalBlockHeaderSizeInBytes = blockCount * (4*fmt.channels); + pWav->totalPCMFrameCount = ((dataChunkSize - totalBlockHeaderSizeInBytes) * 2) / fmt.channels; + pWav->totalPCMFrameCount += blockCount; + } + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ADPCM || pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM) { + if (pWav->channels > 2) { + ma_dr_wav_free(pWav->pMetadata, &pWav->allocationCallbacks); + return MA_FALSE; + } + } + if (ma_dr_wav_get_bytes_per_pcm_frame(pWav) == 0) { + ma_dr_wav_free(pWav->pMetadata, &pWav->allocationCallbacks); + return MA_FALSE; + } +#ifdef MA_DR_WAV_LIBSNDFILE_COMPAT + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ADPCM) { + ma_uint64 blockCount = dataChunkSize / fmt.blockAlign; + pWav->totalPCMFrameCount = (((blockCount * (fmt.blockAlign - (6*pWav->channels))) * 2)) / fmt.channels; + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM) { + ma_uint64 blockCount = dataChunkSize / fmt.blockAlign; + pWav->totalPCMFrameCount = (((blockCount * (fmt.blockAlign - (4*pWav->channels))) * 2) + (blockCount * pWav->channels)) / fmt.channels; + } +#endif + return MA_TRUE; +} +MA_API ma_bool32 ma_dr_wav_init(ma_dr_wav* pWav, ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_wav_init_ex(pWav, onRead, onSeek, NULL, pUserData, NULL, 0, pAllocationCallbacks); +} +MA_API ma_bool32 ma_dr_wav_init_ex(ma_dr_wav* pWav, ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, ma_dr_wav_chunk_proc onChunk, void* pReadSeekUserData, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (!ma_dr_wav_preinit(pWav, onRead, onSeek, pReadSeekUserData, pAllocationCallbacks)) { + return MA_FALSE; + } + return ma_dr_wav_init__internal(pWav, onChunk, pChunkUserData, flags); +} +MA_API ma_bool32 ma_dr_wav_init_with_metadata(ma_dr_wav* pWav, ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (!ma_dr_wav_preinit(pWav, onRead, onSeek, pUserData, pAllocationCallbacks)) { + return MA_FALSE; + } + return ma_dr_wav_init__internal(pWav, NULL, NULL, flags | MA_DR_WAV_WITH_METADATA); +} +MA_API ma_dr_wav_metadata* ma_dr_wav_take_ownership_of_metadata(ma_dr_wav* pWav) +{ + ma_dr_wav_metadata *result = pWav->pMetadata; + pWav->pMetadata = NULL; + pWav->metadataCount = 0; + return result; +} +MA_PRIVATE size_t ma_dr_wav__write(ma_dr_wav* pWav, const void* pData, size_t dataSize) +{ + MA_DR_WAV_ASSERT(pWav != NULL); + MA_DR_WAV_ASSERT(pWav->onWrite != NULL); + return pWav->onWrite(pWav->pUserData, pData, dataSize); +} +MA_PRIVATE size_t ma_dr_wav__write_byte(ma_dr_wav* pWav, ma_uint8 byte) +{ + MA_DR_WAV_ASSERT(pWav != NULL); + MA_DR_WAV_ASSERT(pWav->onWrite != NULL); + return pWav->onWrite(pWav->pUserData, &byte, 1); +} +MA_PRIVATE size_t 
ma_dr_wav__write_u16ne_to_le(ma_dr_wav* pWav, ma_uint16 value) +{ + MA_DR_WAV_ASSERT(pWav != NULL); + MA_DR_WAV_ASSERT(pWav->onWrite != NULL); + if (!ma_dr_wav__is_little_endian()) { + value = ma_dr_wav__bswap16(value); + } + return ma_dr_wav__write(pWav, &value, 2); +} +MA_PRIVATE size_t ma_dr_wav__write_u32ne_to_le(ma_dr_wav* pWav, ma_uint32 value) +{ + MA_DR_WAV_ASSERT(pWav != NULL); + MA_DR_WAV_ASSERT(pWav->onWrite != NULL); + if (!ma_dr_wav__is_little_endian()) { + value = ma_dr_wav__bswap32(value); + } + return ma_dr_wav__write(pWav, &value, 4); +} +MA_PRIVATE size_t ma_dr_wav__write_u64ne_to_le(ma_dr_wav* pWav, ma_uint64 value) +{ + MA_DR_WAV_ASSERT(pWav != NULL); + MA_DR_WAV_ASSERT(pWav->onWrite != NULL); + if (!ma_dr_wav__is_little_endian()) { + value = ma_dr_wav__bswap64(value); + } + return ma_dr_wav__write(pWav, &value, 8); +} +MA_PRIVATE size_t ma_dr_wav__write_f32ne_to_le(ma_dr_wav* pWav, float value) +{ + union { + ma_uint32 u32; + float f32; + } u; + MA_DR_WAV_ASSERT(pWav != NULL); + MA_DR_WAV_ASSERT(pWav->onWrite != NULL); + u.f32 = value; + if (!ma_dr_wav__is_little_endian()) { + u.u32 = ma_dr_wav__bswap32(u.u32); + } + return ma_dr_wav__write(pWav, &u.u32, 4); +} +MA_PRIVATE size_t ma_dr_wav__write_or_count(ma_dr_wav* pWav, const void* pData, size_t dataSize) +{ + if (pWav == NULL) { + return dataSize; + } + return ma_dr_wav__write(pWav, pData, dataSize); +} +MA_PRIVATE size_t ma_dr_wav__write_or_count_byte(ma_dr_wav* pWav, ma_uint8 byte) +{ + if (pWav == NULL) { + return 1; + } + return ma_dr_wav__write_byte(pWav, byte); +} +MA_PRIVATE size_t ma_dr_wav__write_or_count_u16ne_to_le(ma_dr_wav* pWav, ma_uint16 value) +{ + if (pWav == NULL) { + return 2; + } + return ma_dr_wav__write_u16ne_to_le(pWav, value); +} +MA_PRIVATE size_t ma_dr_wav__write_or_count_u32ne_to_le(ma_dr_wav* pWav, ma_uint32 value) +{ + if (pWav == NULL) { + return 4; + } + return ma_dr_wav__write_u32ne_to_le(pWav, value); +} +#if 0 +MA_PRIVATE size_t ma_dr_wav__write_or_count_u64ne_to_le(ma_dr_wav* pWav, ma_uint64 value) +{ + if (pWav == NULL) { + return 8; + } + return ma_dr_wav__write_u64ne_to_le(pWav, value); +} +#endif +MA_PRIVATE size_t ma_dr_wav__write_or_count_f32ne_to_le(ma_dr_wav* pWav, float value) +{ + if (pWav == NULL) { + return 4; + } + return ma_dr_wav__write_f32ne_to_le(pWav, value); +} +MA_PRIVATE size_t ma_dr_wav__write_or_count_string_to_fixed_size_buf(ma_dr_wav* pWav, char* str, size_t bufFixedSize) +{ + size_t len; + if (pWav == NULL) { + return bufFixedSize; + } + len = ma_dr_wav__strlen_clamped(str, bufFixedSize); + ma_dr_wav__write_or_count(pWav, str, len); + if (len < bufFixedSize) { + size_t i; + for (i = 0; i < bufFixedSize - len; ++i) { + ma_dr_wav__write_byte(pWav, 0); + } + } + return bufFixedSize; +} +MA_PRIVATE size_t ma_dr_wav__write_or_count_metadata(ma_dr_wav* pWav, ma_dr_wav_metadata* pMetadatas, ma_uint32 metadataCount) +{ + size_t bytesWritten = 0; + ma_bool32 hasListAdtl = MA_FALSE; + ma_bool32 hasListInfo = MA_FALSE; + ma_uint32 iMetadata; + if (pMetadatas == NULL || metadataCount == 0) { + return 0; + } + for (iMetadata = 0; iMetadata < metadataCount; ++iMetadata) { + ma_dr_wav_metadata* pMetadata = &pMetadatas[iMetadata]; + ma_uint32 chunkSize = 0; + if ((pMetadata->type & ma_dr_wav_metadata_type_list_all_info_strings) || (pMetadata->type == ma_dr_wav_metadata_type_unknown && pMetadata->data.unknown.chunkLocation == ma_dr_wav_metadata_location_inside_info_list)) { + hasListInfo = MA_TRUE; + } + if ((pMetadata->type & ma_dr_wav_metadata_type_list_all_adtl) || 
(pMetadata->type == ma_dr_wav_metadata_type_unknown && pMetadata->data.unknown.chunkLocation == ma_dr_wav_metadata_location_inside_adtl_list)) { + hasListAdtl = MA_TRUE; + } + switch (pMetadata->type) { + case ma_dr_wav_metadata_type_smpl: + { + ma_uint32 iLoop; + chunkSize = MA_DR_WAV_SMPL_BYTES + MA_DR_WAV_SMPL_LOOP_BYTES * pMetadata->data.smpl.sampleLoopCount + pMetadata->data.smpl.samplerSpecificDataSizeInBytes; + bytesWritten += ma_dr_wav__write_or_count(pWav, "smpl", 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, chunkSize); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.manufacturerId); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.productId); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.samplePeriodNanoseconds); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.midiUnityNote); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.midiPitchFraction); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.smpteFormat); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.smpteOffset); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.sampleLoopCount); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.samplerSpecificDataSizeInBytes); + for (iLoop = 0; iLoop < pMetadata->data.smpl.sampleLoopCount; ++iLoop) { + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].cuePointId); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].type); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].firstSampleByteOffset); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].lastSampleByteOffset); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].sampleFraction); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].playCount); + } + if (pMetadata->data.smpl.samplerSpecificDataSizeInBytes > 0) { + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.smpl.pSamplerSpecificData, pMetadata->data.smpl.samplerSpecificDataSizeInBytes); + } + } break; + case ma_dr_wav_metadata_type_inst: + { + chunkSize = MA_DR_WAV_INST_BYTES; + bytesWritten += ma_dr_wav__write_or_count(pWav, "inst", 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, chunkSize); + bytesWritten += ma_dr_wav__write_or_count(pWav, &pMetadata->data.inst.midiUnityNote, 1); + bytesWritten += ma_dr_wav__write_or_count(pWav, &pMetadata->data.inst.fineTuneCents, 1); + bytesWritten += ma_dr_wav__write_or_count(pWav, &pMetadata->data.inst.gainDecibels, 1); + bytesWritten += ma_dr_wav__write_or_count(pWav, &pMetadata->data.inst.lowNote, 1); + bytesWritten += ma_dr_wav__write_or_count(pWav, &pMetadata->data.inst.highNote, 1); + bytesWritten += ma_dr_wav__write_or_count(pWav, &pMetadata->data.inst.lowVelocity, 1); + bytesWritten += ma_dr_wav__write_or_count(pWav, &pMetadata->data.inst.highVelocity, 1); + } break; + case ma_dr_wav_metadata_type_cue: + { + ma_uint32 iCuePoint; + chunkSize = MA_DR_WAV_CUE_BYTES + MA_DR_WAV_CUE_POINT_BYTES * pMetadata->data.cue.cuePointCount; + bytesWritten += ma_dr_wav__write_or_count(pWav, "cue ", 4); + bytesWritten += 
ma_dr_wav__write_or_count_u32ne_to_le(pWav, chunkSize); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.cuePointCount); + for (iCuePoint = 0; iCuePoint < pMetadata->data.cue.cuePointCount; ++iCuePoint) { + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].id); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].playOrderPosition); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].dataChunkId, 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].chunkStart); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].blockStart); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].sampleByteOffset); + } + } break; + case ma_dr_wav_metadata_type_acid: + { + chunkSize = MA_DR_WAV_ACID_BYTES; + bytesWritten += ma_dr_wav__write_or_count(pWav, "acid", 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, chunkSize); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.acid.flags); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.acid.midiUnityNote); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.acid.reserved1); + bytesWritten += ma_dr_wav__write_or_count_f32ne_to_le(pWav, pMetadata->data.acid.reserved2); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.acid.numBeats); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.acid.meterDenominator); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.acid.meterNumerator); + bytesWritten += ma_dr_wav__write_or_count_f32ne_to_le(pWav, pMetadata->data.acid.tempo); + } break; + case ma_dr_wav_metadata_type_bext: + { + char reservedBuf[MA_DR_WAV_BEXT_RESERVED_BYTES]; + ma_uint32 timeReferenceLow; + ma_uint32 timeReferenceHigh; + chunkSize = MA_DR_WAV_BEXT_BYTES + pMetadata->data.bext.codingHistorySize; + bytesWritten += ma_dr_wav__write_or_count(pWav, "bext", 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, chunkSize); + bytesWritten += ma_dr_wav__write_or_count_string_to_fixed_size_buf(pWav, pMetadata->data.bext.pDescription, MA_DR_WAV_BEXT_DESCRIPTION_BYTES); + bytesWritten += ma_dr_wav__write_or_count_string_to_fixed_size_buf(pWav, pMetadata->data.bext.pOriginatorName, MA_DR_WAV_BEXT_ORIGINATOR_NAME_BYTES); + bytesWritten += ma_dr_wav__write_or_count_string_to_fixed_size_buf(pWav, pMetadata->data.bext.pOriginatorReference, MA_DR_WAV_BEXT_ORIGINATOR_REF_BYTES); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.bext.pOriginationDate, sizeof(pMetadata->data.bext.pOriginationDate)); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.bext.pOriginationTime, sizeof(pMetadata->data.bext.pOriginationTime)); + timeReferenceLow = (ma_uint32)(pMetadata->data.bext.timeReference & 0xFFFFFFFF); + timeReferenceHigh = (ma_uint32)(pMetadata->data.bext.timeReference >> 32); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, timeReferenceLow); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, timeReferenceHigh); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.version); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.bext.pUMID, 
MA_DR_WAV_BEXT_UMID_BYTES); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.loudnessValue); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.loudnessRange); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.maxTruePeakLevel); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.maxMomentaryLoudness); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.maxShortTermLoudness); + MA_DR_WAV_ZERO_MEMORY(reservedBuf, sizeof(reservedBuf)); + bytesWritten += ma_dr_wav__write_or_count(pWav, reservedBuf, sizeof(reservedBuf)); + if (pMetadata->data.bext.codingHistorySize > 0) { + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.bext.pCodingHistory, pMetadata->data.bext.codingHistorySize); + } + } break; + case ma_dr_wav_metadata_type_unknown: + { + if (pMetadata->data.unknown.chunkLocation == ma_dr_wav_metadata_location_top_level) { + chunkSize = pMetadata->data.unknown.dataSizeInBytes; + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.unknown.id, 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, chunkSize); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.unknown.pData, pMetadata->data.unknown.dataSizeInBytes); + } + } break; + default: break; + } + if ((chunkSize % 2) != 0) { + bytesWritten += ma_dr_wav__write_or_count_byte(pWav, 0); + } + } + if (hasListInfo) { + ma_uint32 chunkSize = 4; + for (iMetadata = 0; iMetadata < metadataCount; ++iMetadata) { + ma_dr_wav_metadata* pMetadata = &pMetadatas[iMetadata]; + if ((pMetadata->type & ma_dr_wav_metadata_type_list_all_info_strings)) { + chunkSize += 8; + chunkSize += pMetadata->data.infoText.stringLength + 1; + } else if (pMetadata->type == ma_dr_wav_metadata_type_unknown && pMetadata->data.unknown.chunkLocation == ma_dr_wav_metadata_location_inside_info_list) { + chunkSize += 8; + chunkSize += pMetadata->data.unknown.dataSizeInBytes; + } + if ((chunkSize % 2) != 0) { + chunkSize += 1; + } + } + bytesWritten += ma_dr_wav__write_or_count(pWav, "LIST", 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, chunkSize); + bytesWritten += ma_dr_wav__write_or_count(pWav, "INFO", 4); + for (iMetadata = 0; iMetadata < metadataCount; ++iMetadata) { + ma_dr_wav_metadata* pMetadata = &pMetadatas[iMetadata]; + ma_uint32 subchunkSize = 0; + if (pMetadata->type & ma_dr_wav_metadata_type_list_all_info_strings) { + const char* pID = NULL; + switch (pMetadata->type) { + case ma_dr_wav_metadata_type_list_info_software: pID = "ISFT"; break; + case ma_dr_wav_metadata_type_list_info_copyright: pID = "ICOP"; break; + case ma_dr_wav_metadata_type_list_info_title: pID = "INAM"; break; + case ma_dr_wav_metadata_type_list_info_artist: pID = "IART"; break; + case ma_dr_wav_metadata_type_list_info_comment: pID = "ICMT"; break; + case ma_dr_wav_metadata_type_list_info_date: pID = "ICRD"; break; + case ma_dr_wav_metadata_type_list_info_genre: pID = "IGNR"; break; + case ma_dr_wav_metadata_type_list_info_album: pID = "IPRD"; break; + case ma_dr_wav_metadata_type_list_info_tracknumber: pID = "ITRK"; break; + default: break; + } + MA_DR_WAV_ASSERT(pID != NULL); + if (pMetadata->data.infoText.stringLength) { + subchunkSize = pMetadata->data.infoText.stringLength + 1; + bytesWritten += ma_dr_wav__write_or_count(pWav, pID, 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, subchunkSize); + bytesWritten += ma_dr_wav__write_or_count(pWav, 
pMetadata->data.infoText.pString, pMetadata->data.infoText.stringLength); + bytesWritten += ma_dr_wav__write_or_count_byte(pWav, '\0'); + } + } else if (pMetadata->type == ma_dr_wav_metadata_type_unknown && pMetadata->data.unknown.chunkLocation == ma_dr_wav_metadata_location_inside_info_list) { + if (pMetadata->data.unknown.dataSizeInBytes) { + subchunkSize = pMetadata->data.unknown.dataSizeInBytes; + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.unknown.id, 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.unknown.dataSizeInBytes); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.unknown.pData, subchunkSize); + } + } + if ((subchunkSize % 2) != 0) { + bytesWritten += ma_dr_wav__write_or_count_byte(pWav, 0); + } + } + } + if (hasListAdtl) { + ma_uint32 chunkSize = 4; + for (iMetadata = 0; iMetadata < metadataCount; ++iMetadata) { + ma_dr_wav_metadata* pMetadata = &pMetadatas[iMetadata]; + switch (pMetadata->type) + { + case ma_dr_wav_metadata_type_list_label: + case ma_dr_wav_metadata_type_list_note: + { + chunkSize += 8; + chunkSize += MA_DR_WAV_LIST_LABEL_OR_NOTE_BYTES; + if (pMetadata->data.labelOrNote.stringLength > 0) { + chunkSize += pMetadata->data.labelOrNote.stringLength + 1; + } + } break; + case ma_dr_wav_metadata_type_list_labelled_cue_region: + { + chunkSize += 8; + chunkSize += MA_DR_WAV_LIST_LABELLED_TEXT_BYTES; + if (pMetadata->data.labelledCueRegion.stringLength > 0) { + chunkSize += pMetadata->data.labelledCueRegion.stringLength + 1; + } + } break; + case ma_dr_wav_metadata_type_unknown: + { + if (pMetadata->data.unknown.chunkLocation == ma_dr_wav_metadata_location_inside_adtl_list) { + chunkSize += 8; + chunkSize += pMetadata->data.unknown.dataSizeInBytes; + } + } break; + default: break; + } + if ((chunkSize % 2) != 0) { + chunkSize += 1; + } + } + bytesWritten += ma_dr_wav__write_or_count(pWav, "LIST", 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, chunkSize); + bytesWritten += ma_dr_wav__write_or_count(pWav, "adtl", 4); + for (iMetadata = 0; iMetadata < metadataCount; ++iMetadata) { + ma_dr_wav_metadata* pMetadata = &pMetadatas[iMetadata]; + ma_uint32 subchunkSize = 0; + switch (pMetadata->type) + { + case ma_dr_wav_metadata_type_list_label: + case ma_dr_wav_metadata_type_list_note: + { + if (pMetadata->data.labelOrNote.stringLength > 0) { + const char *pID = NULL; + if (pMetadata->type == ma_dr_wav_metadata_type_list_label) { + pID = "labl"; + } + else if (pMetadata->type == ma_dr_wav_metadata_type_list_note) { + pID = "note"; + } + MA_DR_WAV_ASSERT(pID != NULL); + MA_DR_WAV_ASSERT(pMetadata->data.labelOrNote.pString != NULL); + subchunkSize = MA_DR_WAV_LIST_LABEL_OR_NOTE_BYTES; + bytesWritten += ma_dr_wav__write_or_count(pWav, pID, 4); + subchunkSize += pMetadata->data.labelOrNote.stringLength + 1; + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, subchunkSize); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.labelOrNote.cuePointId); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.labelOrNote.pString, pMetadata->data.labelOrNote.stringLength); + bytesWritten += ma_dr_wav__write_or_count_byte(pWav, '\0'); + } + } break; + case ma_dr_wav_metadata_type_list_labelled_cue_region: + { + subchunkSize = MA_DR_WAV_LIST_LABELLED_TEXT_BYTES; + bytesWritten += ma_dr_wav__write_or_count(pWav, "ltxt", 4); + if (pMetadata->data.labelledCueRegion.stringLength > 0) { + subchunkSize += pMetadata->data.labelledCueRegion.stringLength + 1; + } 
+ bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, subchunkSize); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.labelledCueRegion.cuePointId); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.labelledCueRegion.sampleLength); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.labelledCueRegion.purposeId, 4); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.labelledCueRegion.country); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.labelledCueRegion.language); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.labelledCueRegion.dialect); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.labelledCueRegion.codePage); + if (pMetadata->data.labelledCueRegion.stringLength > 0) { + MA_DR_WAV_ASSERT(pMetadata->data.labelledCueRegion.pString != NULL); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.labelledCueRegion.pString, pMetadata->data.labelledCueRegion.stringLength); + bytesWritten += ma_dr_wav__write_or_count_byte(pWav, '\0'); + } + } break; + case ma_dr_wav_metadata_type_unknown: + { + if (pMetadata->data.unknown.chunkLocation == ma_dr_wav_metadata_location_inside_adtl_list) { + subchunkSize = pMetadata->data.unknown.dataSizeInBytes; + MA_DR_WAV_ASSERT(pMetadata->data.unknown.pData != NULL); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.unknown.id, 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, subchunkSize); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.unknown.pData, subchunkSize); + } + } break; + default: break; + } + if ((subchunkSize % 2) != 0) { + bytesWritten += ma_dr_wav__write_or_count_byte(pWav, 0); + } + } + } + MA_DR_WAV_ASSERT((bytesWritten % 2) == 0); + return bytesWritten; +} +MA_PRIVATE ma_uint32 ma_dr_wav__riff_chunk_size_riff(ma_uint64 dataChunkSize, ma_dr_wav_metadata* pMetadata, ma_uint32 metadataCount) +{ + ma_uint64 chunkSize = 4 + 24 + (ma_uint64)ma_dr_wav__write_or_count_metadata(NULL, pMetadata, metadataCount) + 8 + dataChunkSize + ma_dr_wav__chunk_padding_size_riff(dataChunkSize); + if (chunkSize > 0xFFFFFFFFUL) { + chunkSize = 0xFFFFFFFFUL; + } + return (ma_uint32)chunkSize; +} +MA_PRIVATE ma_uint32 ma_dr_wav__data_chunk_size_riff(ma_uint64 dataChunkSize) +{ + if (dataChunkSize <= 0xFFFFFFFFUL) { + return (ma_uint32)dataChunkSize; + } else { + return 0xFFFFFFFFUL; + } +} +MA_PRIVATE ma_uint64 ma_dr_wav__riff_chunk_size_w64(ma_uint64 dataChunkSize) +{ + ma_uint64 dataSubchunkPaddingSize = ma_dr_wav__chunk_padding_size_w64(dataChunkSize); + return 80 + 24 + dataChunkSize + dataSubchunkPaddingSize; +} +MA_PRIVATE ma_uint64 ma_dr_wav__data_chunk_size_w64(ma_uint64 dataChunkSize) +{ + return 24 + dataChunkSize; +} +MA_PRIVATE ma_uint64 ma_dr_wav__riff_chunk_size_rf64(ma_uint64 dataChunkSize, ma_dr_wav_metadata *metadata, ma_uint32 numMetadata) +{ + ma_uint64 chunkSize = 4 + 36 + 24 + (ma_uint64)ma_dr_wav__write_or_count_metadata(NULL, metadata, numMetadata) + 8 + dataChunkSize + ma_dr_wav__chunk_padding_size_riff(dataChunkSize); + if (chunkSize > 0xFFFFFFFFUL) { + chunkSize = 0xFFFFFFFFUL; + } + return chunkSize; +} +MA_PRIVATE ma_uint64 ma_dr_wav__data_chunk_size_rf64(ma_uint64 dataChunkSize) +{ + return dataChunkSize; +} +MA_PRIVATE ma_bool32 ma_dr_wav_preinit_write(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_bool32 isSequential, ma_dr_wav_write_proc onWrite, ma_dr_wav_seek_proc 
onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pWav == NULL || onWrite == NULL) { + return MA_FALSE; + } + if (!isSequential && onSeek == NULL) { + return MA_FALSE; + } + if (pFormat->format == MA_DR_WAVE_FORMAT_EXTENSIBLE) { + return MA_FALSE; + } + if (pFormat->format == MA_DR_WAVE_FORMAT_ADPCM || pFormat->format == MA_DR_WAVE_FORMAT_DVI_ADPCM) { + return MA_FALSE; + } + MA_DR_WAV_ZERO_MEMORY(pWav, sizeof(*pWav)); + pWav->onWrite = onWrite; + pWav->onSeek = onSeek; + pWav->pUserData = pUserData; + pWav->allocationCallbacks = ma_dr_wav_copy_allocation_callbacks_or_defaults(pAllocationCallbacks); + if (pWav->allocationCallbacks.onFree == NULL || (pWav->allocationCallbacks.onMalloc == NULL && pWav->allocationCallbacks.onRealloc == NULL)) { + return MA_FALSE; + } + pWav->fmt.formatTag = (ma_uint16)pFormat->format; + pWav->fmt.channels = (ma_uint16)pFormat->channels; + pWav->fmt.sampleRate = pFormat->sampleRate; + pWav->fmt.avgBytesPerSec = (ma_uint32)((pFormat->bitsPerSample * pFormat->sampleRate * pFormat->channels) / 8); + pWav->fmt.blockAlign = (ma_uint16)((pFormat->channels * pFormat->bitsPerSample) / 8); + pWav->fmt.bitsPerSample = (ma_uint16)pFormat->bitsPerSample; + pWav->fmt.extendedSize = 0; + pWav->isSequentialWrite = isSequential; + return MA_TRUE; +} +MA_PRIVATE ma_bool32 ma_dr_wav_init_write__internal(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount) +{ + size_t runningPos = 0; + ma_uint64 initialDataChunkSize = 0; + ma_uint64 chunkSizeFMT; + if (pWav->isSequentialWrite) { + initialDataChunkSize = (totalSampleCount * pWav->fmt.bitsPerSample) / 8; + if (pFormat->container == ma_dr_wav_container_riff) { + if (initialDataChunkSize > (0xFFFFFFFFUL - 36)) { + return MA_FALSE; + } + } + } + pWav->dataChunkDataSizeTargetWrite = initialDataChunkSize; + if (pFormat->container == ma_dr_wav_container_riff) { + ma_uint32 chunkSizeRIFF = 28 + (ma_uint32)initialDataChunkSize; + runningPos += ma_dr_wav__write(pWav, "RIFF", 4); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, chunkSizeRIFF); + runningPos += ma_dr_wav__write(pWav, "WAVE", 4); + } else if (pFormat->container == ma_dr_wav_container_w64) { + ma_uint64 chunkSizeRIFF = 80 + 24 + initialDataChunkSize; + runningPos += ma_dr_wav__write(pWav, ma_dr_wavGUID_W64_RIFF, 16); + runningPos += ma_dr_wav__write_u64ne_to_le(pWav, chunkSizeRIFF); + runningPos += ma_dr_wav__write(pWav, ma_dr_wavGUID_W64_WAVE, 16); + } else if (pFormat->container == ma_dr_wav_container_rf64) { + runningPos += ma_dr_wav__write(pWav, "RF64", 4); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, 0xFFFFFFFF); + runningPos += ma_dr_wav__write(pWav, "WAVE", 4); + } else { + return MA_FALSE; + } + if (pFormat->container == ma_dr_wav_container_rf64) { + ma_uint32 initialds64ChunkSize = 28; + ma_uint64 initialRiffChunkSize = 8 + initialds64ChunkSize + initialDataChunkSize; + runningPos += ma_dr_wav__write(pWav, "ds64", 4); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, initialds64ChunkSize); + runningPos += ma_dr_wav__write_u64ne_to_le(pWav, initialRiffChunkSize); + runningPos += ma_dr_wav__write_u64ne_to_le(pWav, initialDataChunkSize); + runningPos += ma_dr_wav__write_u64ne_to_le(pWav, totalSampleCount); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, 0); + } + if (pFormat->container == ma_dr_wav_container_riff || pFormat->container == ma_dr_wav_container_rf64) { + chunkSizeFMT = 16; + runningPos += ma_dr_wav__write(pWav, "fmt ", 4); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, 
(ma_uint32)chunkSizeFMT); + } else if (pFormat->container == ma_dr_wav_container_w64) { + chunkSizeFMT = 40; + runningPos += ma_dr_wav__write(pWav, ma_dr_wavGUID_W64_FMT, 16); + runningPos += ma_dr_wav__write_u64ne_to_le(pWav, chunkSizeFMT); + } + runningPos += ma_dr_wav__write_u16ne_to_le(pWav, pWav->fmt.formatTag); + runningPos += ma_dr_wav__write_u16ne_to_le(pWav, pWav->fmt.channels); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, pWav->fmt.sampleRate); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, pWav->fmt.avgBytesPerSec); + runningPos += ma_dr_wav__write_u16ne_to_le(pWav, pWav->fmt.blockAlign); + runningPos += ma_dr_wav__write_u16ne_to_le(pWav, pWav->fmt.bitsPerSample); + if (!pWav->isSequentialWrite && pWav->pMetadata != NULL && pWav->metadataCount > 0 && (pFormat->container == ma_dr_wav_container_riff || pFormat->container == ma_dr_wav_container_rf64)) { + runningPos += ma_dr_wav__write_or_count_metadata(pWav, pWav->pMetadata, pWav->metadataCount); + } + pWav->dataChunkDataPos = runningPos; + if (pFormat->container == ma_dr_wav_container_riff) { + ma_uint32 chunkSizeDATA = (ma_uint32)initialDataChunkSize; + runningPos += ma_dr_wav__write(pWav, "data", 4); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, chunkSizeDATA); + } else if (pFormat->container == ma_dr_wav_container_w64) { + ma_uint64 chunkSizeDATA = 24 + initialDataChunkSize; + runningPos += ma_dr_wav__write(pWav, ma_dr_wavGUID_W64_DATA, 16); + runningPos += ma_dr_wav__write_u64ne_to_le(pWav, chunkSizeDATA); + } else if (pFormat->container == ma_dr_wav_container_rf64) { + runningPos += ma_dr_wav__write(pWav, "data", 4); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, 0xFFFFFFFF); + } + pWav->container = pFormat->container; + pWav->channels = (ma_uint16)pFormat->channels; + pWav->sampleRate = pFormat->sampleRate; + pWav->bitsPerSample = (ma_uint16)pFormat->bitsPerSample; + pWav->translatedFormatTag = (ma_uint16)pFormat->format; + pWav->dataChunkDataPos = runningPos; + return MA_TRUE; +} +MA_API ma_bool32 ma_dr_wav_init_write(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_dr_wav_write_proc onWrite, ma_dr_wav_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (!ma_dr_wav_preinit_write(pWav, pFormat, MA_FALSE, onWrite, onSeek, pUserData, pAllocationCallbacks)) { + return MA_FALSE; + } + return ma_dr_wav_init_write__internal(pWav, pFormat, 0); +} +MA_API ma_bool32 ma_dr_wav_init_write_sequential(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, ma_dr_wav_write_proc onWrite, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (!ma_dr_wav_preinit_write(pWav, pFormat, MA_TRUE, onWrite, NULL, pUserData, pAllocationCallbacks)) { + return MA_FALSE; + } + return ma_dr_wav_init_write__internal(pWav, pFormat, totalSampleCount); +} +MA_API ma_bool32 ma_dr_wav_init_write_sequential_pcm_frames(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_uint64 totalPCMFrameCount, ma_dr_wav_write_proc onWrite, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pFormat == NULL) { + return MA_FALSE; + } + return ma_dr_wav_init_write_sequential(pWav, pFormat, totalPCMFrameCount*pFormat->channels, onWrite, pUserData, pAllocationCallbacks); +} +MA_API ma_bool32 ma_dr_wav_init_write_with_metadata(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_dr_wav_write_proc onWrite, ma_dr_wav_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks, ma_dr_wav_metadata* 
pMetadata, ma_uint32 metadataCount) +{ + if (!ma_dr_wav_preinit_write(pWav, pFormat, MA_FALSE, onWrite, onSeek, pUserData, pAllocationCallbacks)) { + return MA_FALSE; + } + pWav->pMetadata = pMetadata; + pWav->metadataCount = metadataCount; + return ma_dr_wav_init_write__internal(pWav, pFormat, 0); +} +MA_API ma_uint64 ma_dr_wav_target_write_size_bytes(const ma_dr_wav_data_format* pFormat, ma_uint64 totalFrameCount, ma_dr_wav_metadata* pMetadata, ma_uint32 metadataCount) +{ + ma_uint64 targetDataSizeBytes = (ma_uint64)((ma_int64)totalFrameCount * pFormat->channels * pFormat->bitsPerSample/8.0); + ma_uint64 riffChunkSizeBytes; + ma_uint64 fileSizeBytes = 0; + if (pFormat->container == ma_dr_wav_container_riff) { + riffChunkSizeBytes = ma_dr_wav__riff_chunk_size_riff(targetDataSizeBytes, pMetadata, metadataCount); + fileSizeBytes = (8 + riffChunkSizeBytes); + } else if (pFormat->container == ma_dr_wav_container_w64) { + riffChunkSizeBytes = ma_dr_wav__riff_chunk_size_w64(targetDataSizeBytes); + fileSizeBytes = riffChunkSizeBytes; + } else if (pFormat->container == ma_dr_wav_container_rf64) { + riffChunkSizeBytes = ma_dr_wav__riff_chunk_size_rf64(targetDataSizeBytes, pMetadata, metadataCount); + fileSizeBytes = (8 + riffChunkSizeBytes); + } + return fileSizeBytes; +} +#ifndef MA_DR_WAV_NO_STDIO +MA_PRIVATE size_t ma_dr_wav__on_read_stdio(void* pUserData, void* pBufferOut, size_t bytesToRead) +{ + return fread(pBufferOut, 1, bytesToRead, (FILE*)pUserData); +} +MA_PRIVATE size_t ma_dr_wav__on_write_stdio(void* pUserData, const void* pData, size_t bytesToWrite) +{ + return fwrite(pData, 1, bytesToWrite, (FILE*)pUserData); +} +MA_PRIVATE ma_bool32 ma_dr_wav__on_seek_stdio(void* pUserData, int offset, ma_dr_wav_seek_origin origin) +{ + return fseek((FILE*)pUserData, offset, (origin == ma_dr_wav_seek_origin_current) ? 
SEEK_CUR : SEEK_SET) == 0; +} +MA_API ma_bool32 ma_dr_wav_init_file(ma_dr_wav* pWav, const char* filename, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_wav_init_file_ex(pWav, filename, NULL, NULL, 0, pAllocationCallbacks); +} +MA_PRIVATE ma_bool32 ma_dr_wav_init_file__internal_FILE(ma_dr_wav* pWav, FILE* pFile, ma_dr_wav_chunk_proc onChunk, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_bool32 result; + result = ma_dr_wav_preinit(pWav, ma_dr_wav__on_read_stdio, ma_dr_wav__on_seek_stdio, (void*)pFile, pAllocationCallbacks); + if (result != MA_TRUE) { + fclose(pFile); + return result; + } + result = ma_dr_wav_init__internal(pWav, onChunk, pChunkUserData, flags); + if (result != MA_TRUE) { + fclose(pFile); + return result; + } + return MA_TRUE; +} +MA_API ma_bool32 ma_dr_wav_init_file_ex(ma_dr_wav* pWav, const char* filename, ma_dr_wav_chunk_proc onChunk, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks) +{ + FILE* pFile; + if (ma_fopen(&pFile, filename, "rb") != MA_SUCCESS) { + return MA_FALSE; + } + return ma_dr_wav_init_file__internal_FILE(pWav, pFile, onChunk, pChunkUserData, flags, pAllocationCallbacks); +} +#ifndef MA_DR_WAV_NO_WCHAR +MA_API ma_bool32 ma_dr_wav_init_file_w(ma_dr_wav* pWav, const wchar_t* filename, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_wav_init_file_ex_w(pWav, filename, NULL, NULL, 0, pAllocationCallbacks); +} +MA_API ma_bool32 ma_dr_wav_init_file_ex_w(ma_dr_wav* pWav, const wchar_t* filename, ma_dr_wav_chunk_proc onChunk, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks) +{ + FILE* pFile; + if (ma_wfopen(&pFile, filename, L"rb", pAllocationCallbacks) != MA_SUCCESS) { + return MA_FALSE; + } + return ma_dr_wav_init_file__internal_FILE(pWav, pFile, onChunk, pChunkUserData, flags, pAllocationCallbacks); +} +#endif +MA_API ma_bool32 ma_dr_wav_init_file_with_metadata(ma_dr_wav* pWav, const char* filename, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks) +{ + FILE* pFile; + if (ma_fopen(&pFile, filename, "rb") != MA_SUCCESS) { + return MA_FALSE; + } + return ma_dr_wav_init_file__internal_FILE(pWav, pFile, NULL, NULL, flags | MA_DR_WAV_WITH_METADATA, pAllocationCallbacks); +} +#ifndef MA_DR_WAV_NO_WCHAR +MA_API ma_bool32 ma_dr_wav_init_file_with_metadata_w(ma_dr_wav* pWav, const wchar_t* filename, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks) +{ + FILE* pFile; + if (ma_wfopen(&pFile, filename, L"rb", pAllocationCallbacks) != MA_SUCCESS) { + return MA_FALSE; + } + return ma_dr_wav_init_file__internal_FILE(pWav, pFile, NULL, NULL, flags | MA_DR_WAV_WITH_METADATA, pAllocationCallbacks); +} +#endif +MA_PRIVATE ma_bool32 ma_dr_wav_init_file_write__internal_FILE(ma_dr_wav* pWav, FILE* pFile, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, ma_bool32 isSequential, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_bool32 result; + result = ma_dr_wav_preinit_write(pWav, pFormat, isSequential, ma_dr_wav__on_write_stdio, ma_dr_wav__on_seek_stdio, (void*)pFile, pAllocationCallbacks); + if (result != MA_TRUE) { + fclose(pFile); + return result; + } + result = ma_dr_wav_init_write__internal(pWav, pFormat, totalSampleCount); + if (result != MA_TRUE) { + fclose(pFile); + return result; + } + return MA_TRUE; +} +MA_PRIVATE ma_bool32 ma_dr_wav_init_file_write__internal(ma_dr_wav* pWav, const char* filename, const 
ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, ma_bool32 isSequential, const ma_allocation_callbacks* pAllocationCallbacks) +{ + FILE* pFile; + if (ma_fopen(&pFile, filename, "wb") != MA_SUCCESS) { + return MA_FALSE; + } + return ma_dr_wav_init_file_write__internal_FILE(pWav, pFile, pFormat, totalSampleCount, isSequential, pAllocationCallbacks); +} +#ifndef MA_DR_WAV_NO_WCHAR +MA_PRIVATE ma_bool32 ma_dr_wav_init_file_write_w__internal(ma_dr_wav* pWav, const wchar_t* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, ma_bool32 isSequential, const ma_allocation_callbacks* pAllocationCallbacks) +{ + FILE* pFile; + if (ma_wfopen(&pFile, filename, L"wb", pAllocationCallbacks) != MA_SUCCESS) { + return MA_FALSE; + } + return ma_dr_wav_init_file_write__internal_FILE(pWav, pFile, pFormat, totalSampleCount, isSequential, pAllocationCallbacks); +} +#endif +MA_API ma_bool32 ma_dr_wav_init_file_write(ma_dr_wav* pWav, const char* filename, const ma_dr_wav_data_format* pFormat, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_wav_init_file_write__internal(pWav, filename, pFormat, 0, MA_FALSE, pAllocationCallbacks); +} +MA_API ma_bool32 ma_dr_wav_init_file_write_sequential(ma_dr_wav* pWav, const char* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_wav_init_file_write__internal(pWav, filename, pFormat, totalSampleCount, MA_TRUE, pAllocationCallbacks); +} +MA_API ma_bool32 ma_dr_wav_init_file_write_sequential_pcm_frames(ma_dr_wav* pWav, const char* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pFormat == NULL) { + return MA_FALSE; + } + return ma_dr_wav_init_file_write_sequential(pWav, filename, pFormat, totalPCMFrameCount*pFormat->channels, pAllocationCallbacks); +} +#ifndef MA_DR_WAV_NO_WCHAR +MA_API ma_bool32 ma_dr_wav_init_file_write_w(ma_dr_wav* pWav, const wchar_t* filename, const ma_dr_wav_data_format* pFormat, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_wav_init_file_write_w__internal(pWav, filename, pFormat, 0, MA_FALSE, pAllocationCallbacks); +} +MA_API ma_bool32 ma_dr_wav_init_file_write_sequential_w(ma_dr_wav* pWav, const wchar_t* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_wav_init_file_write_w__internal(pWav, filename, pFormat, totalSampleCount, MA_TRUE, pAllocationCallbacks); +} +MA_API ma_bool32 ma_dr_wav_init_file_write_sequential_pcm_frames_w(ma_dr_wav* pWav, const wchar_t* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pFormat == NULL) { + return MA_FALSE; + } + return ma_dr_wav_init_file_write_sequential_w(pWav, filename, pFormat, totalPCMFrameCount*pFormat->channels, pAllocationCallbacks); +} +#endif +#endif +MA_PRIVATE size_t ma_dr_wav__on_read_memory(void* pUserData, void* pBufferOut, size_t bytesToRead) +{ + ma_dr_wav* pWav = (ma_dr_wav*)pUserData; + size_t bytesRemaining; + MA_DR_WAV_ASSERT(pWav != NULL); + MA_DR_WAV_ASSERT(pWav->memoryStream.dataSize >= pWav->memoryStream.currentReadPos); + bytesRemaining = pWav->memoryStream.dataSize - pWav->memoryStream.currentReadPos; + if (bytesToRead > bytesRemaining) { + bytesToRead = bytesRemaining; + } + if (bytesToRead > 0) { + MA_DR_WAV_COPY_MEMORY(pBufferOut, 
pWav->memoryStream.data + pWav->memoryStream.currentReadPos, bytesToRead); + pWav->memoryStream.currentReadPos += bytesToRead; + } + return bytesToRead; +} +MA_PRIVATE ma_bool32 ma_dr_wav__on_seek_memory(void* pUserData, int offset, ma_dr_wav_seek_origin origin) +{ + ma_dr_wav* pWav = (ma_dr_wav*)pUserData; + MA_DR_WAV_ASSERT(pWav != NULL); + if (origin == ma_dr_wav_seek_origin_current) { + if (offset > 0) { + if (pWav->memoryStream.currentReadPos + offset > pWav->memoryStream.dataSize) { + return MA_FALSE; + } + } else { + if (pWav->memoryStream.currentReadPos < (size_t)-offset) { + return MA_FALSE; + } + } + pWav->memoryStream.currentReadPos += offset; + } else { + if ((ma_uint32)offset <= pWav->memoryStream.dataSize) { + pWav->memoryStream.currentReadPos = offset; + } else { + return MA_FALSE; + } + } + return MA_TRUE; +} +MA_PRIVATE size_t ma_dr_wav__on_write_memory(void* pUserData, const void* pDataIn, size_t bytesToWrite) +{ + ma_dr_wav* pWav = (ma_dr_wav*)pUserData; + size_t bytesRemaining; + MA_DR_WAV_ASSERT(pWav != NULL); + MA_DR_WAV_ASSERT(pWav->memoryStreamWrite.dataCapacity >= pWav->memoryStreamWrite.currentWritePos); + bytesRemaining = pWav->memoryStreamWrite.dataCapacity - pWav->memoryStreamWrite.currentWritePos; + if (bytesRemaining < bytesToWrite) { + void* pNewData; + size_t newDataCapacity = (pWav->memoryStreamWrite.dataCapacity == 0) ? 256 : pWav->memoryStreamWrite.dataCapacity * 2; + if ((newDataCapacity - pWav->memoryStreamWrite.currentWritePos) < bytesToWrite) { + newDataCapacity = pWav->memoryStreamWrite.currentWritePos + bytesToWrite; + } + pNewData = ma_dr_wav__realloc_from_callbacks(*pWav->memoryStreamWrite.ppData, newDataCapacity, pWav->memoryStreamWrite.dataCapacity, &pWav->allocationCallbacks); + if (pNewData == NULL) { + return 0; + } + *pWav->memoryStreamWrite.ppData = pNewData; + pWav->memoryStreamWrite.dataCapacity = newDataCapacity; + } + MA_DR_WAV_COPY_MEMORY(((ma_uint8*)(*pWav->memoryStreamWrite.ppData)) + pWav->memoryStreamWrite.currentWritePos, pDataIn, bytesToWrite); + pWav->memoryStreamWrite.currentWritePos += bytesToWrite; + if (pWav->memoryStreamWrite.dataSize < pWav->memoryStreamWrite.currentWritePos) { + pWav->memoryStreamWrite.dataSize = pWav->memoryStreamWrite.currentWritePos; + } + *pWav->memoryStreamWrite.pDataSize = pWav->memoryStreamWrite.dataSize; + return bytesToWrite; +} +MA_PRIVATE ma_bool32 ma_dr_wav__on_seek_memory_write(void* pUserData, int offset, ma_dr_wav_seek_origin origin) +{ + ma_dr_wav* pWav = (ma_dr_wav*)pUserData; + MA_DR_WAV_ASSERT(pWav != NULL); + if (origin == ma_dr_wav_seek_origin_current) { + if (offset > 0) { + if (pWav->memoryStreamWrite.currentWritePos + offset > pWav->memoryStreamWrite.dataSize) { + offset = (int)(pWav->memoryStreamWrite.dataSize - pWav->memoryStreamWrite.currentWritePos); + } + } else { + if (pWav->memoryStreamWrite.currentWritePos < (size_t)-offset) { + offset = -(int)pWav->memoryStreamWrite.currentWritePos; + } + } + pWav->memoryStreamWrite.currentWritePos += offset; + } else { + if ((ma_uint32)offset <= pWav->memoryStreamWrite.dataSize) { + pWav->memoryStreamWrite.currentWritePos = offset; + } else { + pWav->memoryStreamWrite.currentWritePos = pWav->memoryStreamWrite.dataSize; + } + } + return MA_TRUE; +} +MA_API ma_bool32 ma_dr_wav_init_memory(ma_dr_wav* pWav, const void* data, size_t dataSize, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_wav_init_memory_ex(pWav, data, dataSize, NULL, NULL, 0, pAllocationCallbacks); +} +MA_API ma_bool32 
ma_dr_wav_init_memory_ex(ma_dr_wav* pWav, const void* data, size_t dataSize, ma_dr_wav_chunk_proc onChunk, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (data == NULL || dataSize == 0) { + return MA_FALSE; + } + if (!ma_dr_wav_preinit(pWav, ma_dr_wav__on_read_memory, ma_dr_wav__on_seek_memory, pWav, pAllocationCallbacks)) { + return MA_FALSE; + } + pWav->memoryStream.data = (const ma_uint8*)data; + pWav->memoryStream.dataSize = dataSize; + pWav->memoryStream.currentReadPos = 0; + return ma_dr_wav_init__internal(pWav, onChunk, pChunkUserData, flags); +} +MA_API ma_bool32 ma_dr_wav_init_memory_with_metadata(ma_dr_wav* pWav, const void* data, size_t dataSize, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (data == NULL || dataSize == 0) { + return MA_FALSE; + } + if (!ma_dr_wav_preinit(pWav, ma_dr_wav__on_read_memory, ma_dr_wav__on_seek_memory, pWav, pAllocationCallbacks)) { + return MA_FALSE; + } + pWav->memoryStream.data = (const ma_uint8*)data; + pWav->memoryStream.dataSize = dataSize; + pWav->memoryStream.currentReadPos = 0; + return ma_dr_wav_init__internal(pWav, NULL, NULL, flags | MA_DR_WAV_WITH_METADATA); +} +MA_PRIVATE ma_bool32 ma_dr_wav_init_memory_write__internal(ma_dr_wav* pWav, void** ppData, size_t* pDataSize, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, ma_bool32 isSequential, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (ppData == NULL || pDataSize == NULL) { + return MA_FALSE; + } + *ppData = NULL; + *pDataSize = 0; + if (!ma_dr_wav_preinit_write(pWav, pFormat, isSequential, ma_dr_wav__on_write_memory, ma_dr_wav__on_seek_memory_write, pWav, pAllocationCallbacks)) { + return MA_FALSE; + } + pWav->memoryStreamWrite.ppData = ppData; + pWav->memoryStreamWrite.pDataSize = pDataSize; + pWav->memoryStreamWrite.dataSize = 0; + pWav->memoryStreamWrite.dataCapacity = 0; + pWav->memoryStreamWrite.currentWritePos = 0; + return ma_dr_wav_init_write__internal(pWav, pFormat, totalSampleCount); +} +MA_API ma_bool32 ma_dr_wav_init_memory_write(ma_dr_wav* pWav, void** ppData, size_t* pDataSize, const ma_dr_wav_data_format* pFormat, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_wav_init_memory_write__internal(pWav, ppData, pDataSize, pFormat, 0, MA_FALSE, pAllocationCallbacks); +} +MA_API ma_bool32 ma_dr_wav_init_memory_write_sequential(ma_dr_wav* pWav, void** ppData, size_t* pDataSize, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_wav_init_memory_write__internal(pWav, ppData, pDataSize, pFormat, totalSampleCount, MA_TRUE, pAllocationCallbacks); +} +MA_API ma_bool32 ma_dr_wav_init_memory_write_sequential_pcm_frames(ma_dr_wav* pWav, void** ppData, size_t* pDataSize, const ma_dr_wav_data_format* pFormat, ma_uint64 totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pFormat == NULL) { + return MA_FALSE; + } + return ma_dr_wav_init_memory_write_sequential(pWav, ppData, pDataSize, pFormat, totalPCMFrameCount*pFormat->channels, pAllocationCallbacks); +} +MA_API ma_result ma_dr_wav_uninit(ma_dr_wav* pWav) +{ + ma_result result = MA_SUCCESS; + if (pWav == NULL) { + return MA_INVALID_ARGS; + } + if (pWav->onWrite != NULL) { + ma_uint32 paddingSize = 0; + if (pWav->container == ma_dr_wav_container_riff || pWav->container == ma_dr_wav_container_rf64) { + paddingSize = ma_dr_wav__chunk_padding_size_riff(pWav->dataChunkDataSize); + } else { + 
paddingSize = ma_dr_wav__chunk_padding_size_w64(pWav->dataChunkDataSize); + } + if (paddingSize > 0) { + ma_uint64 paddingData = 0; + ma_dr_wav__write(pWav, &paddingData, paddingSize); + } + if (pWav->onSeek && !pWav->isSequentialWrite) { + if (pWav->container == ma_dr_wav_container_riff) { + if (pWav->onSeek(pWav->pUserData, 4, ma_dr_wav_seek_origin_start)) { + ma_uint32 riffChunkSize = ma_dr_wav__riff_chunk_size_riff(pWav->dataChunkDataSize, pWav->pMetadata, pWav->metadataCount); + ma_dr_wav__write_u32ne_to_le(pWav, riffChunkSize); + } + if (pWav->onSeek(pWav->pUserData, (int)pWav->dataChunkDataPos - 4, ma_dr_wav_seek_origin_start)) { + ma_uint32 dataChunkSize = ma_dr_wav__data_chunk_size_riff(pWav->dataChunkDataSize); + ma_dr_wav__write_u32ne_to_le(pWav, dataChunkSize); + } + } else if (pWav->container == ma_dr_wav_container_w64) { + if (pWav->onSeek(pWav->pUserData, 16, ma_dr_wav_seek_origin_start)) { + ma_uint64 riffChunkSize = ma_dr_wav__riff_chunk_size_w64(pWav->dataChunkDataSize); + ma_dr_wav__write_u64ne_to_le(pWav, riffChunkSize); + } + if (pWav->onSeek(pWav->pUserData, (int)pWav->dataChunkDataPos - 8, ma_dr_wav_seek_origin_start)) { + ma_uint64 dataChunkSize = ma_dr_wav__data_chunk_size_w64(pWav->dataChunkDataSize); + ma_dr_wav__write_u64ne_to_le(pWav, dataChunkSize); + } + } else if (pWav->container == ma_dr_wav_container_rf64) { + int ds64BodyPos = 12 + 8; + if (pWav->onSeek(pWav->pUserData, ds64BodyPos + 0, ma_dr_wav_seek_origin_start)) { + ma_uint64 riffChunkSize = ma_dr_wav__riff_chunk_size_rf64(pWav->dataChunkDataSize, pWav->pMetadata, pWav->metadataCount); + ma_dr_wav__write_u64ne_to_le(pWav, riffChunkSize); + } + if (pWav->onSeek(pWav->pUserData, ds64BodyPos + 8, ma_dr_wav_seek_origin_start)) { + ma_uint64 dataChunkSize = ma_dr_wav__data_chunk_size_rf64(pWav->dataChunkDataSize); + ma_dr_wav__write_u64ne_to_le(pWav, dataChunkSize); + } + } + } + if (pWav->isSequentialWrite) { + if (pWav->dataChunkDataSize != pWav->dataChunkDataSizeTargetWrite) { + result = MA_INVALID_FILE; + } + } + } else { + ma_dr_wav_free(pWav->pMetadata, &pWav->allocationCallbacks); + } +#ifndef MA_DR_WAV_NO_STDIO + if (pWav->onRead == ma_dr_wav__on_read_stdio || pWav->onWrite == ma_dr_wav__on_write_stdio) { + fclose((FILE*)pWav->pUserData); + } +#endif + return result; +} +MA_API size_t ma_dr_wav_read_raw(ma_dr_wav* pWav, size_t bytesToRead, void* pBufferOut) +{ + size_t bytesRead; + ma_uint32 bytesPerFrame; + if (pWav == NULL || bytesToRead == 0) { + return 0; + } + if (bytesToRead > pWav->bytesRemaining) { + bytesToRead = (size_t)pWav->bytesRemaining; + } + if (bytesToRead == 0) { + return 0; + } + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + if (pBufferOut != NULL) { + bytesRead = pWav->onRead(pWav->pUserData, pBufferOut, bytesToRead); + } else { + bytesRead = 0; + while (bytesRead < bytesToRead) { + size_t bytesToSeek = (bytesToRead - bytesRead); + if (bytesToSeek > 0x7FFFFFFF) { + bytesToSeek = 0x7FFFFFFF; + } + if (pWav->onSeek(pWav->pUserData, (int)bytesToSeek, ma_dr_wav_seek_origin_current) == MA_FALSE) { + break; + } + bytesRead += bytesToSeek; + } + while (bytesRead < bytesToRead) { + ma_uint8 buffer[4096]; + size_t bytesSeeked; + size_t bytesToSeek = (bytesToRead - bytesRead); + if (bytesToSeek > sizeof(buffer)) { + bytesToSeek = sizeof(buffer); + } + bytesSeeked = pWav->onRead(pWav->pUserData, buffer, bytesToSeek); + bytesRead += bytesSeeked; + if (bytesSeeked < bytesToSeek) { + break; + } + } + } + pWav->readCursorInPCMFrames += 
bytesRead / bytesPerFrame; + pWav->bytesRemaining -= bytesRead; + return bytesRead; +} +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_le(ma_dr_wav* pWav, ma_uint64 framesToRead, void* pBufferOut) +{ + ma_uint32 bytesPerFrame; + ma_uint64 bytesToRead; + ma_uint64 framesRemainingInFile; + if (pWav == NULL || framesToRead == 0) { + return 0; + } + if (ma_dr_wav__is_compressed_format_tag(pWav->translatedFormatTag)) { + return 0; + } + framesRemainingInFile = pWav->totalPCMFrameCount - pWav->readCursorInPCMFrames; + if (framesToRead > framesRemainingInFile) { + framesToRead = framesRemainingInFile; + } + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + bytesToRead = framesToRead * bytesPerFrame; + if (bytesToRead > MA_SIZE_MAX) { + bytesToRead = (MA_SIZE_MAX / bytesPerFrame) * bytesPerFrame; + } + if (bytesToRead == 0) { + return 0; + } + return ma_dr_wav_read_raw(pWav, (size_t)bytesToRead, pBufferOut) / bytesPerFrame; +} +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_be(ma_dr_wav* pWav, ma_uint64 framesToRead, void* pBufferOut) +{ + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames_le(pWav, framesToRead, pBufferOut); + if (pBufferOut != NULL) { + ma_uint32 bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + ma_dr_wav__bswap_samples(pBufferOut, framesRead*pWav->channels, bytesPerFrame/pWav->channels); + } + return framesRead; +} +MA_API ma_uint64 ma_dr_wav_read_pcm_frames(ma_dr_wav* pWav, ma_uint64 framesToRead, void* pBufferOut) +{ + ma_uint64 framesRead = 0; + if (ma_dr_wav_is_container_be(pWav->container)) { + if (pWav->container != ma_dr_wav_container_aiff || pWav->aiff.isLE == MA_FALSE) { + if (ma_dr_wav__is_little_endian()) { + framesRead = ma_dr_wav_read_pcm_frames_be(pWav, framesToRead, pBufferOut); + } else { + framesRead = ma_dr_wav_read_pcm_frames_le(pWav, framesToRead, pBufferOut); + } + goto post_process; + } + } + if (ma_dr_wav__is_little_endian()) { + framesRead = ma_dr_wav_read_pcm_frames_le(pWav, framesToRead, pBufferOut); + } else { + framesRead = ma_dr_wav_read_pcm_frames_be(pWav, framesToRead, pBufferOut); + } +post_process: + { + if (pWav->container == ma_dr_wav_container_aiff && pWav->bitsPerSample == 8 && pWav->aiff.isUnsigned == MA_FALSE) { + if (pBufferOut != NULL) { + ma_uint64 iSample; + for (iSample = 0; iSample < framesRead * pWav->channels; iSample += 1) { + ((ma_uint8*)pBufferOut)[iSample] += 128; + } + } + } + } + return framesRead; +} +MA_PRIVATE ma_bool32 ma_dr_wav_seek_to_first_pcm_frame(ma_dr_wav* pWav) +{ + if (pWav->onWrite != NULL) { + return MA_FALSE; + } + if (!pWav->onSeek(pWav->pUserData, (int)pWav->dataChunkDataPos, ma_dr_wav_seek_origin_start)) { + return MA_FALSE; + } + if (ma_dr_wav__is_compressed_format_tag(pWav->translatedFormatTag)) { + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ADPCM) { + MA_DR_WAV_ZERO_OBJECT(&pWav->msadpcm); + } else if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM) { + MA_DR_WAV_ZERO_OBJECT(&pWav->ima); + } else { + MA_DR_WAV_ASSERT(MA_FALSE); + } + } + pWav->readCursorInPCMFrames = 0; + pWav->bytesRemaining = pWav->dataChunkDataSize; + return MA_TRUE; +} +MA_API ma_bool32 ma_dr_wav_seek_to_pcm_frame(ma_dr_wav* pWav, ma_uint64 targetFrameIndex) +{ + if (pWav == NULL || pWav->onSeek == NULL) { + return MA_FALSE; + } + if (pWav->onWrite != NULL) { + return MA_FALSE; + } + if (pWav->totalPCMFrameCount == 0) { + return MA_TRUE; + } + if (targetFrameIndex > pWav->totalPCMFrameCount) { + targetFrameIndex = 
pWav->totalPCMFrameCount; + } + if (ma_dr_wav__is_compressed_format_tag(pWav->translatedFormatTag)) { + if (targetFrameIndex < pWav->readCursorInPCMFrames) { + if (!ma_dr_wav_seek_to_first_pcm_frame(pWav)) { + return MA_FALSE; + } + } + if (targetFrameIndex > pWav->readCursorInPCMFrames) { + ma_uint64 offsetInFrames = targetFrameIndex - pWav->readCursorInPCMFrames; + ma_int16 devnull[2048]; + while (offsetInFrames > 0) { + ma_uint64 framesRead = 0; + ma_uint64 framesToRead = offsetInFrames; + if (framesToRead > ma_dr_wav_countof(devnull)/pWav->channels) { + framesToRead = ma_dr_wav_countof(devnull)/pWav->channels; + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ADPCM) { + framesRead = ma_dr_wav_read_pcm_frames_s16__msadpcm(pWav, framesToRead, devnull); + } else if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM) { + framesRead = ma_dr_wav_read_pcm_frames_s16__ima(pWav, framesToRead, devnull); + } else { + MA_DR_WAV_ASSERT(MA_FALSE); + } + if (framesRead != framesToRead) { + return MA_FALSE; + } + offsetInFrames -= framesRead; + } + } + } else { + ma_uint64 totalSizeInBytes; + ma_uint64 currentBytePos; + ma_uint64 targetBytePos; + ma_uint64 offset; + ma_uint32 bytesPerFrame; + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return MA_FALSE; + } + totalSizeInBytes = pWav->totalPCMFrameCount * bytesPerFrame; + currentBytePos = totalSizeInBytes - pWav->bytesRemaining; + targetBytePos = targetFrameIndex * bytesPerFrame; + if (currentBytePos < targetBytePos) { + offset = (targetBytePos - currentBytePos); + } else { + if (!ma_dr_wav_seek_to_first_pcm_frame(pWav)) { + return MA_FALSE; + } + offset = targetBytePos; + } + while (offset > 0) { + int offset32 = ((offset > INT_MAX) ? INT_MAX : (int)offset); + if (!pWav->onSeek(pWav->pUserData, offset32, ma_dr_wav_seek_origin_current)) { + return MA_FALSE; + } + pWav->readCursorInPCMFrames += offset32 / bytesPerFrame; + pWav->bytesRemaining -= offset32; + offset -= offset32; + } + } + return MA_TRUE; +} +MA_API ma_result ma_dr_wav_get_cursor_in_pcm_frames(ma_dr_wav* pWav, ma_uint64* pCursor) +{ + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + *pCursor = 0; + if (pWav == NULL) { + return MA_INVALID_ARGS; + } + *pCursor = pWav->readCursorInPCMFrames; + return MA_SUCCESS; +} +MA_API ma_result ma_dr_wav_get_length_in_pcm_frames(ma_dr_wav* pWav, ma_uint64* pLength) +{ + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + *pLength = 0; + if (pWav == NULL) { + return MA_INVALID_ARGS; + } + *pLength = pWav->totalPCMFrameCount; + return MA_SUCCESS; +} +MA_API size_t ma_dr_wav_write_raw(ma_dr_wav* pWav, size_t bytesToWrite, const void* pData) +{ + size_t bytesWritten; + if (pWav == NULL || bytesToWrite == 0 || pData == NULL) { + return 0; + } + bytesWritten = pWav->onWrite(pWav->pUserData, pData, bytesToWrite); + pWav->dataChunkDataSize += bytesWritten; + return bytesWritten; +} +MA_API ma_uint64 ma_dr_wav_write_pcm_frames_le(ma_dr_wav* pWav, ma_uint64 framesToWrite, const void* pData) +{ + ma_uint64 bytesToWrite; + ma_uint64 bytesWritten; + const ma_uint8* pRunningData; + if (pWav == NULL || framesToWrite == 0 || pData == NULL) { + return 0; + } + bytesToWrite = ((framesToWrite * pWav->channels * pWav->bitsPerSample) / 8); + if (bytesToWrite > MA_SIZE_MAX) { + return 0; + } + bytesWritten = 0; + pRunningData = (const ma_uint8*)pData; + while (bytesToWrite > 0) { + size_t bytesJustWritten; + ma_uint64 bytesToWriteThisIteration; + bytesToWriteThisIteration = bytesToWrite; + 
MA_DR_WAV_ASSERT(bytesToWriteThisIteration <= MA_SIZE_MAX); + bytesJustWritten = ma_dr_wav_write_raw(pWav, (size_t)bytesToWriteThisIteration, pRunningData); + if (bytesJustWritten == 0) { + break; + } + bytesToWrite -= bytesJustWritten; + bytesWritten += bytesJustWritten; + pRunningData += bytesJustWritten; + } + return (bytesWritten * 8) / pWav->bitsPerSample / pWav->channels; +} +MA_API ma_uint64 ma_dr_wav_write_pcm_frames_be(ma_dr_wav* pWav, ma_uint64 framesToWrite, const void* pData) +{ + ma_uint64 bytesToWrite; + ma_uint64 bytesWritten; + ma_uint32 bytesPerSample; + const ma_uint8* pRunningData; + if (pWav == NULL || framesToWrite == 0 || pData == NULL) { + return 0; + } + bytesToWrite = ((framesToWrite * pWav->channels * pWav->bitsPerSample) / 8); + if (bytesToWrite > MA_SIZE_MAX) { + return 0; + } + bytesWritten = 0; + pRunningData = (const ma_uint8*)pData; + bytesPerSample = ma_dr_wav_get_bytes_per_pcm_frame(pWav) / pWav->channels; + if (bytesPerSample == 0) { + return 0; + } + while (bytesToWrite > 0) { + ma_uint8 temp[4096]; + ma_uint32 sampleCount; + size_t bytesJustWritten; + ma_uint64 bytesToWriteThisIteration; + bytesToWriteThisIteration = bytesToWrite; + MA_DR_WAV_ASSERT(bytesToWriteThisIteration <= MA_SIZE_MAX); + sampleCount = sizeof(temp)/bytesPerSample; + if (bytesToWriteThisIteration > ((ma_uint64)sampleCount)*bytesPerSample) { + bytesToWriteThisIteration = ((ma_uint64)sampleCount)*bytesPerSample; + } + MA_DR_WAV_COPY_MEMORY(temp, pRunningData, (size_t)bytesToWriteThisIteration); + ma_dr_wav__bswap_samples(temp, sampleCount, bytesPerSample); + bytesJustWritten = ma_dr_wav_write_raw(pWav, (size_t)bytesToWriteThisIteration, temp); + if (bytesJustWritten == 0) { + break; + } + bytesToWrite -= bytesJustWritten; + bytesWritten += bytesJustWritten; + pRunningData += bytesJustWritten; + } + return (bytesWritten * 8) / pWav->bitsPerSample / pWav->channels; +} +MA_API ma_uint64 ma_dr_wav_write_pcm_frames(ma_dr_wav* pWav, ma_uint64 framesToWrite, const void* pData) +{ + if (ma_dr_wav__is_little_endian()) { + return ma_dr_wav_write_pcm_frames_le(pWav, framesToWrite, pData); + } else { + return ma_dr_wav_write_pcm_frames_be(pWav, framesToWrite, pData); + } +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s16__msadpcm(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut) +{ + ma_uint64 totalFramesRead = 0; + MA_DR_WAV_ASSERT(pWav != NULL); + MA_DR_WAV_ASSERT(framesToRead > 0); + while (pWav->readCursorInPCMFrames < pWav->totalPCMFrameCount) { + MA_DR_WAV_ASSERT(framesToRead > 0); + if (pWav->msadpcm.cachedFrameCount == 0 && pWav->msadpcm.bytesRemainingInBlock == 0) { + if (pWav->channels == 1) { + ma_uint8 header[7]; + if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) { + return totalFramesRead; + } + pWav->msadpcm.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header); + pWav->msadpcm.predictor[0] = header[0]; + pWav->msadpcm.delta[0] = ma_dr_wav_bytes_to_s16(header + 1); + pWav->msadpcm.prevFrames[0][1] = (ma_int32)ma_dr_wav_bytes_to_s16(header + 3); + pWav->msadpcm.prevFrames[0][0] = (ma_int32)ma_dr_wav_bytes_to_s16(header + 5); + pWav->msadpcm.cachedFrames[2] = pWav->msadpcm.prevFrames[0][0]; + pWav->msadpcm.cachedFrames[3] = pWav->msadpcm.prevFrames[0][1]; + pWav->msadpcm.cachedFrameCount = 2; + } else { + ma_uint8 header[14]; + if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) { + return totalFramesRead; + } + pWav->msadpcm.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header); + 
pWav->msadpcm.predictor[0] = header[0]; + pWav->msadpcm.predictor[1] = header[1]; + pWav->msadpcm.delta[0] = ma_dr_wav_bytes_to_s16(header + 2); + pWav->msadpcm.delta[1] = ma_dr_wav_bytes_to_s16(header + 4); + pWav->msadpcm.prevFrames[0][1] = (ma_int32)ma_dr_wav_bytes_to_s16(header + 6); + pWav->msadpcm.prevFrames[1][1] = (ma_int32)ma_dr_wav_bytes_to_s16(header + 8); + pWav->msadpcm.prevFrames[0][0] = (ma_int32)ma_dr_wav_bytes_to_s16(header + 10); + pWav->msadpcm.prevFrames[1][0] = (ma_int32)ma_dr_wav_bytes_to_s16(header + 12); + pWav->msadpcm.cachedFrames[0] = pWav->msadpcm.prevFrames[0][0]; + pWav->msadpcm.cachedFrames[1] = pWav->msadpcm.prevFrames[1][0]; + pWav->msadpcm.cachedFrames[2] = pWav->msadpcm.prevFrames[0][1]; + pWav->msadpcm.cachedFrames[3] = pWav->msadpcm.prevFrames[1][1]; + pWav->msadpcm.cachedFrameCount = 2; + } + } + while (framesToRead > 0 && pWav->msadpcm.cachedFrameCount > 0 && pWav->readCursorInPCMFrames < pWav->totalPCMFrameCount) { + if (pBufferOut != NULL) { + ma_uint32 iSample = 0; + for (iSample = 0; iSample < pWav->channels; iSample += 1) { + pBufferOut[iSample] = (ma_int16)pWav->msadpcm.cachedFrames[(ma_dr_wav_countof(pWav->msadpcm.cachedFrames) - (pWav->msadpcm.cachedFrameCount*pWav->channels)) + iSample]; + } + pBufferOut += pWav->channels; + } + framesToRead -= 1; + totalFramesRead += 1; + pWav->readCursorInPCMFrames += 1; + pWav->msadpcm.cachedFrameCount -= 1; + } + if (framesToRead == 0) { + break; + } + if (pWav->msadpcm.cachedFrameCount == 0) { + if (pWav->msadpcm.bytesRemainingInBlock == 0) { + continue; + } else { + static ma_int32 adaptationTable[] = { + 230, 230, 230, 230, 307, 409, 512, 614, + 768, 614, 512, 409, 307, 230, 230, 230 + }; + static ma_int32 coeff1Table[] = { 256, 512, 0, 192, 240, 460, 392 }; + static ma_int32 coeff2Table[] = { 0, -256, 0, 64, 0, -208, -232 }; + ma_uint8 nibbles; + ma_int32 nibble0; + ma_int32 nibble1; + if (pWav->onRead(pWav->pUserData, &nibbles, 1) != 1) { + return totalFramesRead; + } + pWav->msadpcm.bytesRemainingInBlock -= 1; + nibble0 = ((nibbles & 0xF0) >> 4); if ((nibbles & 0x80)) { nibble0 |= 0xFFFFFFF0UL; } + nibble1 = ((nibbles & 0x0F) >> 0); if ((nibbles & 0x08)) { nibble1 |= 0xFFFFFFF0UL; } + if (pWav->channels == 1) { + ma_int32 newSample0; + ma_int32 newSample1; + newSample0 = ((pWav->msadpcm.prevFrames[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevFrames[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8; + newSample0 += nibble0 * pWav->msadpcm.delta[0]; + newSample0 = ma_dr_wav_clamp(newSample0, -32768, 32767); + pWav->msadpcm.delta[0] = (adaptationTable[((nibbles & 0xF0) >> 4)] * pWav->msadpcm.delta[0]) >> 8; + if (pWav->msadpcm.delta[0] < 16) { + pWav->msadpcm.delta[0] = 16; + } + pWav->msadpcm.prevFrames[0][0] = pWav->msadpcm.prevFrames[0][1]; + pWav->msadpcm.prevFrames[0][1] = newSample0; + newSample1 = ((pWav->msadpcm.prevFrames[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevFrames[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8; + newSample1 += nibble1 * pWav->msadpcm.delta[0]; + newSample1 = ma_dr_wav_clamp(newSample1, -32768, 32767); + pWav->msadpcm.delta[0] = (adaptationTable[((nibbles & 0x0F) >> 0)] * pWav->msadpcm.delta[0]) >> 8; + if (pWav->msadpcm.delta[0] < 16) { + pWav->msadpcm.delta[0] = 16; + } + pWav->msadpcm.prevFrames[0][0] = pWav->msadpcm.prevFrames[0][1]; + pWav->msadpcm.prevFrames[0][1] = newSample1; + pWav->msadpcm.cachedFrames[2] = newSample0; + pWav->msadpcm.cachedFrames[3] = newSample1; + pWav->msadpcm.cachedFrameCount 
= 2; + } else { + ma_int32 newSample0; + ma_int32 newSample1; + newSample0 = ((pWav->msadpcm.prevFrames[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevFrames[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8; + newSample0 += nibble0 * pWav->msadpcm.delta[0]; + newSample0 = ma_dr_wav_clamp(newSample0, -32768, 32767); + pWav->msadpcm.delta[0] = (adaptationTable[((nibbles & 0xF0) >> 4)] * pWav->msadpcm.delta[0]) >> 8; + if (pWav->msadpcm.delta[0] < 16) { + pWav->msadpcm.delta[0] = 16; + } + pWav->msadpcm.prevFrames[0][0] = pWav->msadpcm.prevFrames[0][1]; + pWav->msadpcm.prevFrames[0][1] = newSample0; + newSample1 = ((pWav->msadpcm.prevFrames[1][1] * coeff1Table[pWav->msadpcm.predictor[1]]) + (pWav->msadpcm.prevFrames[1][0] * coeff2Table[pWav->msadpcm.predictor[1]])) >> 8; + newSample1 += nibble1 * pWav->msadpcm.delta[1]; + newSample1 = ma_dr_wav_clamp(newSample1, -32768, 32767); + pWav->msadpcm.delta[1] = (adaptationTable[((nibbles & 0x0F) >> 0)] * pWav->msadpcm.delta[1]) >> 8; + if (pWav->msadpcm.delta[1] < 16) { + pWav->msadpcm.delta[1] = 16; + } + pWav->msadpcm.prevFrames[1][0] = pWav->msadpcm.prevFrames[1][1]; + pWav->msadpcm.prevFrames[1][1] = newSample1; + pWav->msadpcm.cachedFrames[2] = newSample0; + pWav->msadpcm.cachedFrames[3] = newSample1; + pWav->msadpcm.cachedFrameCount = 1; + } + } + } + } + return totalFramesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s16__ima(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut) +{ + ma_uint64 totalFramesRead = 0; + ma_uint32 iChannel; + static ma_int32 indexTable[16] = { + -1, -1, -1, -1, 2, 4, 6, 8, + -1, -1, -1, -1, 2, 4, 6, 8 + }; + static ma_int32 stepTable[89] = { + 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, + 19, 21, 23, 25, 28, 31, 34, 37, 41, 45, + 50, 55, 60, 66, 73, 80, 88, 97, 107, 118, + 130, 143, 157, 173, 190, 209, 230, 253, 279, 307, + 337, 371, 408, 449, 494, 544, 598, 658, 724, 796, + 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066, + 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358, + 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899, + 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767 + }; + MA_DR_WAV_ASSERT(pWav != NULL); + MA_DR_WAV_ASSERT(framesToRead > 0); + while (pWav->readCursorInPCMFrames < pWav->totalPCMFrameCount) { + MA_DR_WAV_ASSERT(framesToRead > 0); + if (pWav->ima.cachedFrameCount == 0 && pWav->ima.bytesRemainingInBlock == 0) { + if (pWav->channels == 1) { + ma_uint8 header[4]; + if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) { + return totalFramesRead; + } + pWav->ima.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header); + if (header[2] >= ma_dr_wav_countof(stepTable)) { + pWav->onSeek(pWav->pUserData, pWav->ima.bytesRemainingInBlock, ma_dr_wav_seek_origin_current); + pWav->ima.bytesRemainingInBlock = 0; + return totalFramesRead; + } + pWav->ima.predictor[0] = (ma_int16)ma_dr_wav_bytes_to_u16(header + 0); + pWav->ima.stepIndex[0] = ma_dr_wav_clamp(header[2], 0, (ma_int32)ma_dr_wav_countof(stepTable)-1); + pWav->ima.cachedFrames[ma_dr_wav_countof(pWav->ima.cachedFrames) - 1] = pWav->ima.predictor[0]; + pWav->ima.cachedFrameCount = 1; + } else { + ma_uint8 header[8]; + if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) { + return totalFramesRead; + } + pWav->ima.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header); + if (header[2] >= ma_dr_wav_countof(stepTable) || header[6] >= ma_dr_wav_countof(stepTable)) { + pWav->onSeek(pWav->pUserData, 
pWav->ima.bytesRemainingInBlock, ma_dr_wav_seek_origin_current); + pWav->ima.bytesRemainingInBlock = 0; + return totalFramesRead; + } + pWav->ima.predictor[0] = ma_dr_wav_bytes_to_s16(header + 0); + pWav->ima.stepIndex[0] = ma_dr_wav_clamp(header[2], 0, (ma_int32)ma_dr_wav_countof(stepTable)-1); + pWav->ima.predictor[1] = ma_dr_wav_bytes_to_s16(header + 4); + pWav->ima.stepIndex[1] = ma_dr_wav_clamp(header[6], 0, (ma_int32)ma_dr_wav_countof(stepTable)-1); + pWav->ima.cachedFrames[ma_dr_wav_countof(pWav->ima.cachedFrames) - 2] = pWav->ima.predictor[0]; + pWav->ima.cachedFrames[ma_dr_wav_countof(pWav->ima.cachedFrames) - 1] = pWav->ima.predictor[1]; + pWav->ima.cachedFrameCount = 1; + } + } + while (framesToRead > 0 && pWav->ima.cachedFrameCount > 0 && pWav->readCursorInPCMFrames < pWav->totalPCMFrameCount) { + if (pBufferOut != NULL) { + ma_uint32 iSample; + for (iSample = 0; iSample < pWav->channels; iSample += 1) { + pBufferOut[iSample] = (ma_int16)pWav->ima.cachedFrames[(ma_dr_wav_countof(pWav->ima.cachedFrames) - (pWav->ima.cachedFrameCount*pWav->channels)) + iSample]; + } + pBufferOut += pWav->channels; + } + framesToRead -= 1; + totalFramesRead += 1; + pWav->readCursorInPCMFrames += 1; + pWav->ima.cachedFrameCount -= 1; + } + if (framesToRead == 0) { + break; + } + if (pWav->ima.cachedFrameCount == 0) { + if (pWav->ima.bytesRemainingInBlock == 0) { + continue; + } else { + pWav->ima.cachedFrameCount = 8; + for (iChannel = 0; iChannel < pWav->channels; ++iChannel) { + ma_uint32 iByte; + ma_uint8 nibbles[4]; + if (pWav->onRead(pWav->pUserData, &nibbles, 4) != 4) { + pWav->ima.cachedFrameCount = 0; + return totalFramesRead; + } + pWav->ima.bytesRemainingInBlock -= 4; + for (iByte = 0; iByte < 4; ++iByte) { + ma_uint8 nibble0 = ((nibbles[iByte] & 0x0F) >> 0); + ma_uint8 nibble1 = ((nibbles[iByte] & 0xF0) >> 4); + ma_int32 step = stepTable[pWav->ima.stepIndex[iChannel]]; + ma_int32 predictor = pWav->ima.predictor[iChannel]; + ma_int32 diff = step >> 3; + if (nibble0 & 1) diff += step >> 2; + if (nibble0 & 2) diff += step >> 1; + if (nibble0 & 4) diff += step; + if (nibble0 & 8) diff = -diff; + predictor = ma_dr_wav_clamp(predictor + diff, -32768, 32767); + pWav->ima.predictor[iChannel] = predictor; + pWav->ima.stepIndex[iChannel] = ma_dr_wav_clamp(pWav->ima.stepIndex[iChannel] + indexTable[nibble0], 0, (ma_int32)ma_dr_wav_countof(stepTable)-1); + pWav->ima.cachedFrames[(ma_dr_wav_countof(pWav->ima.cachedFrames) - (pWav->ima.cachedFrameCount*pWav->channels)) + (iByte*2+0)*pWav->channels + iChannel] = predictor; + step = stepTable[pWav->ima.stepIndex[iChannel]]; + predictor = pWav->ima.predictor[iChannel]; + diff = step >> 3; + if (nibble1 & 1) diff += step >> 2; + if (nibble1 & 2) diff += step >> 1; + if (nibble1 & 4) diff += step; + if (nibble1 & 8) diff = -diff; + predictor = ma_dr_wav_clamp(predictor + diff, -32768, 32767); + pWav->ima.predictor[iChannel] = predictor; + pWav->ima.stepIndex[iChannel] = ma_dr_wav_clamp(pWav->ima.stepIndex[iChannel] + indexTable[nibble1], 0, (ma_int32)ma_dr_wav_countof(stepTable)-1); + pWav->ima.cachedFrames[(ma_dr_wav_countof(pWav->ima.cachedFrames) - (pWav->ima.cachedFrameCount*pWav->channels)) + (iByte*2+1)*pWav->channels + iChannel] = predictor; + } + } + } + } + } + return totalFramesRead; +} +#ifndef MA_DR_WAV_NO_CONVERSION_API +static unsigned short g_ma_dr_wavAlawTable[256] = { + 0xEA80, 0xEB80, 0xE880, 0xE980, 0xEE80, 0xEF80, 0xEC80, 0xED80, 0xE280, 0xE380, 0xE080, 0xE180, 0xE680, 0xE780, 0xE480, 0xE580, + 0xF540, 0xF5C0, 0xF440, 0xF4C0, 0xF740, 
0xF7C0, 0xF640, 0xF6C0, 0xF140, 0xF1C0, 0xF040, 0xF0C0, 0xF340, 0xF3C0, 0xF240, 0xF2C0, + 0xAA00, 0xAE00, 0xA200, 0xA600, 0xBA00, 0xBE00, 0xB200, 0xB600, 0x8A00, 0x8E00, 0x8200, 0x8600, 0x9A00, 0x9E00, 0x9200, 0x9600, + 0xD500, 0xD700, 0xD100, 0xD300, 0xDD00, 0xDF00, 0xD900, 0xDB00, 0xC500, 0xC700, 0xC100, 0xC300, 0xCD00, 0xCF00, 0xC900, 0xCB00, + 0xFEA8, 0xFEB8, 0xFE88, 0xFE98, 0xFEE8, 0xFEF8, 0xFEC8, 0xFED8, 0xFE28, 0xFE38, 0xFE08, 0xFE18, 0xFE68, 0xFE78, 0xFE48, 0xFE58, + 0xFFA8, 0xFFB8, 0xFF88, 0xFF98, 0xFFE8, 0xFFF8, 0xFFC8, 0xFFD8, 0xFF28, 0xFF38, 0xFF08, 0xFF18, 0xFF68, 0xFF78, 0xFF48, 0xFF58, + 0xFAA0, 0xFAE0, 0xFA20, 0xFA60, 0xFBA0, 0xFBE0, 0xFB20, 0xFB60, 0xF8A0, 0xF8E0, 0xF820, 0xF860, 0xF9A0, 0xF9E0, 0xF920, 0xF960, + 0xFD50, 0xFD70, 0xFD10, 0xFD30, 0xFDD0, 0xFDF0, 0xFD90, 0xFDB0, 0xFC50, 0xFC70, 0xFC10, 0xFC30, 0xFCD0, 0xFCF0, 0xFC90, 0xFCB0, + 0x1580, 0x1480, 0x1780, 0x1680, 0x1180, 0x1080, 0x1380, 0x1280, 0x1D80, 0x1C80, 0x1F80, 0x1E80, 0x1980, 0x1880, 0x1B80, 0x1A80, + 0x0AC0, 0x0A40, 0x0BC0, 0x0B40, 0x08C0, 0x0840, 0x09C0, 0x0940, 0x0EC0, 0x0E40, 0x0FC0, 0x0F40, 0x0CC0, 0x0C40, 0x0DC0, 0x0D40, + 0x5600, 0x5200, 0x5E00, 0x5A00, 0x4600, 0x4200, 0x4E00, 0x4A00, 0x7600, 0x7200, 0x7E00, 0x7A00, 0x6600, 0x6200, 0x6E00, 0x6A00, + 0x2B00, 0x2900, 0x2F00, 0x2D00, 0x2300, 0x2100, 0x2700, 0x2500, 0x3B00, 0x3900, 0x3F00, 0x3D00, 0x3300, 0x3100, 0x3700, 0x3500, + 0x0158, 0x0148, 0x0178, 0x0168, 0x0118, 0x0108, 0x0138, 0x0128, 0x01D8, 0x01C8, 0x01F8, 0x01E8, 0x0198, 0x0188, 0x01B8, 0x01A8, + 0x0058, 0x0048, 0x0078, 0x0068, 0x0018, 0x0008, 0x0038, 0x0028, 0x00D8, 0x00C8, 0x00F8, 0x00E8, 0x0098, 0x0088, 0x00B8, 0x00A8, + 0x0560, 0x0520, 0x05E0, 0x05A0, 0x0460, 0x0420, 0x04E0, 0x04A0, 0x0760, 0x0720, 0x07E0, 0x07A0, 0x0660, 0x0620, 0x06E0, 0x06A0, + 0x02B0, 0x0290, 0x02F0, 0x02D0, 0x0230, 0x0210, 0x0270, 0x0250, 0x03B0, 0x0390, 0x03F0, 0x03D0, 0x0330, 0x0310, 0x0370, 0x0350 +}; +static unsigned short g_ma_dr_wavMulawTable[256] = { + 0x8284, 0x8684, 0x8A84, 0x8E84, 0x9284, 0x9684, 0x9A84, 0x9E84, 0xA284, 0xA684, 0xAA84, 0xAE84, 0xB284, 0xB684, 0xBA84, 0xBE84, + 0xC184, 0xC384, 0xC584, 0xC784, 0xC984, 0xCB84, 0xCD84, 0xCF84, 0xD184, 0xD384, 0xD584, 0xD784, 0xD984, 0xDB84, 0xDD84, 0xDF84, + 0xE104, 0xE204, 0xE304, 0xE404, 0xE504, 0xE604, 0xE704, 0xE804, 0xE904, 0xEA04, 0xEB04, 0xEC04, 0xED04, 0xEE04, 0xEF04, 0xF004, + 0xF0C4, 0xF144, 0xF1C4, 0xF244, 0xF2C4, 0xF344, 0xF3C4, 0xF444, 0xF4C4, 0xF544, 0xF5C4, 0xF644, 0xF6C4, 0xF744, 0xF7C4, 0xF844, + 0xF8A4, 0xF8E4, 0xF924, 0xF964, 0xF9A4, 0xF9E4, 0xFA24, 0xFA64, 0xFAA4, 0xFAE4, 0xFB24, 0xFB64, 0xFBA4, 0xFBE4, 0xFC24, 0xFC64, + 0xFC94, 0xFCB4, 0xFCD4, 0xFCF4, 0xFD14, 0xFD34, 0xFD54, 0xFD74, 0xFD94, 0xFDB4, 0xFDD4, 0xFDF4, 0xFE14, 0xFE34, 0xFE54, 0xFE74, + 0xFE8C, 0xFE9C, 0xFEAC, 0xFEBC, 0xFECC, 0xFEDC, 0xFEEC, 0xFEFC, 0xFF0C, 0xFF1C, 0xFF2C, 0xFF3C, 0xFF4C, 0xFF5C, 0xFF6C, 0xFF7C, + 0xFF88, 0xFF90, 0xFF98, 0xFFA0, 0xFFA8, 0xFFB0, 0xFFB8, 0xFFC0, 0xFFC8, 0xFFD0, 0xFFD8, 0xFFE0, 0xFFE8, 0xFFF0, 0xFFF8, 0x0000, + 0x7D7C, 0x797C, 0x757C, 0x717C, 0x6D7C, 0x697C, 0x657C, 0x617C, 0x5D7C, 0x597C, 0x557C, 0x517C, 0x4D7C, 0x497C, 0x457C, 0x417C, + 0x3E7C, 0x3C7C, 0x3A7C, 0x387C, 0x367C, 0x347C, 0x327C, 0x307C, 0x2E7C, 0x2C7C, 0x2A7C, 0x287C, 0x267C, 0x247C, 0x227C, 0x207C, + 0x1EFC, 0x1DFC, 0x1CFC, 0x1BFC, 0x1AFC, 0x19FC, 0x18FC, 0x17FC, 0x16FC, 0x15FC, 0x14FC, 0x13FC, 0x12FC, 0x11FC, 0x10FC, 0x0FFC, + 0x0F3C, 0x0EBC, 0x0E3C, 0x0DBC, 0x0D3C, 0x0CBC, 0x0C3C, 0x0BBC, 0x0B3C, 0x0ABC, 0x0A3C, 0x09BC, 0x093C, 0x08BC, 0x083C, 0x07BC, + 0x075C, 0x071C, 0x06DC, 
0x069C, 0x065C, 0x061C, 0x05DC, 0x059C, 0x055C, 0x051C, 0x04DC, 0x049C, 0x045C, 0x041C, 0x03DC, 0x039C, + 0x036C, 0x034C, 0x032C, 0x030C, 0x02EC, 0x02CC, 0x02AC, 0x028C, 0x026C, 0x024C, 0x022C, 0x020C, 0x01EC, 0x01CC, 0x01AC, 0x018C, + 0x0174, 0x0164, 0x0154, 0x0144, 0x0134, 0x0124, 0x0114, 0x0104, 0x00F4, 0x00E4, 0x00D4, 0x00C4, 0x00B4, 0x00A4, 0x0094, 0x0084, + 0x0078, 0x0070, 0x0068, 0x0060, 0x0058, 0x0050, 0x0048, 0x0040, 0x0038, 0x0030, 0x0028, 0x0020, 0x0018, 0x0010, 0x0008, 0x0000 +}; +static MA_INLINE ma_int16 ma_dr_wav__alaw_to_s16(ma_uint8 sampleIn) +{ + return (short)g_ma_dr_wavAlawTable[sampleIn]; +} +static MA_INLINE ma_int16 ma_dr_wav__mulaw_to_s16(ma_uint8 sampleIn) +{ + return (short)g_ma_dr_wavMulawTable[sampleIn]; +} +MA_PRIVATE void ma_dr_wav__pcm_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t totalSampleCount, unsigned int bytesPerSample) +{ + size_t i; + if (bytesPerSample == 1) { + ma_dr_wav_u8_to_s16(pOut, pIn, totalSampleCount); + return; + } + if (bytesPerSample == 2) { + for (i = 0; i < totalSampleCount; ++i) { + *pOut++ = ((const ma_int16*)pIn)[i]; + } + return; + } + if (bytesPerSample == 3) { + ma_dr_wav_s24_to_s16(pOut, pIn, totalSampleCount); + return; + } + if (bytesPerSample == 4) { + ma_dr_wav_s32_to_s16(pOut, (const ma_int32*)pIn, totalSampleCount); + return; + } + if (bytesPerSample > 8) { + MA_DR_WAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut)); + return; + } + for (i = 0; i < totalSampleCount; ++i) { + ma_uint64 sample = 0; + unsigned int shift = (8 - bytesPerSample) * 8; + unsigned int j; + for (j = 0; j < bytesPerSample; j += 1) { + MA_DR_WAV_ASSERT(j < 8); + sample |= (ma_uint64)(pIn[j]) << shift; + shift += 8; + } + pIn += j; + *pOut++ = (ma_int16)((ma_int64)sample >> 48); + } +} +MA_PRIVATE void ma_dr_wav__ieee_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t totalSampleCount, unsigned int bytesPerSample) +{ + if (bytesPerSample == 4) { + ma_dr_wav_f32_to_s16(pOut, (const float*)pIn, totalSampleCount); + return; + } else if (bytesPerSample == 8) { + ma_dr_wav_f64_to_s16(pOut, (const double*)pIn, totalSampleCount); + return; + } else { + MA_DR_WAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut)); + return; + } +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s16__pcm(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut) +{ + ma_uint64 totalFramesRead; + ma_uint8 sampleData[4096] = {0}; + ma_uint32 bytesPerFrame; + ma_uint32 bytesPerSample; + ma_uint64 samplesRead; + if ((pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_PCM && pWav->bitsPerSample == 16) || pBufferOut == NULL) { + return ma_dr_wav_read_pcm_frames(pWav, framesToRead, pBufferOut); + } + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + bytesPerSample = bytesPerFrame / pWav->channels; + if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { + return 0; + } + totalFramesRead = 0; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + samplesRead = framesRead * pWav->channels; + if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { + MA_DR_WAV_ASSERT(MA_FALSE); + break; + } + ma_dr_wav__pcm_to_s16(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample); + pBufferOut += samplesRead; + framesToRead -= framesRead; + 
totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s16__ieee(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut) +{ + ma_uint64 totalFramesRead; + ma_uint8 sampleData[4096] = {0}; + ma_uint32 bytesPerFrame; + ma_uint32 bytesPerSample; + ma_uint64 samplesRead; + if (pBufferOut == NULL) { + return ma_dr_wav_read_pcm_frames(pWav, framesToRead, NULL); + } + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + bytesPerSample = bytesPerFrame / pWav->channels; + if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { + return 0; + } + totalFramesRead = 0; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + samplesRead = framesRead * pWav->channels; + if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { + MA_DR_WAV_ASSERT(MA_FALSE); + break; + } + ma_dr_wav__ieee_to_s16(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample); + pBufferOut += samplesRead; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s16__alaw(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut) +{ + ma_uint64 totalFramesRead; + ma_uint8 sampleData[4096] = {0}; + ma_uint32 bytesPerFrame; + ma_uint32 bytesPerSample; + ma_uint64 samplesRead; + if (pBufferOut == NULL) { + return ma_dr_wav_read_pcm_frames(pWav, framesToRead, NULL); + } + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + bytesPerSample = bytesPerFrame / pWav->channels; + if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { + return 0; + } + totalFramesRead = 0; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + samplesRead = framesRead * pWav->channels; + if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { + MA_DR_WAV_ASSERT(MA_FALSE); + break; + } + ma_dr_wav_alaw_to_s16(pBufferOut, sampleData, (size_t)samplesRead); +#ifdef MA_DR_WAV_LIBSNDFILE_COMPAT + { + if (pWav->container == ma_dr_wav_container_aiff) { + ma_uint64 iSample; + for (iSample = 0; iSample < samplesRead; iSample += 1) { + pBufferOut[iSample] = -pBufferOut[iSample]; + } + } + } +#endif + pBufferOut += samplesRead; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s16__mulaw(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut) +{ + ma_uint64 totalFramesRead; + ma_uint8 sampleData[4096] = {0}; + ma_uint32 bytesPerFrame; + ma_uint32 bytesPerSample; + ma_uint64 samplesRead; + if (pBufferOut == NULL) { + return ma_dr_wav_read_pcm_frames(pWav, framesToRead, NULL); + } + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + bytesPerSample = bytesPerFrame / pWav->channels; + if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { + return 0; + } + totalFramesRead = 0; + while 
(framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + samplesRead = framesRead * pWav->channels; + if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { + MA_DR_WAV_ASSERT(MA_FALSE); + break; + } + ma_dr_wav_mulaw_to_s16(pBufferOut, sampleData, (size_t)samplesRead); +#ifdef MA_DR_WAV_LIBSNDFILE_COMPAT + { + if (pWav->container == ma_dr_wav_container_aiff) { + ma_uint64 iSample; + for (iSample = 0; iSample < samplesRead; iSample += 1) { + pBufferOut[iSample] = -pBufferOut[iSample]; + } + } + } +#endif + pBufferOut += samplesRead; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s16(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut) +{ + if (pWav == NULL || framesToRead == 0) { + return 0; + } + if (pBufferOut == NULL) { + return ma_dr_wav_read_pcm_frames(pWav, framesToRead, NULL); + } + if (framesToRead * pWav->channels * sizeof(ma_int16) > MA_SIZE_MAX) { + framesToRead = MA_SIZE_MAX / sizeof(ma_int16) / pWav->channels; + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_PCM) { + return ma_dr_wav_read_pcm_frames_s16__pcm(pWav, framesToRead, pBufferOut); + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_IEEE_FLOAT) { + return ma_dr_wav_read_pcm_frames_s16__ieee(pWav, framesToRead, pBufferOut); + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ALAW) { + return ma_dr_wav_read_pcm_frames_s16__alaw(pWav, framesToRead, pBufferOut); + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_MULAW) { + return ma_dr_wav_read_pcm_frames_s16__mulaw(pWav, framesToRead, pBufferOut); + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ADPCM) { + return ma_dr_wav_read_pcm_frames_s16__msadpcm(pWav, framesToRead, pBufferOut); + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM) { + return ma_dr_wav_read_pcm_frames_s16__ima(pWav, framesToRead, pBufferOut); + } + return 0; +} +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s16le(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut) +{ + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames_s16(pWav, framesToRead, pBufferOut); + if (pBufferOut != NULL && ma_dr_wav__is_little_endian() == MA_FALSE) { + ma_dr_wav__bswap_samples_s16(pBufferOut, framesRead*pWav->channels); + } + return framesRead; +} +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s16be(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut) +{ + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames_s16(pWav, framesToRead, pBufferOut); + if (pBufferOut != NULL && ma_dr_wav__is_little_endian() == MA_TRUE) { + ma_dr_wav__bswap_samples_s16(pBufferOut, framesRead*pWav->channels); + } + return framesRead; +} +MA_API void ma_dr_wav_u8_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + int r; + size_t i; + for (i = 0; i < sampleCount; ++i) { + int x = pIn[i]; + r = x << 8; + r = r - 32768; + pOut[i] = (short)r; + } +} +MA_API void ma_dr_wav_s24_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + int r; + size_t i; + for (i = 0; i < sampleCount; ++i) { + int x = ((int)(((unsigned int)(((const ma_uint8*)pIn)[i*3+0]) << 8) | ((unsigned int)(((const ma_uint8*)pIn)[i*3+1]) << 16) | ((unsigned int)(((const ma_uint8*)pIn)[i*3+2])) << 24)) >> 8; + r = x >> 8; + pOut[i] = 
(short)r; + } +} +MA_API void ma_dr_wav_s32_to_s16(ma_int16* pOut, const ma_int32* pIn, size_t sampleCount) +{ + int r; + size_t i; + for (i = 0; i < sampleCount; ++i) { + int x = pIn[i]; + r = x >> 16; + pOut[i] = (short)r; + } +} +MA_API void ma_dr_wav_f32_to_s16(ma_int16* pOut, const float* pIn, size_t sampleCount) +{ + int r; + size_t i; + for (i = 0; i < sampleCount; ++i) { + float x = pIn[i]; + float c; + c = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); + c = c + 1; + r = (int)(c * 32767.5f); + r = r - 32768; + pOut[i] = (short)r; + } +} +MA_API void ma_dr_wav_f64_to_s16(ma_int16* pOut, const double* pIn, size_t sampleCount) +{ + int r; + size_t i; + for (i = 0; i < sampleCount; ++i) { + double x = pIn[i]; + double c; + c = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); + c = c + 1; + r = (int)(c * 32767.5); + r = r - 32768; + pOut[i] = (short)r; + } +} +MA_API void ma_dr_wav_alaw_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + for (i = 0; i < sampleCount; ++i) { + pOut[i] = ma_dr_wav__alaw_to_s16(pIn[i]); + } +} +MA_API void ma_dr_wav_mulaw_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + for (i = 0; i < sampleCount; ++i) { + pOut[i] = ma_dr_wav__mulaw_to_s16(pIn[i]); + } +} +MA_PRIVATE void ma_dr_wav__pcm_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount, unsigned int bytesPerSample) +{ + unsigned int i; + if (bytesPerSample == 1) { + ma_dr_wav_u8_to_f32(pOut, pIn, sampleCount); + return; + } + if (bytesPerSample == 2) { + ma_dr_wav_s16_to_f32(pOut, (const ma_int16*)pIn, sampleCount); + return; + } + if (bytesPerSample == 3) { + ma_dr_wav_s24_to_f32(pOut, pIn, sampleCount); + return; + } + if (bytesPerSample == 4) { + ma_dr_wav_s32_to_f32(pOut, (const ma_int32*)pIn, sampleCount); + return; + } + if (bytesPerSample > 8) { + MA_DR_WAV_ZERO_MEMORY(pOut, sampleCount * sizeof(*pOut)); + return; + } + for (i = 0; i < sampleCount; ++i) { + ma_uint64 sample = 0; + unsigned int shift = (8 - bytesPerSample) * 8; + unsigned int j; + for (j = 0; j < bytesPerSample; j += 1) { + MA_DR_WAV_ASSERT(j < 8); + sample |= (ma_uint64)(pIn[j]) << shift; + shift += 8; + } + pIn += j; + *pOut++ = (float)((ma_int64)sample / 9223372036854775807.0); + } +} +MA_PRIVATE void ma_dr_wav__ieee_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount, unsigned int bytesPerSample) +{ + if (bytesPerSample == 4) { + unsigned int i; + for (i = 0; i < sampleCount; ++i) { + *pOut++ = ((const float*)pIn)[i]; + } + return; + } else if (bytesPerSample == 8) { + ma_dr_wav_f64_to_f32(pOut, (const double*)pIn, sampleCount); + return; + } else { + MA_DR_WAV_ZERO_MEMORY(pOut, sampleCount * sizeof(*pOut)); + return; + } +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_f32__pcm(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut) +{ + ma_uint64 totalFramesRead; + ma_uint8 sampleData[4096] = {0}; + ma_uint32 bytesPerFrame; + ma_uint32 bytesPerSample; + ma_uint64 samplesRead; + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + bytesPerSample = bytesPerFrame / pWav->channels; + if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { + return 0; + } + totalFramesRead = 0; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + 
samplesRead = framesRead * pWav->channels; + if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { + MA_DR_WAV_ASSERT(MA_FALSE); + break; + } + ma_dr_wav__pcm_to_f32(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample); + pBufferOut += samplesRead; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_f32__msadpcm_ima(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut) +{ + ma_uint64 totalFramesRead; + ma_int16 samples16[2048]; + totalFramesRead = 0; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, ma_dr_wav_countof(samples16)/pWav->channels); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames_s16(pWav, framesToReadThisIteration, samples16); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + ma_dr_wav_s16_to_f32(pBufferOut, samples16, (size_t)(framesRead*pWav->channels)); + pBufferOut += framesRead*pWav->channels; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_f32__ieee(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut) +{ + ma_uint64 totalFramesRead; + ma_uint8 sampleData[4096] = {0}; + ma_uint32 bytesPerFrame; + ma_uint32 bytesPerSample; + ma_uint64 samplesRead; + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_IEEE_FLOAT && pWav->bitsPerSample == 32) { + return ma_dr_wav_read_pcm_frames(pWav, framesToRead, pBufferOut); + } + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + bytesPerSample = bytesPerFrame / pWav->channels; + if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { + return 0; + } + totalFramesRead = 0; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + samplesRead = framesRead * pWav->channels; + if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { + MA_DR_WAV_ASSERT(MA_FALSE); + break; + } + ma_dr_wav__ieee_to_f32(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample); + pBufferOut += samplesRead; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_f32__alaw(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut) +{ + ma_uint64 totalFramesRead; + ma_uint8 sampleData[4096] = {0}; + ma_uint32 bytesPerFrame; + ma_uint32 bytesPerSample; + ma_uint64 samplesRead; + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + bytesPerSample = bytesPerFrame / pWav->channels; + if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { + return 0; + } + totalFramesRead = 0; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + samplesRead = framesRead * pWav->channels; + if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { + MA_DR_WAV_ASSERT(MA_FALSE); + break; + } + 
ma_dr_wav_alaw_to_f32(pBufferOut, sampleData, (size_t)samplesRead); +#ifdef MA_DR_WAV_LIBSNDFILE_COMPAT + { + if (pWav->container == ma_dr_wav_container_aiff) { + ma_uint64 iSample; + for (iSample = 0; iSample < samplesRead; iSample += 1) { + pBufferOut[iSample] = -pBufferOut[iSample]; + } + } + } +#endif + pBufferOut += samplesRead; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_f32__mulaw(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut) +{ + ma_uint64 totalFramesRead; + ma_uint8 sampleData[4096] = {0}; + ma_uint32 bytesPerFrame; + ma_uint32 bytesPerSample; + ma_uint64 samplesRead; + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + bytesPerSample = bytesPerFrame / pWav->channels; + if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { + return 0; + } + totalFramesRead = 0; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + samplesRead = framesRead * pWav->channels; + if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { + MA_DR_WAV_ASSERT(MA_FALSE); + break; + } + ma_dr_wav_mulaw_to_f32(pBufferOut, sampleData, (size_t)samplesRead); +#ifdef MA_DR_WAV_LIBSNDFILE_COMPAT + { + if (pWav->container == ma_dr_wav_container_aiff) { + ma_uint64 iSample; + for (iSample = 0; iSample < samplesRead; iSample += 1) { + pBufferOut[iSample] = -pBufferOut[iSample]; + } + } + } +#endif + pBufferOut += samplesRead; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_f32(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut) +{ + if (pWav == NULL || framesToRead == 0) { + return 0; + } + if (pBufferOut == NULL) { + return ma_dr_wav_read_pcm_frames(pWav, framesToRead, NULL); + } + if (framesToRead * pWav->channels * sizeof(float) > MA_SIZE_MAX) { + framesToRead = MA_SIZE_MAX / sizeof(float) / pWav->channels; + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_PCM) { + return ma_dr_wav_read_pcm_frames_f32__pcm(pWav, framesToRead, pBufferOut); + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ADPCM || pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM) { + return ma_dr_wav_read_pcm_frames_f32__msadpcm_ima(pWav, framesToRead, pBufferOut); + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_IEEE_FLOAT) { + return ma_dr_wav_read_pcm_frames_f32__ieee(pWav, framesToRead, pBufferOut); + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ALAW) { + return ma_dr_wav_read_pcm_frames_f32__alaw(pWav, framesToRead, pBufferOut); + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_MULAW) { + return ma_dr_wav_read_pcm_frames_f32__mulaw(pWav, framesToRead, pBufferOut); + } + return 0; +} +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_f32le(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut) +{ + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames_f32(pWav, framesToRead, pBufferOut); + if (pBufferOut != NULL && ma_dr_wav__is_little_endian() == MA_FALSE) { + ma_dr_wav__bswap_samples_f32(pBufferOut, framesRead*pWav->channels); + } + return framesRead; +} +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_f32be(ma_dr_wav* pWav, ma_uint64 framesToRead, float* 
pBufferOut) +{ + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames_f32(pWav, framesToRead, pBufferOut); + if (pBufferOut != NULL && ma_dr_wav__is_little_endian() == MA_TRUE) { + ma_dr_wav__bswap_samples_f32(pBufferOut, framesRead*pWav->channels); + } + return framesRead; +} +MA_API void ma_dr_wav_u8_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } +#ifdef MA_DR_WAV_LIBSNDFILE_COMPAT + for (i = 0; i < sampleCount; ++i) { + *pOut++ = (pIn[i] / 256.0f) * 2 - 1; + } +#else + for (i = 0; i < sampleCount; ++i) { + float x = pIn[i]; + x = x * 0.00784313725490196078f; + x = x - 1; + *pOut++ = x; + } +#endif +} +MA_API void ma_dr_wav_s16_to_f32(float* pOut, const ma_int16* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = pIn[i] * 0.000030517578125f; + } +} +MA_API void ma_dr_wav_s24_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + double x; + ma_uint32 a = ((ma_uint32)(pIn[i*3+0]) << 8); + ma_uint32 b = ((ma_uint32)(pIn[i*3+1]) << 16); + ma_uint32 c = ((ma_uint32)(pIn[i*3+2]) << 24); + x = (double)((ma_int32)(a | b | c) >> 8); + *pOut++ = (float)(x * 0.00000011920928955078125); + } +} +MA_API void ma_dr_wav_s32_to_f32(float* pOut, const ma_int32* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = (float)(pIn[i] / 2147483648.0); + } +} +MA_API void ma_dr_wav_f64_to_f32(float* pOut, const double* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = (float)pIn[i]; + } +} +MA_API void ma_dr_wav_alaw_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = ma_dr_wav__alaw_to_s16(pIn[i]) / 32768.0f; + } +} +MA_API void ma_dr_wav_mulaw_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = ma_dr_wav__mulaw_to_s16(pIn[i]) / 32768.0f; + } +} +MA_PRIVATE void ma_dr_wav__pcm_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t totalSampleCount, unsigned int bytesPerSample) +{ + unsigned int i; + if (bytesPerSample == 1) { + ma_dr_wav_u8_to_s32(pOut, pIn, totalSampleCount); + return; + } + if (bytesPerSample == 2) { + ma_dr_wav_s16_to_s32(pOut, (const ma_int16*)pIn, totalSampleCount); + return; + } + if (bytesPerSample == 3) { + ma_dr_wav_s24_to_s32(pOut, pIn, totalSampleCount); + return; + } + if (bytesPerSample == 4) { + for (i = 0; i < totalSampleCount; ++i) { + *pOut++ = ((const ma_int32*)pIn)[i]; + } + return; + } + if (bytesPerSample > 8) { + MA_DR_WAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut)); + return; + } + for (i = 0; i < totalSampleCount; ++i) { + ma_uint64 sample = 0; + unsigned int shift = (8 - bytesPerSample) * 8; + unsigned int j; + for (j = 0; j < bytesPerSample; j += 1) { + MA_DR_WAV_ASSERT(j < 8); + sample |= (ma_uint64)(pIn[j]) << shift; + shift += 8; + } + pIn += j; + *pOut++ = (ma_int32)((ma_int64)sample >> 32); + } +} +MA_PRIVATE void ma_dr_wav__ieee_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t totalSampleCount, unsigned int bytesPerSample) +{ + if (bytesPerSample == 4) { + 
ma_dr_wav_f32_to_s32(pOut, (const float*)pIn, totalSampleCount); + return; + } else if (bytesPerSample == 8) { + ma_dr_wav_f64_to_s32(pOut, (const double*)pIn, totalSampleCount); + return; + } else { + MA_DR_WAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut)); + return; + } +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s32__pcm(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + ma_uint64 totalFramesRead; + ma_uint8 sampleData[4096] = {0}; + ma_uint32 bytesPerFrame; + ma_uint32 bytesPerSample; + ma_uint64 samplesRead; + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_PCM && pWav->bitsPerSample == 32) { + return ma_dr_wav_read_pcm_frames(pWav, framesToRead, pBufferOut); + } + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + bytesPerSample = bytesPerFrame / pWav->channels; + if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { + return 0; + } + totalFramesRead = 0; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + samplesRead = framesRead * pWav->channels; + if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { + MA_DR_WAV_ASSERT(MA_FALSE); + break; + } + ma_dr_wav__pcm_to_s32(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample); + pBufferOut += samplesRead; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s32__msadpcm_ima(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + ma_uint64 totalFramesRead = 0; + ma_int16 samples16[2048]; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, ma_dr_wav_countof(samples16)/pWav->channels); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames_s16(pWav, framesToReadThisIteration, samples16); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + ma_dr_wav_s16_to_s32(pBufferOut, samples16, (size_t)(framesRead*pWav->channels)); + pBufferOut += framesRead*pWav->channels; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s32__ieee(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + ma_uint64 totalFramesRead; + ma_uint8 sampleData[4096] = {0}; + ma_uint32 bytesPerFrame; + ma_uint32 bytesPerSample; + ma_uint64 samplesRead; + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + bytesPerSample = bytesPerFrame / pWav->channels; + if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { + return 0; + } + totalFramesRead = 0; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + samplesRead = framesRead * pWav->channels; + if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { + MA_DR_WAV_ASSERT(MA_FALSE); + break; + } + ma_dr_wav__ieee_to_s32(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample); + pBufferOut += samplesRead; + 
framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s32__alaw(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + ma_uint64 totalFramesRead; + ma_uint8 sampleData[4096] = {0}; + ma_uint32 bytesPerFrame; + ma_uint32 bytesPerSample; + ma_uint64 samplesRead; + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + bytesPerSample = bytesPerFrame / pWav->channels; + if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { + return 0; + } + totalFramesRead = 0; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + samplesRead = framesRead * pWav->channels; + if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { + MA_DR_WAV_ASSERT(MA_FALSE); + break; + } + ma_dr_wav_alaw_to_s32(pBufferOut, sampleData, (size_t)samplesRead); +#ifdef MA_DR_WAV_LIBSNDFILE_COMPAT + { + if (pWav->container == ma_dr_wav_container_aiff) { + ma_uint64 iSample; + for (iSample = 0; iSample < samplesRead; iSample += 1) { + pBufferOut[iSample] = -pBufferOut[iSample]; + } + } + } +#endif + pBufferOut += samplesRead; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s32__mulaw(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + ma_uint64 totalFramesRead; + ma_uint8 sampleData[4096] = {0}; + ma_uint32 bytesPerFrame; + ma_uint32 bytesPerSample; + ma_uint64 samplesRead; + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + bytesPerSample = bytesPerFrame / pWav->channels; + if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { + return 0; + } + totalFramesRead = 0; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + samplesRead = framesRead * pWav->channels; + if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { + MA_DR_WAV_ASSERT(MA_FALSE); + break; + } + ma_dr_wav_mulaw_to_s32(pBufferOut, sampleData, (size_t)samplesRead); +#ifdef MA_DR_WAV_LIBSNDFILE_COMPAT + { + if (pWav->container == ma_dr_wav_container_aiff) { + ma_uint64 iSample; + for (iSample = 0; iSample < samplesRead; iSample += 1) { + pBufferOut[iSample] = -pBufferOut[iSample]; + } + } + } +#endif + pBufferOut += samplesRead; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s32(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + if (pWav == NULL || framesToRead == 0) { + return 0; + } + if (pBufferOut == NULL) { + return ma_dr_wav_read_pcm_frames(pWav, framesToRead, NULL); + } + if (framesToRead * pWav->channels * sizeof(ma_int32) > MA_SIZE_MAX) { + framesToRead = MA_SIZE_MAX / sizeof(ma_int32) / pWav->channels; + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_PCM) { + return ma_dr_wav_read_pcm_frames_s32__pcm(pWav, framesToRead, pBufferOut); + } + if 
(pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ADPCM || pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM) { + return ma_dr_wav_read_pcm_frames_s32__msadpcm_ima(pWav, framesToRead, pBufferOut); + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_IEEE_FLOAT) { + return ma_dr_wav_read_pcm_frames_s32__ieee(pWav, framesToRead, pBufferOut); + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ALAW) { + return ma_dr_wav_read_pcm_frames_s32__alaw(pWav, framesToRead, pBufferOut); + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_MULAW) { + return ma_dr_wav_read_pcm_frames_s32__mulaw(pWav, framesToRead, pBufferOut); + } + return 0; +} +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s32le(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames_s32(pWav, framesToRead, pBufferOut); + if (pBufferOut != NULL && ma_dr_wav__is_little_endian() == MA_FALSE) { + ma_dr_wav__bswap_samples_s32(pBufferOut, framesRead*pWav->channels); + } + return framesRead; +} +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s32be(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames_s32(pWav, framesToRead, pBufferOut); + if (pBufferOut != NULL && ma_dr_wav__is_little_endian() == MA_TRUE) { + ma_dr_wav__bswap_samples_s32(pBufferOut, framesRead*pWav->channels); + } + return framesRead; +} +MA_API void ma_dr_wav_u8_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = ((int)pIn[i] - 128) << 24; + } +} +MA_API void ma_dr_wav_s16_to_s32(ma_int32* pOut, const ma_int16* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = pIn[i] << 16; + } +} +MA_API void ma_dr_wav_s24_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + unsigned int s0 = pIn[i*3 + 0]; + unsigned int s1 = pIn[i*3 + 1]; + unsigned int s2 = pIn[i*3 + 2]; + ma_int32 sample32 = (ma_int32)((s0 << 8) | (s1 << 16) | (s2 << 24)); + *pOut++ = sample32; + } +} +MA_API void ma_dr_wav_f32_to_s32(ma_int32* pOut, const float* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = (ma_int32)(2147483648.0 * pIn[i]); + } +} +MA_API void ma_dr_wav_f64_to_s32(ma_int32* pOut, const double* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = (ma_int32)(2147483648.0 * pIn[i]); + } +} +MA_API void ma_dr_wav_alaw_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = ((ma_int32)ma_dr_wav__alaw_to_s16(pIn[i])) << 16; + } +} +MA_API void ma_dr_wav_mulaw_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i= 0; i < sampleCount; ++i) { + *pOut++ = ((ma_int32)ma_dr_wav__mulaw_to_s16(pIn[i])) << 16; + } +} +MA_PRIVATE ma_int16* ma_dr_wav__read_pcm_frames_and_close_s16(ma_dr_wav* pWav, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalFrameCount) +{ + ma_uint64 sampleDataSize; + ma_int16* pSampleData; + ma_uint64 framesRead; + 
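/* A small self-check (not part of dr_wav) of the widening conversions defined above:
   u8 samples are re-centred around zero and scaled to the top byte (x 2^24), s16 is scaled
   into the top half (x 2^16), and s24 is packed into the top three bytes of an int32, so
   each narrower format lands on full 32-bit scale. The multiplications below are just the
   overflow-free equivalent of the shifts used in the library code. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* u8 -> s32: 0 maps to INT32_MIN, 128 to 0, 255 to 0x7F000000. */
    assert(((int64_t)0   - 128) * 16777216 == (int64_t)INT32_MIN);   /* 16777216 == 2^24 */
    assert(((int64_t)128 - 128) * 16777216 == 0);
    assert(((int64_t)255 - 128) * 16777216 == 0x7F000000);

    /* s16 -> s32: scale by 2^16, sign preserved. */
    assert((int64_t)-32768 * 65536 == (int64_t)INT32_MIN);
    assert((int64_t) 32767 * 65536 == 0x7FFF0000);

    /* s24 -> s32: bytes lo..hi 00 00 80 are -8388608, which packs to the int32 minimum. */
    {
        const uint8_t b[3] = {0x00, 0x00, 0x80};
        uint32_t packed = ((uint32_t)b[0] << 8) | ((uint32_t)b[1] << 16) | ((uint32_t)b[2] << 24);
        assert(packed == 0x80000000u);   /* reinterpreted as int32 this is INT32_MIN */
    }
    return 0;
}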
MA_DR_WAV_ASSERT(pWav != NULL); + sampleDataSize = pWav->totalPCMFrameCount * pWav->channels * sizeof(ma_int16); + if (sampleDataSize > MA_SIZE_MAX) { + ma_dr_wav_uninit(pWav); + return NULL; + } + pSampleData = (ma_int16*)ma_dr_wav__malloc_from_callbacks((size_t)sampleDataSize, &pWav->allocationCallbacks); + if (pSampleData == NULL) { + ma_dr_wav_uninit(pWav); + return NULL; + } + framesRead = ma_dr_wav_read_pcm_frames_s16(pWav, (size_t)pWav->totalPCMFrameCount, pSampleData); + if (framesRead != pWav->totalPCMFrameCount) { + ma_dr_wav__free_from_callbacks(pSampleData, &pWav->allocationCallbacks); + ma_dr_wav_uninit(pWav); + return NULL; + } + ma_dr_wav_uninit(pWav); + if (sampleRate) { + *sampleRate = pWav->sampleRate; + } + if (channels) { + *channels = pWav->channels; + } + if (totalFrameCount) { + *totalFrameCount = pWav->totalPCMFrameCount; + } + return pSampleData; +} +MA_PRIVATE float* ma_dr_wav__read_pcm_frames_and_close_f32(ma_dr_wav* pWav, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalFrameCount) +{ + ma_uint64 sampleDataSize; + float* pSampleData; + ma_uint64 framesRead; + MA_DR_WAV_ASSERT(pWav != NULL); + sampleDataSize = pWav->totalPCMFrameCount * pWav->channels * sizeof(float); + if (sampleDataSize > MA_SIZE_MAX) { + ma_dr_wav_uninit(pWav); + return NULL; + } + pSampleData = (float*)ma_dr_wav__malloc_from_callbacks((size_t)sampleDataSize, &pWav->allocationCallbacks); + if (pSampleData == NULL) { + ma_dr_wav_uninit(pWav); + return NULL; + } + framesRead = ma_dr_wav_read_pcm_frames_f32(pWav, (size_t)pWav->totalPCMFrameCount, pSampleData); + if (framesRead != pWav->totalPCMFrameCount) { + ma_dr_wav__free_from_callbacks(pSampleData, &pWav->allocationCallbacks); + ma_dr_wav_uninit(pWav); + return NULL; + } + ma_dr_wav_uninit(pWav); + if (sampleRate) { + *sampleRate = pWav->sampleRate; + } + if (channels) { + *channels = pWav->channels; + } + if (totalFrameCount) { + *totalFrameCount = pWav->totalPCMFrameCount; + } + return pSampleData; +} +MA_PRIVATE ma_int32* ma_dr_wav__read_pcm_frames_and_close_s32(ma_dr_wav* pWav, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalFrameCount) +{ + ma_uint64 sampleDataSize; + ma_int32* pSampleData; + ma_uint64 framesRead; + MA_DR_WAV_ASSERT(pWav != NULL); + sampleDataSize = pWav->totalPCMFrameCount * pWav->channels * sizeof(ma_int32); + if (sampleDataSize > MA_SIZE_MAX) { + ma_dr_wav_uninit(pWav); + return NULL; + } + pSampleData = (ma_int32*)ma_dr_wav__malloc_from_callbacks((size_t)sampleDataSize, &pWav->allocationCallbacks); + if (pSampleData == NULL) { + ma_dr_wav_uninit(pWav); + return NULL; + } + framesRead = ma_dr_wav_read_pcm_frames_s32(pWav, (size_t)pWav->totalPCMFrameCount, pSampleData); + if (framesRead != pWav->totalPCMFrameCount) { + ma_dr_wav__free_from_callbacks(pSampleData, &pWav->allocationCallbacks); + ma_dr_wav_uninit(pWav); + return NULL; + } + ma_dr_wav_uninit(pWav); + if (sampleRate) { + *sampleRate = pWav->sampleRate; + } + if (channels) { + *channels = pWav->channels; + } + if (totalFrameCount) { + *totalFrameCount = pWav->totalPCMFrameCount; + } + return pSampleData; +} +MA_API ma_int16* ma_dr_wav_open_and_read_pcm_frames_s16(ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + 
*totalFrameCountOut = 0; + } + if (!ma_dr_wav_init(&wav, onRead, onSeek, pUserData, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +MA_API float* ma_dr_wav_open_and_read_pcm_frames_f32(ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init(&wav, onRead, onSeek, pUserData, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +MA_API ma_int32* ma_dr_wav_open_and_read_pcm_frames_s32(ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init(&wav, onRead, onSeek, pUserData, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +#ifndef MA_DR_WAV_NO_STDIO +MA_API ma_int16* ma_dr_wav_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_file(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +MA_API float* ma_dr_wav_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_file(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +MA_API ma_int32* ma_dr_wav_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_file(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +#ifndef MA_DR_WAV_NO_WCHAR +MA_API ma_int16* ma_dr_wav_open_file_and_read_pcm_frames_s16_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const 
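/* Usage sketch for the convenience loaders defined in this region, assuming the amalgamated
   miniaudio header that carries these declarations is available (the include name and the
   file name below are illustrative; adjust to this project's layout). The returned buffer is
   interleaved PCM and must be released with ma_dr_wav_free() using the same allocation
   callbacks that were passed in (NULL here, meaning the defaults). */
#include <stdio.h>
#include "miniaudio.h"   /* assumed location of this amalgamated header */

int load_example(void)
{
    unsigned int channels   = 0;
    unsigned int sampleRate = 0;
    ma_uint64    frameCount = 0;

    ma_int16* pFrames = ma_dr_wav_open_file_and_read_pcm_frames_s16(
        "example.wav", &channels, &sampleRate, &frameCount, NULL);
    if (pFrames == NULL) {
        return -1;   /* failed to open or decode */
    }

    printf("%u ch, %u Hz, %llu frames\n", channels, sampleRate, (unsigned long long)frameCount);

    ma_dr_wav_free(pFrames, NULL);
    return 0;
}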
ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (channelsOut) { + *channelsOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_file_w(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +MA_API float* ma_dr_wav_open_file_and_read_pcm_frames_f32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (channelsOut) { + *channelsOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_file_w(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +MA_API ma_int32* ma_dr_wav_open_file_and_read_pcm_frames_s32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (channelsOut) { + *channelsOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_file_w(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +#endif +#endif +MA_API ma_int16* ma_dr_wav_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_memory(&wav, data, dataSize, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +MA_API float* ma_dr_wav_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_memory(&wav, data, dataSize, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +MA_API ma_int32* ma_dr_wav_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_memory(&wav, data, dataSize, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +#endif +MA_API void ma_dr_wav_free(void* p, const ma_allocation_callbacks* 
pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + ma_dr_wav__free_from_callbacks(p, pAllocationCallbacks); + } else { + ma_dr_wav__free_default(p, NULL); + } +} +MA_API ma_uint16 ma_dr_wav_bytes_to_u16(const ma_uint8* data) +{ + return ((ma_uint16)data[0] << 0) | ((ma_uint16)data[1] << 8); +} +MA_API ma_int16 ma_dr_wav_bytes_to_s16(const ma_uint8* data) +{ + return (ma_int16)ma_dr_wav_bytes_to_u16(data); +} +MA_API ma_uint32 ma_dr_wav_bytes_to_u32(const ma_uint8* data) +{ + return ma_dr_wav_bytes_to_u32_le(data); +} +MA_API float ma_dr_wav_bytes_to_f32(const ma_uint8* data) +{ + union { + ma_uint32 u32; + float f32; + } value; + value.u32 = ma_dr_wav_bytes_to_u32(data); + return value.f32; +} +MA_API ma_int32 ma_dr_wav_bytes_to_s32(const ma_uint8* data) +{ + return (ma_int32)ma_dr_wav_bytes_to_u32(data); +} +MA_API ma_uint64 ma_dr_wav_bytes_to_u64(const ma_uint8* data) +{ + return + ((ma_uint64)data[0] << 0) | ((ma_uint64)data[1] << 8) | ((ma_uint64)data[2] << 16) | ((ma_uint64)data[3] << 24) | + ((ma_uint64)data[4] << 32) | ((ma_uint64)data[5] << 40) | ((ma_uint64)data[6] << 48) | ((ma_uint64)data[7] << 56); +} +MA_API ma_int64 ma_dr_wav_bytes_to_s64(const ma_uint8* data) +{ + return (ma_int64)ma_dr_wav_bytes_to_u64(data); +} +MA_API ma_bool32 ma_dr_wav_guid_equal(const ma_uint8 a[16], const ma_uint8 b[16]) +{ + int i; + for (i = 0; i < 16; i += 1) { + if (a[i] != b[i]) { + return MA_FALSE; + } + } + return MA_TRUE; +} +MA_API ma_bool32 ma_dr_wav_fourcc_equal(const ma_uint8* a, const char* b) +{ + return + a[0] == b[0] && + a[1] == b[1] && + a[2] == b[2] && + a[3] == b[3]; +} +#ifdef __MRC__ +#pragma options opt reset +#endif +#endif +/* dr_wav_c end */ +#endif /* MA_DR_WAV_IMPLEMENTATION */ +#endif /* MA_NO_WAV */ + +#if !defined(MA_NO_FLAC) && !defined(MA_NO_DECODING) +#if !defined(MA_DR_FLAC_IMPLEMENTATION) && !defined(MA_DR_FLAC_IMPLEMENTATION) /* For backwards compatibility. Will be removed in version 0.11 for cleanliness. 
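/* A standalone sketch (not part of dr_wav) of the byte-decoding helpers that close out the
   WAV implementation above: integers are assembled little-endian one byte at a time, and
   ma_dr_wav_bytes_to_f32 reinterprets the assembled 32-bit pattern as an IEEE-754 float
   through a union, the usual well-defined way to type-pun in C (unlike pointer casting). */
#include <assert.h>
#include <stdint.h>

static uint32_t bytes_to_u32_le(const uint8_t* d)
{
    return ((uint32_t)d[0] << 0) | ((uint32_t)d[1] << 8) | ((uint32_t)d[2] << 16) | ((uint32_t)d[3] << 24);
}

static float bytes_to_f32_le(const uint8_t* d)
{
    union { uint32_t u32; float f32; } v;
    v.u32 = bytes_to_u32_le(d);
    return v.f32;
}

int main(void)
{
    const uint8_t one[4] = {0x00, 0x00, 0x80, 0x3F};   /* 0x3F800000 == 1.0f */
    assert(bytes_to_u32_le(one) == 0x3F800000u);
    assert(bytes_to_f32_le(one) == 1.0f);
    return 0;
}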
*/ +/* dr_flac_c begin */ +#ifndef ma_dr_flac_c +#define ma_dr_flac_c +#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) +#pragma GCC diagnostic push +#if __GNUC__ >= 7 +#pragma GCC diagnostic ignored "-Wimplicit-fallthrough" +#endif +#endif +#ifdef __linux__ +#ifndef _BSD_SOURCE +#define _BSD_SOURCE +#endif +#ifndef _DEFAULT_SOURCE +#define _DEFAULT_SOURCE +#endif +#ifndef __USE_BSD +#define __USE_BSD +#endif +#include +#endif +#include +#include +#if !defined(MA_DR_FLAC_NO_SIMD) +#if defined(MA_X64) || defined(MA_X86) +#if defined(_MSC_VER) && !defined(__clang__) +#if _MSC_VER >= 1400 && !defined(MA_DR_FLAC_NO_SSE2) +#define MA_DR_FLAC_SUPPORT_SSE2 +#endif +#if _MSC_VER >= 1600 && !defined(MA_DR_FLAC_NO_SSE41) +#define MA_DR_FLAC_SUPPORT_SSE41 +#endif +#elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) +#if defined(__SSE2__) && !defined(MA_DR_FLAC_NO_SSE2) +#define MA_DR_FLAC_SUPPORT_SSE2 +#endif +#if defined(__SSE4_1__) && !defined(MA_DR_FLAC_NO_SSE41) +#define MA_DR_FLAC_SUPPORT_SSE41 +#endif +#endif +#if !defined(__GNUC__) && !defined(__clang__) && defined(__has_include) +#if !defined(MA_DR_FLAC_SUPPORT_SSE2) && !defined(MA_DR_FLAC_NO_SSE2) && __has_include() +#define MA_DR_FLAC_SUPPORT_SSE2 +#endif +#if !defined(MA_DR_FLAC_SUPPORT_SSE41) && !defined(MA_DR_FLAC_NO_SSE41) && __has_include() +#define MA_DR_FLAC_SUPPORT_SSE41 +#endif +#endif +#if defined(MA_DR_FLAC_SUPPORT_SSE41) +#include +#elif defined(MA_DR_FLAC_SUPPORT_SSE2) +#include +#endif +#endif +#if defined(MA_ARM) +#if !defined(MA_DR_FLAC_NO_NEON) && (defined(__ARM_NEON) || defined(__aarch64__) || defined(_M_ARM64)) +#define MA_DR_FLAC_SUPPORT_NEON +#include +#endif +#endif +#endif +#if !defined(MA_DR_FLAC_NO_SIMD) && (defined(MA_X86) || defined(MA_X64)) +#if defined(_MSC_VER) && !defined(__clang__) +#if _MSC_VER >= 1400 +#include +static void ma_dr_flac__cpuid(int info[4], int fid) +{ + __cpuid(info, fid); +} +#else +#define MA_DR_FLAC_NO_CPUID +#endif +#else +#if defined(__GNUC__) || defined(__clang__) +static void ma_dr_flac__cpuid(int info[4], int fid) +{ +#if defined(MA_X86) && defined(__PIC__) + __asm__ __volatile__ ( + "xchg{l} {%%}ebx, %k1;" + "cpuid;" + "xchg{l} {%%}ebx, %k1;" + : "=a"(info[0]), "=&r"(info[1]), "=c"(info[2]), "=d"(info[3]) : "a"(fid), "c"(0) + ); +#else + __asm__ __volatile__ ( + "cpuid" : "=a"(info[0]), "=b"(info[1]), "=c"(info[2]), "=d"(info[3]) : "a"(fid), "c"(0) + ); +#endif +} +#else +#define MA_DR_FLAC_NO_CPUID +#endif +#endif +#else +#define MA_DR_FLAC_NO_CPUID +#endif +static MA_INLINE ma_bool32 ma_dr_flac_has_sse2(void) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +#if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_DR_FLAC_NO_SSE2) +#if defined(MA_X64) + return MA_TRUE; +#elif (defined(_M_IX86_FP) && _M_IX86_FP == 2) || defined(__SSE2__) + return MA_TRUE; +#else +#if defined(MA_DR_FLAC_NO_CPUID) + return MA_FALSE; +#else + int info[4]; + ma_dr_flac__cpuid(info, 1); + return (info[3] & (1 << 26)) != 0; +#endif +#endif +#else + return MA_FALSE; +#endif +#else + return MA_FALSE; +#endif +} +static MA_INLINE ma_bool32 ma_dr_flac_has_sse41(void) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE41) +#if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_DR_FLAC_NO_SSE41) +#if defined(__SSE4_1__) || defined(__AVX__) + return MA_TRUE; +#else +#if defined(MA_DR_FLAC_NO_CPUID) + return MA_FALSE; +#else + int info[4]; + ma_dr_flac__cpuid(info, 1); + return (info[2] & (1 << 19)) != 0; +#endif +#endif 
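/* The block above first decides at compile time whether SSE2/SSE4.1 paths may exist, then
   ma_dr_flac_has_sse2/ma_dr_flac_has_sse41 confirm support at run time via CPUID leaf 1:
   EDX bit 26 is SSE2 and ECX bit 19 is SSE4.1. A minimal sketch of the same run-time check
   for GCC/Clang on x86, using the compiler's CPU-detection built-ins instead of a hand-rolled
   CPUID call; this is an illustration, not how dr_flac itself does it. */
#if (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__))
#include <stdio.h>

int main(void)
{
    __builtin_cpu_init();   /* required once before __builtin_cpu_supports on older GCC */
    printf("sse2:   %d\n", __builtin_cpu_supports("sse2")   != 0);
    printf("sse4.1: %d\n", __builtin_cpu_supports("sse4.1") != 0);
    return 0;
}
#endif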
+#else + return MA_FALSE; +#endif +#else + return MA_FALSE; +#endif +} +#if defined(_MSC_VER) && _MSC_VER >= 1500 && (defined(MA_X86) || defined(MA_X64)) && !defined(__clang__) +#define MA_DR_FLAC_HAS_LZCNT_INTRINSIC +#elif (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))) +#define MA_DR_FLAC_HAS_LZCNT_INTRINSIC +#elif defined(__clang__) +#if defined(__has_builtin) +#if __has_builtin(__builtin_clzll) || __has_builtin(__builtin_clzl) +#define MA_DR_FLAC_HAS_LZCNT_INTRINSIC +#endif +#endif +#endif +#if defined(_MSC_VER) && _MSC_VER >= 1400 && !defined(__clang__) +#define MA_DR_FLAC_HAS_BYTESWAP16_INTRINSIC +#define MA_DR_FLAC_HAS_BYTESWAP32_INTRINSIC +#define MA_DR_FLAC_HAS_BYTESWAP64_INTRINSIC +#elif defined(__clang__) +#if defined(__has_builtin) +#if __has_builtin(__builtin_bswap16) +#define MA_DR_FLAC_HAS_BYTESWAP16_INTRINSIC +#endif +#if __has_builtin(__builtin_bswap32) +#define MA_DR_FLAC_HAS_BYTESWAP32_INTRINSIC +#endif +#if __has_builtin(__builtin_bswap64) +#define MA_DR_FLAC_HAS_BYTESWAP64_INTRINSIC +#endif +#endif +#elif defined(__GNUC__) +#if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) +#define MA_DR_FLAC_HAS_BYTESWAP32_INTRINSIC +#define MA_DR_FLAC_HAS_BYTESWAP64_INTRINSIC +#endif +#if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) +#define MA_DR_FLAC_HAS_BYTESWAP16_INTRINSIC +#endif +#elif defined(__WATCOMC__) && defined(__386__) +#define MA_DR_FLAC_HAS_BYTESWAP16_INTRINSIC +#define MA_DR_FLAC_HAS_BYTESWAP32_INTRINSIC +#define MA_DR_FLAC_HAS_BYTESWAP64_INTRINSIC +extern __inline ma_uint16 _watcom_bswap16(ma_uint16); +extern __inline ma_uint32 _watcom_bswap32(ma_uint32); +extern __inline ma_uint64 _watcom_bswap64(ma_uint64); +#pragma aux _watcom_bswap16 = \ + "xchg al, ah" \ + parm [ax] \ + value [ax] \ + modify nomemory; +#pragma aux _watcom_bswap32 = \ + "bswap eax" \ + parm [eax] \ + value [eax] \ + modify nomemory; +#pragma aux _watcom_bswap64 = \ + "bswap eax" \ + "bswap edx" \ + "xchg eax,edx" \ + parm [eax edx] \ + value [eax edx] \ + modify nomemory; +#endif +#ifndef MA_DR_FLAC_ASSERT +#include +#define MA_DR_FLAC_ASSERT(expression) assert(expression) +#endif +#ifndef MA_DR_FLAC_MALLOC +#define MA_DR_FLAC_MALLOC(sz) malloc((sz)) +#endif +#ifndef MA_DR_FLAC_REALLOC +#define MA_DR_FLAC_REALLOC(p, sz) realloc((p), (sz)) +#endif +#ifndef MA_DR_FLAC_FREE +#define MA_DR_FLAC_FREE(p) free((p)) +#endif +#ifndef MA_DR_FLAC_COPY_MEMORY +#define MA_DR_FLAC_COPY_MEMORY(dst, src, sz) memcpy((dst), (src), (sz)) +#endif +#ifndef MA_DR_FLAC_ZERO_MEMORY +#define MA_DR_FLAC_ZERO_MEMORY(p, sz) memset((p), 0, (sz)) +#endif +#ifndef MA_DR_FLAC_ZERO_OBJECT +#define MA_DR_FLAC_ZERO_OBJECT(p) MA_DR_FLAC_ZERO_MEMORY((p), sizeof(*(p))) +#endif +#define MA_DR_FLAC_MAX_SIMD_VECTOR_SIZE 64 +#define MA_DR_FLAC_SUBFRAME_CONSTANT 0 +#define MA_DR_FLAC_SUBFRAME_VERBATIM 1 +#define MA_DR_FLAC_SUBFRAME_FIXED 8 +#define MA_DR_FLAC_SUBFRAME_LPC 32 +#define MA_DR_FLAC_SUBFRAME_RESERVED 255 +#define MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE 0 +#define MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2 1 +#define MA_DR_FLAC_CHANNEL_ASSIGNMENT_INDEPENDENT 0 +#define MA_DR_FLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE 8 +#define MA_DR_FLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE 9 +#define MA_DR_FLAC_CHANNEL_ASSIGNMENT_MID_SIDE 10 +#define MA_DR_FLAC_SEEKPOINT_SIZE_IN_BYTES 18 +#define MA_DR_FLAC_CUESHEET_TRACK_SIZE_IN_BYTES 36 +#define MA_DR_FLAC_CUESHEET_TRACK_INDEX_SIZE_IN_BYTES 12 +#define ma_dr_flac_align(x, a) ((((x) + (a) - 1) / (a)) * (a)) +MA_API void 
ma_dr_flac_version(ma_uint32* pMajor, ma_uint32* pMinor, ma_uint32* pRevision) +{ + if (pMajor) { + *pMajor = MA_DR_FLAC_VERSION_MAJOR; + } + if (pMinor) { + *pMinor = MA_DR_FLAC_VERSION_MINOR; + } + if (pRevision) { + *pRevision = MA_DR_FLAC_VERSION_REVISION; + } +} +MA_API const char* ma_dr_flac_version_string(void) +{ + return MA_DR_FLAC_VERSION_STRING; +} +#if defined(__has_feature) +#if __has_feature(thread_sanitizer) +#define MA_DR_FLAC_NO_THREAD_SANITIZE __attribute__((no_sanitize("thread"))) +#else +#define MA_DR_FLAC_NO_THREAD_SANITIZE +#endif +#else +#define MA_DR_FLAC_NO_THREAD_SANITIZE +#endif +#if defined(MA_DR_FLAC_HAS_LZCNT_INTRINSIC) +static ma_bool32 ma_dr_flac__gIsLZCNTSupported = MA_FALSE; +#endif +#ifndef MA_DR_FLAC_NO_CPUID +static ma_bool32 ma_dr_flac__gIsSSE2Supported = MA_FALSE; +static ma_bool32 ma_dr_flac__gIsSSE41Supported = MA_FALSE; +MA_DR_FLAC_NO_THREAD_SANITIZE static void ma_dr_flac__init_cpu_caps(void) +{ + static ma_bool32 isCPUCapsInitialized = MA_FALSE; + if (!isCPUCapsInitialized) { +#if defined(MA_DR_FLAC_HAS_LZCNT_INTRINSIC) + int info[4] = {0}; + ma_dr_flac__cpuid(info, 0x80000001); + ma_dr_flac__gIsLZCNTSupported = (info[2] & (1 << 5)) != 0; +#endif + ma_dr_flac__gIsSSE2Supported = ma_dr_flac_has_sse2(); + ma_dr_flac__gIsSSE41Supported = ma_dr_flac_has_sse41(); + isCPUCapsInitialized = MA_TRUE; + } +} +#else +static ma_bool32 ma_dr_flac__gIsNEONSupported = MA_FALSE; +static MA_INLINE ma_bool32 ma_dr_flac__has_neon(void) +{ +#if defined(MA_DR_FLAC_SUPPORT_NEON) +#if defined(MA_ARM) && !defined(MA_DR_FLAC_NO_NEON) +#if (defined(__ARM_NEON) || defined(__aarch64__) || defined(_M_ARM64)) + return MA_TRUE; +#else + return MA_FALSE; +#endif +#else + return MA_FALSE; +#endif +#else + return MA_FALSE; +#endif +} +MA_DR_FLAC_NO_THREAD_SANITIZE static void ma_dr_flac__init_cpu_caps(void) +{ + ma_dr_flac__gIsNEONSupported = ma_dr_flac__has_neon(); +#if defined(MA_DR_FLAC_HAS_LZCNT_INTRINSIC) && defined(MA_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 5) + ma_dr_flac__gIsLZCNTSupported = MA_TRUE; +#endif +} +#endif +static MA_INLINE ma_bool32 ma_dr_flac__is_little_endian(void) +{ +#if defined(MA_X86) || defined(MA_X64) + return MA_TRUE; +#elif defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && __BYTE_ORDER == __LITTLE_ENDIAN + return MA_TRUE; +#else + int n = 1; + return (*(char*)&n) == 1; +#endif +} +static MA_INLINE ma_uint16 ma_dr_flac__swap_endian_uint16(ma_uint16 n) +{ +#ifdef MA_DR_FLAC_HAS_BYTESWAP16_INTRINSIC +#if defined(_MSC_VER) && !defined(__clang__) + return _byteswap_ushort(n); +#elif defined(__GNUC__) || defined(__clang__) + return __builtin_bswap16(n); +#elif defined(__WATCOMC__) && defined(__386__) + return _watcom_bswap16(n); +#else +#error "This compiler does not support the byte swap intrinsic." 
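/* The swap helpers in this region fall back to plain shift/mask code whenever no compiler
   byte-swap intrinsic is available, and the ma_dr_flac__be2host_* wrappers apply the swap
   only on little-endian hosts, since FLAC streams are big endian. A standalone sketch of the
   portable 32-bit fallback with a quick sanity check (not the library's own code path). */
#include <assert.h>
#include <stdint.h>

static uint32_t bswap32_portable(uint32_t n)
{
    return ((n & 0xFF000000u) >> 24) |
           ((n & 0x00FF0000u) >>  8) |
           ((n & 0x0000FF00u) <<  8) |
           ((n & 0x000000FFu) << 24);
}

int main(void)
{
    assert(bswap32_portable(0x11223344u) == 0x44332211u);
    assert(bswap32_portable(bswap32_portable(0xDEADBEEFu)) == 0xDEADBEEFu);   /* swapping twice is the identity */
    return 0;
}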
+#endif +#else + return ((n & 0xFF00) >> 8) | + ((n & 0x00FF) << 8); +#endif +} +static MA_INLINE ma_uint32 ma_dr_flac__swap_endian_uint32(ma_uint32 n) +{ +#ifdef MA_DR_FLAC_HAS_BYTESWAP32_INTRINSIC +#if defined(_MSC_VER) && !defined(__clang__) + return _byteswap_ulong(n); +#elif defined(__GNUC__) || defined(__clang__) +#if defined(MA_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 6) && !defined(MA_64BIT) + ma_uint32 r; + __asm__ __volatile__ ( +#if defined(MA_64BIT) + "rev %w[out], %w[in]" : [out]"=r"(r) : [in]"r"(n) +#else + "rev %[out], %[in]" : [out]"=r"(r) : [in]"r"(n) +#endif + ); + return r; +#else + return __builtin_bswap32(n); +#endif +#elif defined(__WATCOMC__) && defined(__386__) + return _watcom_bswap32(n); +#else +#error "This compiler does not support the byte swap intrinsic." +#endif +#else + return ((n & 0xFF000000) >> 24) | + ((n & 0x00FF0000) >> 8) | + ((n & 0x0000FF00) << 8) | + ((n & 0x000000FF) << 24); +#endif +} +static MA_INLINE ma_uint64 ma_dr_flac__swap_endian_uint64(ma_uint64 n) +{ +#ifdef MA_DR_FLAC_HAS_BYTESWAP64_INTRINSIC +#if defined(_MSC_VER) && !defined(__clang__) + return _byteswap_uint64(n); +#elif defined(__GNUC__) || defined(__clang__) + return __builtin_bswap64(n); +#elif defined(__WATCOMC__) && defined(__386__) + return _watcom_bswap64(n); +#else +#error "This compiler does not support the byte swap intrinsic." +#endif +#else + return ((n & ((ma_uint64)0xFF000000 << 32)) >> 56) | + ((n & ((ma_uint64)0x00FF0000 << 32)) >> 40) | + ((n & ((ma_uint64)0x0000FF00 << 32)) >> 24) | + ((n & ((ma_uint64)0x000000FF << 32)) >> 8) | + ((n & ((ma_uint64)0xFF000000 )) << 8) | + ((n & ((ma_uint64)0x00FF0000 )) << 24) | + ((n & ((ma_uint64)0x0000FF00 )) << 40) | + ((n & ((ma_uint64)0x000000FF )) << 56); +#endif +} +static MA_INLINE ma_uint16 ma_dr_flac__be2host_16(ma_uint16 n) +{ + if (ma_dr_flac__is_little_endian()) { + return ma_dr_flac__swap_endian_uint16(n); + } + return n; +} +static MA_INLINE ma_uint32 ma_dr_flac__be2host_32(ma_uint32 n) +{ + if (ma_dr_flac__is_little_endian()) { + return ma_dr_flac__swap_endian_uint32(n); + } + return n; +} +static MA_INLINE ma_uint32 ma_dr_flac__be2host_32_ptr_unaligned(const void* pData) +{ + const ma_uint8* pNum = (ma_uint8*)pData; + return *(pNum) << 24 | *(pNum+1) << 16 | *(pNum+2) << 8 | *(pNum+3); +} +static MA_INLINE ma_uint64 ma_dr_flac__be2host_64(ma_uint64 n) +{ + if (ma_dr_flac__is_little_endian()) { + return ma_dr_flac__swap_endian_uint64(n); + } + return n; +} +static MA_INLINE ma_uint32 ma_dr_flac__le2host_32(ma_uint32 n) +{ + if (!ma_dr_flac__is_little_endian()) { + return ma_dr_flac__swap_endian_uint32(n); + } + return n; +} +static MA_INLINE ma_uint32 ma_dr_flac__le2host_32_ptr_unaligned(const void* pData) +{ + const ma_uint8* pNum = (ma_uint8*)pData; + return *pNum | *(pNum+1) << 8 | *(pNum+2) << 16 | *(pNum+3) << 24; +} +static MA_INLINE ma_uint32 ma_dr_flac__unsynchsafe_32(ma_uint32 n) +{ + ma_uint32 result = 0; + result |= (n & 0x7F000000) >> 3; + result |= (n & 0x007F0000) >> 2; + result |= (n & 0x00007F00) >> 1; + result |= (n & 0x0000007F) >> 0; + return result; +} +static ma_uint8 ma_dr_flac__crc8_table[] = { + 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15, 0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D, + 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65, 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D, + 0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5, 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD, + 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85, 0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD, + 0xC7, 
0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2, 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA, + 0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2, 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A, + 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32, 0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A, + 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42, 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A, + 0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C, 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4, + 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC, 0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4, + 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C, 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44, + 0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C, 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34, + 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B, 0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63, + 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B, 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13, + 0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB, 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83, + 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB, 0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3 +}; +static ma_uint16 ma_dr_flac__crc16_table[] = { + 0x0000, 0x8005, 0x800F, 0x000A, 0x801B, 0x001E, 0x0014, 0x8011, + 0x8033, 0x0036, 0x003C, 0x8039, 0x0028, 0x802D, 0x8027, 0x0022, + 0x8063, 0x0066, 0x006C, 0x8069, 0x0078, 0x807D, 0x8077, 0x0072, + 0x0050, 0x8055, 0x805F, 0x005A, 0x804B, 0x004E, 0x0044, 0x8041, + 0x80C3, 0x00C6, 0x00CC, 0x80C9, 0x00D8, 0x80DD, 0x80D7, 0x00D2, + 0x00F0, 0x80F5, 0x80FF, 0x00FA, 0x80EB, 0x00EE, 0x00E4, 0x80E1, + 0x00A0, 0x80A5, 0x80AF, 0x00AA, 0x80BB, 0x00BE, 0x00B4, 0x80B1, + 0x8093, 0x0096, 0x009C, 0x8099, 0x0088, 0x808D, 0x8087, 0x0082, + 0x8183, 0x0186, 0x018C, 0x8189, 0x0198, 0x819D, 0x8197, 0x0192, + 0x01B0, 0x81B5, 0x81BF, 0x01BA, 0x81AB, 0x01AE, 0x01A4, 0x81A1, + 0x01E0, 0x81E5, 0x81EF, 0x01EA, 0x81FB, 0x01FE, 0x01F4, 0x81F1, + 0x81D3, 0x01D6, 0x01DC, 0x81D9, 0x01C8, 0x81CD, 0x81C7, 0x01C2, + 0x0140, 0x8145, 0x814F, 0x014A, 0x815B, 0x015E, 0x0154, 0x8151, + 0x8173, 0x0176, 0x017C, 0x8179, 0x0168, 0x816D, 0x8167, 0x0162, + 0x8123, 0x0126, 0x012C, 0x8129, 0x0138, 0x813D, 0x8137, 0x0132, + 0x0110, 0x8115, 0x811F, 0x011A, 0x810B, 0x010E, 0x0104, 0x8101, + 0x8303, 0x0306, 0x030C, 0x8309, 0x0318, 0x831D, 0x8317, 0x0312, + 0x0330, 0x8335, 0x833F, 0x033A, 0x832B, 0x032E, 0x0324, 0x8321, + 0x0360, 0x8365, 0x836F, 0x036A, 0x837B, 0x037E, 0x0374, 0x8371, + 0x8353, 0x0356, 0x035C, 0x8359, 0x0348, 0x834D, 0x8347, 0x0342, + 0x03C0, 0x83C5, 0x83CF, 0x03CA, 0x83DB, 0x03DE, 0x03D4, 0x83D1, + 0x83F3, 0x03F6, 0x03FC, 0x83F9, 0x03E8, 0x83ED, 0x83E7, 0x03E2, + 0x83A3, 0x03A6, 0x03AC, 0x83A9, 0x03B8, 0x83BD, 0x83B7, 0x03B2, + 0x0390, 0x8395, 0x839F, 0x039A, 0x838B, 0x038E, 0x0384, 0x8381, + 0x0280, 0x8285, 0x828F, 0x028A, 0x829B, 0x029E, 0x0294, 0x8291, + 0x82B3, 0x02B6, 0x02BC, 0x82B9, 0x02A8, 0x82AD, 0x82A7, 0x02A2, + 0x82E3, 0x02E6, 0x02EC, 0x82E9, 0x02F8, 0x82FD, 0x82F7, 0x02F2, + 0x02D0, 0x82D5, 0x82DF, 0x02DA, 0x82CB, 0x02CE, 0x02C4, 0x82C1, + 0x8243, 0x0246, 0x024C, 0x8249, 0x0258, 0x825D, 0x8257, 0x0252, + 0x0270, 0x8275, 0x827F, 0x027A, 0x826B, 0x026E, 0x0264, 0x8261, + 0x0220, 0x8225, 0x822F, 0x022A, 0x823B, 0x023E, 0x0234, 0x8231, + 0x8213, 0x0216, 0x021C, 0x8219, 0x0208, 0x820D, 0x8207, 0x0202 +}; +static MA_INLINE ma_uint8 ma_dr_flac_crc8_byte(ma_uint8 crc, ma_uint8 data) +{ + return ma_dr_flac__crc8_table[crc ^ data]; +} +static MA_INLINE ma_uint8 ma_dr_flac_crc8(ma_uint8 crc, ma_uint32 data, ma_uint32 count) +{ 
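/* The two lookup tables above are standard MSB-first CRC tables: CRC-8 with polynomial 0x07
   (used for FLAC frame headers) and CRC-16 with polynomial 0x8005 (used over whole frames).
   A sketch showing how such tables are generated, with a spot check against the first
   entries listed above; this generator is illustrative and not part of dr_flac. */
#include <assert.h>
#include <stdint.h>

static void crc8_build_table(uint8_t table[256], uint8_t poly)
{
    int i, bit;
    for (i = 0; i < 256; ++i) {
        uint8_t r = (uint8_t)i;
        for (bit = 0; bit < 8; ++bit) {
            r = (uint8_t)((r & 0x80) ? ((r << 1) ^ poly) : (r << 1));
        }
        table[i] = r;
    }
}

static void crc16_build_table(uint16_t table[256], uint16_t poly)
{
    int i, bit;
    for (i = 0; i < 256; ++i) {
        uint16_t r = (uint16_t)(i << 8);
        for (bit = 0; bit < 8; ++bit) {
            r = (uint16_t)((r & 0x8000) ? ((r << 1) ^ poly) : (r << 1));
        }
        table[i] = r;
    }
}

int main(void)
{
    uint8_t  t8[256];
    uint16_t t16[256];
    crc8_build_table(t8, 0x07);
    crc16_build_table(t16, 0x8005);
    assert(t8[0]  == 0x00   && t8[1]  == 0x07   && t8[2]  == 0x0E);     /* matches ma_dr_flac__crc8_table  */
    assert(t16[0] == 0x0000 && t16[1] == 0x8005 && t16[2] == 0x800F);   /* matches ma_dr_flac__crc16_table */
    return 0;
}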
+#ifdef MA_DR_FLAC_NO_CRC + (void)crc; + (void)data; + (void)count; + return 0; +#else +#if 0 + ma_uint8 p = 0x07; + for (int i = count-1; i >= 0; --i) { + ma_uint8 bit = (data & (1 << i)) >> i; + if (crc & 0x80) { + crc = ((crc << 1) | bit) ^ p; + } else { + crc = ((crc << 1) | bit); + } + } + return crc; +#else + ma_uint32 wholeBytes; + ma_uint32 leftoverBits; + ma_uint64 leftoverDataMask; + static ma_uint64 leftoverDataMaskTable[8] = { + 0x00, 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3F, 0x7F + }; + MA_DR_FLAC_ASSERT(count <= 32); + wholeBytes = count >> 3; + leftoverBits = count - (wholeBytes*8); + leftoverDataMask = leftoverDataMaskTable[leftoverBits]; + switch (wholeBytes) { + case 4: crc = ma_dr_flac_crc8_byte(crc, (ma_uint8)((data & (0xFF000000UL << leftoverBits)) >> (24 + leftoverBits))); + case 3: crc = ma_dr_flac_crc8_byte(crc, (ma_uint8)((data & (0x00FF0000UL << leftoverBits)) >> (16 + leftoverBits))); + case 2: crc = ma_dr_flac_crc8_byte(crc, (ma_uint8)((data & (0x0000FF00UL << leftoverBits)) >> ( 8 + leftoverBits))); + case 1: crc = ma_dr_flac_crc8_byte(crc, (ma_uint8)((data & (0x000000FFUL << leftoverBits)) >> ( 0 + leftoverBits))); + case 0: if (leftoverBits > 0) crc = (ma_uint8)((crc << leftoverBits) ^ ma_dr_flac__crc8_table[(crc >> (8 - leftoverBits)) ^ (data & leftoverDataMask)]); + } + return crc; +#endif +#endif +} +static MA_INLINE ma_uint16 ma_dr_flac_crc16_byte(ma_uint16 crc, ma_uint8 data) +{ + return (crc << 8) ^ ma_dr_flac__crc16_table[(ma_uint8)(crc >> 8) ^ data]; +} +static MA_INLINE ma_uint16 ma_dr_flac_crc16_cache(ma_uint16 crc, ma_dr_flac_cache_t data) +{ +#ifdef MA_64BIT + crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 56) & 0xFF)); + crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 48) & 0xFF)); + crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 40) & 0xFF)); + crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 32) & 0xFF)); +#endif + crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 24) & 0xFF)); + crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 16) & 0xFF)); + crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 8) & 0xFF)); + crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 0) & 0xFF)); + return crc; +} +static MA_INLINE ma_uint16 ma_dr_flac_crc16_bytes(ma_uint16 crc, ma_dr_flac_cache_t data, ma_uint32 byteCount) +{ + switch (byteCount) + { +#ifdef MA_64BIT + case 8: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 56) & 0xFF)); + case 7: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 48) & 0xFF)); + case 6: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 40) & 0xFF)); + case 5: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 32) & 0xFF)); +#endif + case 4: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 24) & 0xFF)); + case 3: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 16) & 0xFF)); + case 2: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 8) & 0xFF)); + case 1: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 0) & 0xFF)); + } + return crc; +} +#if 0 +static MA_INLINE ma_uint16 ma_dr_flac_crc16__32bit(ma_uint16 crc, ma_uint32 data, ma_uint32 count) +{ +#ifdef MA_DR_FLAC_NO_CRC + (void)crc; + (void)data; + (void)count; + return 0; +#else +#if 0 + ma_uint16 p = 0x8005; + for (int i = count-1; i >= 0; --i) { + ma_uint16 bit = (data & (1ULL << i)) >> i; + if (r & 0x8000) { + r = ((r << 1) | bit) ^ p; + } else { + r = ((r << 1) | bit); + } + } + return crc; +#else + ma_uint32 wholeBytes; + ma_uint32 leftoverBits; + ma_uint64 leftoverDataMask; + static ma_uint64 leftoverDataMaskTable[8] = { + 
0x00, 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3F, 0x7F + }; + MA_DR_FLAC_ASSERT(count <= 64); + wholeBytes = count >> 3; + leftoverBits = count & 7; + leftoverDataMask = leftoverDataMaskTable[leftoverBits]; + switch (wholeBytes) { + default: + case 4: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (0xFF000000UL << leftoverBits)) >> (24 + leftoverBits))); + case 3: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (0x00FF0000UL << leftoverBits)) >> (16 + leftoverBits))); + case 2: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (0x0000FF00UL << leftoverBits)) >> ( 8 + leftoverBits))); + case 1: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (0x000000FFUL << leftoverBits)) >> ( 0 + leftoverBits))); + case 0: if (leftoverBits > 0) crc = (crc << leftoverBits) ^ ma_dr_flac__crc16_table[(crc >> (16 - leftoverBits)) ^ (data & leftoverDataMask)]; + } + return crc; +#endif +#endif +} +static MA_INLINE ma_uint16 ma_dr_flac_crc16__64bit(ma_uint16 crc, ma_uint64 data, ma_uint32 count) +{ +#ifdef MA_DR_FLAC_NO_CRC + (void)crc; + (void)data; + (void)count; + return 0; +#else + ma_uint32 wholeBytes; + ma_uint32 leftoverBits; + ma_uint64 leftoverDataMask; + static ma_uint64 leftoverDataMaskTable[8] = { + 0x00, 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3F, 0x7F + }; + MA_DR_FLAC_ASSERT(count <= 64); + wholeBytes = count >> 3; + leftoverBits = count & 7; + leftoverDataMask = leftoverDataMaskTable[leftoverBits]; + switch (wholeBytes) { + default: + case 8: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (((ma_uint64)0xFF000000 << 32) << leftoverBits)) >> (56 + leftoverBits))); + case 7: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (((ma_uint64)0x00FF0000 << 32) << leftoverBits)) >> (48 + leftoverBits))); + case 6: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (((ma_uint64)0x0000FF00 << 32) << leftoverBits)) >> (40 + leftoverBits))); + case 5: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (((ma_uint64)0x000000FF << 32) << leftoverBits)) >> (32 + leftoverBits))); + case 4: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (((ma_uint64)0xFF000000 ) << leftoverBits)) >> (24 + leftoverBits))); + case 3: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (((ma_uint64)0x00FF0000 ) << leftoverBits)) >> (16 + leftoverBits))); + case 2: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (((ma_uint64)0x0000FF00 ) << leftoverBits)) >> ( 8 + leftoverBits))); + case 1: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (((ma_uint64)0x000000FF ) << leftoverBits)) >> ( 0 + leftoverBits))); + case 0: if (leftoverBits > 0) crc = (crc << leftoverBits) ^ ma_dr_flac__crc16_table[(crc >> (16 - leftoverBits)) ^ (data & leftoverDataMask)]; + } + return crc; +#endif +} +static MA_INLINE ma_uint16 ma_dr_flac_crc16(ma_uint16 crc, ma_dr_flac_cache_t data, ma_uint32 count) +{ +#ifdef MA_64BIT + return ma_dr_flac_crc16__64bit(crc, data, count); +#else + return ma_dr_flac_crc16__32bit(crc, data, count); +#endif +} +#endif +#ifdef MA_64BIT +#define ma_dr_flac__be2host__cache_line ma_dr_flac__be2host_64 +#else +#define ma_dr_flac__be2host__cache_line ma_dr_flac__be2host_32 +#endif +#define MA_DR_FLAC_CACHE_L1_SIZE_BYTES(bs) (sizeof((bs)->cache)) +#define MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs) (sizeof((bs)->cache)*8) +#define MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs) (MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs) - (bs)->consumedBits) +#define MA_DR_FLAC_CACHE_L1_SELECTION_MASK(_bitCount) (~((~(ma_dr_flac_cache_t)0) >> (_bitCount))) +#define MA_DR_FLAC_CACHE_L1_SELECTION_SHIFT(bs, _bitCount) (MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs) - 
(_bitCount)) +#define MA_DR_FLAC_CACHE_L1_SELECT(bs, _bitCount) (((bs)->cache) & MA_DR_FLAC_CACHE_L1_SELECTION_MASK(_bitCount)) +#define MA_DR_FLAC_CACHE_L1_SELECT_AND_SHIFT(bs, _bitCount) (MA_DR_FLAC_CACHE_L1_SELECT((bs), (_bitCount)) >> MA_DR_FLAC_CACHE_L1_SELECTION_SHIFT((bs), (_bitCount))) +#define MA_DR_FLAC_CACHE_L1_SELECT_AND_SHIFT_SAFE(bs, _bitCount)(MA_DR_FLAC_CACHE_L1_SELECT((bs), (_bitCount)) >> (MA_DR_FLAC_CACHE_L1_SELECTION_SHIFT((bs), (_bitCount)) & (MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs)-1))) +#define MA_DR_FLAC_CACHE_L2_SIZE_BYTES(bs) (sizeof((bs)->cacheL2)) +#define MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs) (MA_DR_FLAC_CACHE_L2_SIZE_BYTES(bs) / sizeof((bs)->cacheL2[0])) +#define MA_DR_FLAC_CACHE_L2_LINES_REMAINING(bs) (MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs) - (bs)->nextL2Line) +#ifndef MA_DR_FLAC_NO_CRC +static MA_INLINE void ma_dr_flac__reset_crc16(ma_dr_flac_bs* bs) +{ + bs->crc16 = 0; + bs->crc16CacheIgnoredBytes = bs->consumedBits >> 3; +} +static MA_INLINE void ma_dr_flac__update_crc16(ma_dr_flac_bs* bs) +{ + if (bs->crc16CacheIgnoredBytes == 0) { + bs->crc16 = ma_dr_flac_crc16_cache(bs->crc16, bs->crc16Cache); + } else { + bs->crc16 = ma_dr_flac_crc16_bytes(bs->crc16, bs->crc16Cache, MA_DR_FLAC_CACHE_L1_SIZE_BYTES(bs) - bs->crc16CacheIgnoredBytes); + bs->crc16CacheIgnoredBytes = 0; + } +} +static MA_INLINE ma_uint16 ma_dr_flac__flush_crc16(ma_dr_flac_bs* bs) +{ + MA_DR_FLAC_ASSERT((MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs) & 7) == 0); + if (MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs) == 0) { + ma_dr_flac__update_crc16(bs); + } else { + bs->crc16 = ma_dr_flac_crc16_bytes(bs->crc16, bs->crc16Cache >> MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs), (bs->consumedBits >> 3) - bs->crc16CacheIgnoredBytes); + bs->crc16CacheIgnoredBytes = bs->consumedBits >> 3; + } + return bs->crc16; +} +#endif +static MA_INLINE ma_bool32 ma_dr_flac__reload_l1_cache_from_l2(ma_dr_flac_bs* bs) +{ + size_t bytesRead; + size_t alignedL1LineCount; + if (bs->nextL2Line < MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs)) { + bs->cache = bs->cacheL2[bs->nextL2Line++]; + return MA_TRUE; + } + if (bs->unalignedByteCount > 0) { + return MA_FALSE; + } + bytesRead = bs->onRead(bs->pUserData, bs->cacheL2, MA_DR_FLAC_CACHE_L2_SIZE_BYTES(bs)); + bs->nextL2Line = 0; + if (bytesRead == MA_DR_FLAC_CACHE_L2_SIZE_BYTES(bs)) { + bs->cache = bs->cacheL2[bs->nextL2Line++]; + return MA_TRUE; + } + alignedL1LineCount = bytesRead / MA_DR_FLAC_CACHE_L1_SIZE_BYTES(bs); + bs->unalignedByteCount = bytesRead - (alignedL1LineCount * MA_DR_FLAC_CACHE_L1_SIZE_BYTES(bs)); + if (bs->unalignedByteCount > 0) { + bs->unalignedCache = bs->cacheL2[alignedL1LineCount]; + } + if (alignedL1LineCount > 0) { + size_t offset = MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs) - alignedL1LineCount; + size_t i; + for (i = alignedL1LineCount; i > 0; --i) { + bs->cacheL2[i-1 + offset] = bs->cacheL2[i-1]; + } + bs->nextL2Line = (ma_uint32)offset; + bs->cache = bs->cacheL2[bs->nextL2Line++]; + return MA_TRUE; + } else { + bs->nextL2Line = MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs); + return MA_FALSE; + } +} +static ma_bool32 ma_dr_flac__reload_cache(ma_dr_flac_bs* bs) +{ + size_t bytesRead; +#ifndef MA_DR_FLAC_NO_CRC + ma_dr_flac__update_crc16(bs); +#endif + if (ma_dr_flac__reload_l1_cache_from_l2(bs)) { + bs->cache = ma_dr_flac__be2host__cache_line(bs->cache); + bs->consumedBits = 0; +#ifndef MA_DR_FLAC_NO_CRC + bs->crc16Cache = bs->cache; +#endif + return MA_TRUE; + } + bytesRead = bs->unalignedByteCount; + if (bytesRead == 0) { + bs->consumedBits = MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs); + return MA_FALSE; + } + 
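/* The cache macros and the reload logic around them implement an MSB-first bit reader:
   bytes arrive big endian, the L1 cache holds one machine word, and
   MA_DR_FLAC_CACHE_L1_SELECT_AND_SHIFT pulls the top bitCount bits off that word. A much
   simpler conceptual sketch of the same idea, without the L1/L2 caching, CRC tracking or
   error handling; the type and function names are illustrative only. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
    const uint8_t* data;
    size_t         sizeInBytes;
    size_t         bitPos;        /* next bit to read; the MSB of data[0] is bit 0 */
} simple_bit_reader;

static uint32_t sbr_read_bits(simple_bit_reader* br, unsigned int bitCount)   /* bitCount <= 32; no bounds check in this sketch */
{
    uint32_t result = 0;
    while (bitCount > 0) {
        size_t       byteIndex = br->bitPos >> 3;
        unsigned int bitIndex  = (unsigned int)(br->bitPos & 7);              /* 0 == MSB   */
        unsigned int bit       = (br->data[byteIndex] >> (7 - bitIndex)) & 1u;
        result = (result << 1) | bit;
        br->bitPos += 1;
        bitCount   -= 1;
    }
    return result;
}

int main(void)
{
    const uint8_t stream[2] = {0xFF, 0xF8};   /* begins with a FLAC-style 14-bit sync code */
    simple_bit_reader br = {stream, sizeof(stream), 0};
    assert(sbr_read_bits(&br, 14) == 0x3FFE);  /* 0b11111111111110 */
    return 0;
}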
MA_DR_FLAC_ASSERT(bytesRead < MA_DR_FLAC_CACHE_L1_SIZE_BYTES(bs)); + bs->consumedBits = (ma_uint32)(MA_DR_FLAC_CACHE_L1_SIZE_BYTES(bs) - bytesRead) * 8; + bs->cache = ma_dr_flac__be2host__cache_line(bs->unalignedCache); + bs->cache &= MA_DR_FLAC_CACHE_L1_SELECTION_MASK(MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)); + bs->unalignedByteCount = 0; +#ifndef MA_DR_FLAC_NO_CRC + bs->crc16Cache = bs->cache >> bs->consumedBits; + bs->crc16CacheIgnoredBytes = bs->consumedBits >> 3; +#endif + return MA_TRUE; +} +static void ma_dr_flac__reset_cache(ma_dr_flac_bs* bs) +{ + bs->nextL2Line = MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs); + bs->consumedBits = MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs); + bs->cache = 0; + bs->unalignedByteCount = 0; + bs->unalignedCache = 0; +#ifndef MA_DR_FLAC_NO_CRC + bs->crc16Cache = 0; + bs->crc16CacheIgnoredBytes = 0; +#endif +} +static MA_INLINE ma_bool32 ma_dr_flac__read_uint32(ma_dr_flac_bs* bs, unsigned int bitCount, ma_uint32* pResultOut) +{ + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pResultOut != NULL); + MA_DR_FLAC_ASSERT(bitCount > 0); + MA_DR_FLAC_ASSERT(bitCount <= 32); + if (bs->consumedBits == MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs)) { + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + } + if (bitCount <= MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)) { +#ifdef MA_64BIT + *pResultOut = (ma_uint32)MA_DR_FLAC_CACHE_L1_SELECT_AND_SHIFT(bs, bitCount); + bs->consumedBits += bitCount; + bs->cache <<= bitCount; +#else + if (bitCount < MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs)) { + *pResultOut = (ma_uint32)MA_DR_FLAC_CACHE_L1_SELECT_AND_SHIFT(bs, bitCount); + bs->consumedBits += bitCount; + bs->cache <<= bitCount; + } else { + *pResultOut = (ma_uint32)bs->cache; + bs->consumedBits = MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs); + bs->cache = 0; + } +#endif + return MA_TRUE; + } else { + ma_uint32 bitCountHi = MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs); + ma_uint32 bitCountLo = bitCount - bitCountHi; + ma_uint32 resultHi; + MA_DR_FLAC_ASSERT(bitCountHi > 0); + MA_DR_FLAC_ASSERT(bitCountHi < 32); + resultHi = (ma_uint32)MA_DR_FLAC_CACHE_L1_SELECT_AND_SHIFT(bs, bitCountHi); + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + if (bitCountLo > MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)) { + return MA_FALSE; + } + *pResultOut = (resultHi << bitCountLo) | (ma_uint32)MA_DR_FLAC_CACHE_L1_SELECT_AND_SHIFT(bs, bitCountLo); + bs->consumedBits += bitCountLo; + bs->cache <<= bitCountLo; + return MA_TRUE; + } +} +static ma_bool32 ma_dr_flac__read_int32(ma_dr_flac_bs* bs, unsigned int bitCount, ma_int32* pResult) +{ + ma_uint32 result; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pResult != NULL); + MA_DR_FLAC_ASSERT(bitCount > 0); + MA_DR_FLAC_ASSERT(bitCount <= 32); + if (!ma_dr_flac__read_uint32(bs, bitCount, &result)) { + return MA_FALSE; + } + if (bitCount < 32) { + ma_uint32 signbit; + signbit = ((result >> (bitCount-1)) & 0x01); + result |= (~signbit + 1) << bitCount; + } + *pResult = (ma_int32)result; + return MA_TRUE; +} +#ifdef MA_64BIT +static ma_bool32 ma_dr_flac__read_uint64(ma_dr_flac_bs* bs, unsigned int bitCount, ma_uint64* pResultOut) +{ + ma_uint32 resultHi; + ma_uint32 resultLo; + MA_DR_FLAC_ASSERT(bitCount <= 64); + MA_DR_FLAC_ASSERT(bitCount > 32); + if (!ma_dr_flac__read_uint32(bs, bitCount - 32, &resultHi)) { + return MA_FALSE; + } + if (!ma_dr_flac__read_uint32(bs, 32, &resultLo)) { + return MA_FALSE; + } + *pResultOut = (((ma_uint64)resultHi) << 32) | ((ma_uint64)resultLo); + return MA_TRUE; +} +#endif +#if 0 +static ma_bool32 ma_dr_flac__read_int64(ma_dr_flac_bs* bs, unsigned 
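/* ma_dr_flac__read_int32 above sign-extends an arbitrary-width two's-complement value by
   replicating its top bit into the unused high bits: (~signbit + 1) evaluates to 0x00000000
   for a positive value and 0xFFFFFFFF for a negative one, and shifting that left by bitCount
   fills everything above the value. A standalone check of the same trick (not library code). */
#include <assert.h>
#include <stdint.h>

static int32_t sign_extend(uint32_t value, unsigned int bitCount)   /* 1 <= bitCount <= 31 */
{
    uint32_t signbit = (value >> (bitCount - 1)) & 0x01;
    value |= (~signbit + 1) << bitCount;     /* zero or all-ones above the value's top bit */
    return (int32_t)value;                   /* relies on two's complement, like the library */
}

int main(void)
{
    assert(sign_extend(0x0F, 5) ==  15);     /* 0b01111 stays positive       */
    assert(sign_extend(0x1F, 5) ==  -1);     /* 0b11111 is -1 in 5 bits      */
    assert(sign_extend(0x10, 5) == -16);     /* 0b10000 is the 5-bit minimum */
    return 0;
}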
int bitCount, ma_int64* pResultOut) +{ + ma_uint64 result; + ma_uint64 signbit; + MA_DR_FLAC_ASSERT(bitCount <= 64); + if (!ma_dr_flac__read_uint64(bs, bitCount, &result)) { + return MA_FALSE; + } + signbit = ((result >> (bitCount-1)) & 0x01); + result |= (~signbit + 1) << bitCount; + *pResultOut = (ma_int64)result; + return MA_TRUE; +} +#endif +static ma_bool32 ma_dr_flac__read_uint16(ma_dr_flac_bs* bs, unsigned int bitCount, ma_uint16* pResult) +{ + ma_uint32 result; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pResult != NULL); + MA_DR_FLAC_ASSERT(bitCount > 0); + MA_DR_FLAC_ASSERT(bitCount <= 16); + if (!ma_dr_flac__read_uint32(bs, bitCount, &result)) { + return MA_FALSE; + } + *pResult = (ma_uint16)result; + return MA_TRUE; +} +#if 0 +static ma_bool32 ma_dr_flac__read_int16(ma_dr_flac_bs* bs, unsigned int bitCount, ma_int16* pResult) +{ + ma_int32 result; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pResult != NULL); + MA_DR_FLAC_ASSERT(bitCount > 0); + MA_DR_FLAC_ASSERT(bitCount <= 16); + if (!ma_dr_flac__read_int32(bs, bitCount, &result)) { + return MA_FALSE; + } + *pResult = (ma_int16)result; + return MA_TRUE; +} +#endif +static ma_bool32 ma_dr_flac__read_uint8(ma_dr_flac_bs* bs, unsigned int bitCount, ma_uint8* pResult) +{ + ma_uint32 result; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pResult != NULL); + MA_DR_FLAC_ASSERT(bitCount > 0); + MA_DR_FLAC_ASSERT(bitCount <= 8); + if (!ma_dr_flac__read_uint32(bs, bitCount, &result)) { + return MA_FALSE; + } + *pResult = (ma_uint8)result; + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__read_int8(ma_dr_flac_bs* bs, unsigned int bitCount, ma_int8* pResult) +{ + ma_int32 result; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pResult != NULL); + MA_DR_FLAC_ASSERT(bitCount > 0); + MA_DR_FLAC_ASSERT(bitCount <= 8); + if (!ma_dr_flac__read_int32(bs, bitCount, &result)) { + return MA_FALSE; + } + *pResult = (ma_int8)result; + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__seek_bits(ma_dr_flac_bs* bs, size_t bitsToSeek) +{ + if (bitsToSeek <= MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)) { + bs->consumedBits += (ma_uint32)bitsToSeek; + bs->cache <<= bitsToSeek; + return MA_TRUE; + } else { + bitsToSeek -= MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs); + bs->consumedBits += MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs); + bs->cache = 0; +#ifdef MA_64BIT + while (bitsToSeek >= MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs)) { + ma_uint64 bin; + if (!ma_dr_flac__read_uint64(bs, MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs), &bin)) { + return MA_FALSE; + } + bitsToSeek -= MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs); + } +#else + while (bitsToSeek >= MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs)) { + ma_uint32 bin; + if (!ma_dr_flac__read_uint32(bs, MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs), &bin)) { + return MA_FALSE; + } + bitsToSeek -= MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs); + } +#endif + while (bitsToSeek >= 8) { + ma_uint8 bin; + if (!ma_dr_flac__read_uint8(bs, 8, &bin)) { + return MA_FALSE; + } + bitsToSeek -= 8; + } + if (bitsToSeek > 0) { + ma_uint8 bin; + if (!ma_dr_flac__read_uint8(bs, (ma_uint32)bitsToSeek, &bin)) { + return MA_FALSE; + } + bitsToSeek = 0; + } + MA_DR_FLAC_ASSERT(bitsToSeek == 0); + return MA_TRUE; + } +} +static ma_bool32 ma_dr_flac__find_and_seek_to_next_sync_code(ma_dr_flac_bs* bs) +{ + MA_DR_FLAC_ASSERT(bs != NULL); + if (!ma_dr_flac__seek_bits(bs, MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs) & 7)) { + return MA_FALSE; + } + for (;;) { + ma_uint8 hi; +#ifndef MA_DR_FLAC_NO_CRC + ma_dr_flac__reset_crc16(bs); +#endif + if (!ma_dr_flac__read_uint8(bs, 8, &hi)) { + return 
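/* The sync search that begins above works byte-aligned: it reads one byte, and if that byte
   is 0xFF it reads six more bits and compares them with 0x3E, which together form FLAC's
   14-bit frame sync code 0b11111111111110. A buffer-based sketch of the same test (the real
   code streams through the bit reader and also restarts the frame CRC at each candidate). */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Returns the byte offset of the first byte-aligned frame sync code, or (size_t)-1. */
static size_t find_flac_sync(const uint8_t* p, size_t sizeInBytes)
{
    size_t i;
    for (i = 0; i + 1 < sizeInBytes; ++i) {
        if (p[i] == 0xFF && (p[i + 1] >> 2) == 0x3E) {   /* top six bits of the next byte */
            return i;
        }
    }
    return (size_t)-1;
}

int main(void)
{
    const uint8_t buf[] = {0x00, 0x12, 0xFF, 0xF8, 0xC9};   /* 0xFF 0xF8 -> sync at offset 2 */
    assert(find_flac_sync(buf, sizeof(buf)) == 2);
    return 0;
}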
MA_FALSE; + } + if (hi == 0xFF) { + ma_uint8 lo; + if (!ma_dr_flac__read_uint8(bs, 6, &lo)) { + return MA_FALSE; + } + if (lo == 0x3E) { + return MA_TRUE; + } else { + if (!ma_dr_flac__seek_bits(bs, MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs) & 7)) { + return MA_FALSE; + } + } + } + } +} +#if defined(MA_DR_FLAC_HAS_LZCNT_INTRINSIC) +#define MA_DR_FLAC_IMPLEMENT_CLZ_LZCNT +#endif +#if defined(_MSC_VER) && _MSC_VER >= 1400 && (defined(MA_X64) || defined(MA_X86)) && !defined(__clang__) +#define MA_DR_FLAC_IMPLEMENT_CLZ_MSVC +#endif +#if defined(__WATCOMC__) && defined(__386__) +#define MA_DR_FLAC_IMPLEMENT_CLZ_WATCOM +#endif +#ifdef __MRC__ +#include +#define MA_DR_FLAC_IMPLEMENT_CLZ_MRC +#endif +static MA_INLINE ma_uint32 ma_dr_flac__clz_software(ma_dr_flac_cache_t x) +{ + ma_uint32 n; + static ma_uint32 clz_table_4[] = { + 0, + 4, + 3, 3, + 2, 2, 2, 2, + 1, 1, 1, 1, 1, 1, 1, 1 + }; + if (x == 0) { + return sizeof(x)*8; + } + n = clz_table_4[x >> (sizeof(x)*8 - 4)]; + if (n == 0) { +#ifdef MA_64BIT + if ((x & ((ma_uint64)0xFFFFFFFF << 32)) == 0) { n = 32; x <<= 32; } + if ((x & ((ma_uint64)0xFFFF0000 << 32)) == 0) { n += 16; x <<= 16; } + if ((x & ((ma_uint64)0xFF000000 << 32)) == 0) { n += 8; x <<= 8; } + if ((x & ((ma_uint64)0xF0000000 << 32)) == 0) { n += 4; x <<= 4; } +#else + if ((x & 0xFFFF0000) == 0) { n = 16; x <<= 16; } + if ((x & 0xFF000000) == 0) { n += 8; x <<= 8; } + if ((x & 0xF0000000) == 0) { n += 4; x <<= 4; } +#endif + n += clz_table_4[x >> (sizeof(x)*8 - 4)]; + } + return n - 1; +} +#ifdef MA_DR_FLAC_IMPLEMENT_CLZ_LZCNT +static MA_INLINE ma_bool32 ma_dr_flac__is_lzcnt_supported(void) +{ +#if defined(MA_DR_FLAC_HAS_LZCNT_INTRINSIC) && defined(MA_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 5) + return MA_TRUE; +#elif defined(__MRC__) + return MA_TRUE; +#else +#ifdef MA_DR_FLAC_HAS_LZCNT_INTRINSIC + return ma_dr_flac__gIsLZCNTSupported; +#else + return MA_FALSE; +#endif +#endif +} +static MA_INLINE ma_uint32 ma_dr_flac__clz_lzcnt(ma_dr_flac_cache_t x) +{ +#if defined(_MSC_VER) +#ifdef MA_64BIT + return (ma_uint32)__lzcnt64(x); +#else + return (ma_uint32)__lzcnt(x); +#endif +#else +#if defined(__GNUC__) || defined(__clang__) +#if defined(MA_X64) + { + ma_uint64 r; + __asm__ __volatile__ ( + "lzcnt{ %1, %0| %0, %1}" : "=r"(r) : "r"(x) : "cc" + ); + return (ma_uint32)r; + } +#elif defined(MA_X86) + { + ma_uint32 r; + __asm__ __volatile__ ( + "lzcnt{l %1, %0| %0, %1}" : "=r"(r) : "r"(x) : "cc" + ); + return r; + } +#elif defined(MA_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 5) && !defined(MA_64BIT) + { + unsigned int r; + __asm__ __volatile__ ( +#if defined(MA_64BIT) + "clz %w[out], %w[in]" : [out]"=r"(r) : [in]"r"(x) +#else + "clz %[out], %[in]" : [out]"=r"(r) : [in]"r"(x) +#endif + ); + return r; + } +#else + if (x == 0) { + return sizeof(x)*8; + } +#ifdef MA_64BIT + return (ma_uint32)__builtin_clzll((ma_uint64)x); +#else + return (ma_uint32)__builtin_clzl((ma_uint32)x); +#endif +#endif +#else +#error "This compiler does not support the lzcnt intrinsic." 
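/* ma_dr_flac__clz above (and ma_dr_flac__seek_past_next_set_bit below it) use count-leading-
   zeros so the unary parts of Rice-coded residuals can be decoded in one step instead of one
   bit at a time. A portable sketch: prefer the compiler builtin when present, otherwise fall
   back to a simple shift loop equivalent in spirit to the table-driven software path above. */
#include <assert.h>
#include <stdint.h>

static unsigned int clz32(uint32_t x)
{
    if (x == 0) {
        return 32;
    }
#if defined(__GNUC__) || defined(__clang__)
    return (unsigned int)__builtin_clz(x);       /* undefined for 0, which is handled above */
#else
    {
        unsigned int n = 0;
        while ((x & 0x80000000u) == 0) {         /* simple fallback; the library uses a 4-bit table */
            x <<= 1;
            n  += 1;
        }
        return n;
    }
#endif
}

int main(void)
{
    assert(clz32(0x80000000u) == 0);
    assert(clz32(0x00000001u) == 31);
    assert(clz32(0x0000FFFFu) == 16);
    return 0;
}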
+#endif +#endif +} +#endif +#ifdef MA_DR_FLAC_IMPLEMENT_CLZ_MSVC +#include <intrin.h> +static MA_INLINE ma_uint32 ma_dr_flac__clz_msvc(ma_dr_flac_cache_t x) +{ + ma_uint32 n; + if (x == 0) { + return sizeof(x)*8; + } +#ifdef MA_64BIT + _BitScanReverse64((unsigned long*)&n, x); +#else + _BitScanReverse((unsigned long*)&n, x); +#endif + return sizeof(x)*8 - n - 1; +} +#endif +#ifdef MA_DR_FLAC_IMPLEMENT_CLZ_WATCOM +static __inline ma_uint32 ma_dr_flac__clz_watcom (ma_uint32); +#ifdef MA_DR_FLAC_IMPLEMENT_CLZ_WATCOM_LZCNT +#pragma aux ma_dr_flac__clz_watcom_lzcnt = \ + "db 0F3h, 0Fh, 0BDh, 0C0h" \ + parm [eax] \ + value [eax] \ + modify nomemory; +#else +#pragma aux ma_dr_flac__clz_watcom = \ + "bsr eax, eax" \ + "xor eax, 31" \ + parm [eax] nomemory \ + value [eax] \ + modify exact [eax] nomemory; +#endif +#endif +static MA_INLINE ma_uint32 ma_dr_flac__clz(ma_dr_flac_cache_t x) +{ +#ifdef MA_DR_FLAC_IMPLEMENT_CLZ_LZCNT + if (ma_dr_flac__is_lzcnt_supported()) { + return ma_dr_flac__clz_lzcnt(x); + } else +#endif + { +#ifdef MA_DR_FLAC_IMPLEMENT_CLZ_MSVC + return ma_dr_flac__clz_msvc(x); +#elif defined(MA_DR_FLAC_IMPLEMENT_CLZ_WATCOM_LZCNT) + return ma_dr_flac__clz_watcom_lzcnt(x); +#elif defined(MA_DR_FLAC_IMPLEMENT_CLZ_WATCOM) + return (x == 0) ? sizeof(x)*8 : ma_dr_flac__clz_watcom(x); +#elif defined(__MRC__) + return __cntlzw(x); +#else + return ma_dr_flac__clz_software(x); +#endif + } +} +static MA_INLINE ma_bool32 ma_dr_flac__seek_past_next_set_bit(ma_dr_flac_bs* bs, unsigned int* pOffsetOut) +{ + ma_uint32 zeroCounter = 0; + ma_uint32 setBitOffsetPlus1; + while (bs->cache == 0) { + zeroCounter += (ma_uint32)MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs); + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + } + if (bs->cache == 1) { + *pOffsetOut = zeroCounter + (ma_uint32)MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs) - 1; + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + return MA_TRUE; + } + setBitOffsetPlus1 = ma_dr_flac__clz(bs->cache); + setBitOffsetPlus1 += 1; + if (setBitOffsetPlus1 > MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)) { + return MA_FALSE; + } + bs->consumedBits += setBitOffsetPlus1; + bs->cache <<= setBitOffsetPlus1; + *pOffsetOut = zeroCounter + setBitOffsetPlus1 - 1; + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__seek_to_byte(ma_dr_flac_bs* bs, ma_uint64 offsetFromStart) +{ + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(offsetFromStart > 0); + if (offsetFromStart > 0x7FFFFFFF) { + ma_uint64 bytesRemaining = offsetFromStart; + if (!bs->onSeek(bs->pUserData, 0x7FFFFFFF, ma_dr_flac_seek_origin_start)) { + return MA_FALSE; + } + bytesRemaining -= 0x7FFFFFFF; + while (bytesRemaining > 0x7FFFFFFF) { + if (!bs->onSeek(bs->pUserData, 0x7FFFFFFF, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + bytesRemaining -= 0x7FFFFFFF; + } + if (bytesRemaining > 0) { + if (!bs->onSeek(bs->pUserData, (int)bytesRemaining, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + } + } else { + if (!bs->onSeek(bs->pUserData, (int)offsetFromStart, ma_dr_flac_seek_origin_start)) { + return MA_FALSE; + } + } + ma_dr_flac__reset_cache(bs); + return MA_TRUE; +} +static ma_result ma_dr_flac__read_utf8_coded_number(ma_dr_flac_bs* bs, ma_uint64* pNumberOut, ma_uint8* pCRCOut) +{ + ma_uint8 crc; + ma_uint64 result; + ma_uint8 utf8[7] = {0}; + int byteCount; + int i; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pNumberOut != NULL); + MA_DR_FLAC_ASSERT(pCRCOut != NULL); + crc = *pCRCOut; + if (!ma_dr_flac__read_uint8(bs, 8, utf8)) { + *pNumberOut = 0; + return MA_AT_END; + } + 
crc = ma_dr_flac_crc8(crc, utf8[0], 8); + if ((utf8[0] & 0x80) == 0) { + *pNumberOut = utf8[0]; + *pCRCOut = crc; + return MA_SUCCESS; + } + if ((utf8[0] & 0xE0) == 0xC0) { + byteCount = 2; + } else if ((utf8[0] & 0xF0) == 0xE0) { + byteCount = 3; + } else if ((utf8[0] & 0xF8) == 0xF0) { + byteCount = 4; + } else if ((utf8[0] & 0xFC) == 0xF8) { + byteCount = 5; + } else if ((utf8[0] & 0xFE) == 0xFC) { + byteCount = 6; + } else if ((utf8[0] & 0xFF) == 0xFE) { + byteCount = 7; + } else { + *pNumberOut = 0; + return MA_CRC_MISMATCH; + } + MA_DR_FLAC_ASSERT(byteCount > 1); + result = (ma_uint64)(utf8[0] & (0xFF >> (byteCount + 1))); + for (i = 1; i < byteCount; ++i) { + if (!ma_dr_flac__read_uint8(bs, 8, utf8 + i)) { + *pNumberOut = 0; + return MA_AT_END; + } + crc = ma_dr_flac_crc8(crc, utf8[i], 8); + result = (result << 6) | (utf8[i] & 0x3F); + } + *pNumberOut = result; + *pCRCOut = crc; + return MA_SUCCESS; +} +static MA_INLINE ma_uint32 ma_dr_flac__ilog2_u32(ma_uint32 x) +{ +#if 1 + ma_uint32 result = 0; + while (x > 0) { + result += 1; + x >>= 1; + } + return result; +#endif +} +static MA_INLINE ma_bool32 ma_dr_flac__use_64_bit_prediction(ma_uint32 bitsPerSample, ma_uint32 order, ma_uint32 precision) +{ + return bitsPerSample + precision + ma_dr_flac__ilog2_u32(order) > 32; +} +#if defined(__clang__) +__attribute__((no_sanitize("signed-integer-overflow"))) +#endif +static MA_INLINE ma_int32 ma_dr_flac__calculate_prediction_32(ma_uint32 order, ma_int32 shift, const ma_int32* coefficients, ma_int32* pDecodedSamples) +{ + ma_int32 prediction = 0; + MA_DR_FLAC_ASSERT(order <= 32); + switch (order) + { + case 32: prediction += coefficients[31] * pDecodedSamples[-32]; + case 31: prediction += coefficients[30] * pDecodedSamples[-31]; + case 30: prediction += coefficients[29] * pDecodedSamples[-30]; + case 29: prediction += coefficients[28] * pDecodedSamples[-29]; + case 28: prediction += coefficients[27] * pDecodedSamples[-28]; + case 27: prediction += coefficients[26] * pDecodedSamples[-27]; + case 26: prediction += coefficients[25] * pDecodedSamples[-26]; + case 25: prediction += coefficients[24] * pDecodedSamples[-25]; + case 24: prediction += coefficients[23] * pDecodedSamples[-24]; + case 23: prediction += coefficients[22] * pDecodedSamples[-23]; + case 22: prediction += coefficients[21] * pDecodedSamples[-22]; + case 21: prediction += coefficients[20] * pDecodedSamples[-21]; + case 20: prediction += coefficients[19] * pDecodedSamples[-20]; + case 19: prediction += coefficients[18] * pDecodedSamples[-19]; + case 18: prediction += coefficients[17] * pDecodedSamples[-18]; + case 17: prediction += coefficients[16] * pDecodedSamples[-17]; + case 16: prediction += coefficients[15] * pDecodedSamples[-16]; + case 15: prediction += coefficients[14] * pDecodedSamples[-15]; + case 14: prediction += coefficients[13] * pDecodedSamples[-14]; + case 13: prediction += coefficients[12] * pDecodedSamples[-13]; + case 12: prediction += coefficients[11] * pDecodedSamples[-12]; + case 11: prediction += coefficients[10] * pDecodedSamples[-11]; + case 10: prediction += coefficients[ 9] * pDecodedSamples[-10]; + case 9: prediction += coefficients[ 8] * pDecodedSamples[- 9]; + case 8: prediction += coefficients[ 7] * pDecodedSamples[- 8]; + case 7: prediction += coefficients[ 6] * pDecodedSamples[- 7]; + case 6: prediction += coefficients[ 5] * pDecodedSamples[- 6]; + case 5: prediction += coefficients[ 4] * pDecodedSamples[- 5]; + case 4: prediction += coefficients[ 3] * pDecodedSamples[- 4]; + case 3: prediction 
+= coefficients[ 2] * pDecodedSamples[- 3]; + case 2: prediction += coefficients[ 1] * pDecodedSamples[- 2]; + case 1: prediction += coefficients[ 0] * pDecodedSamples[- 1]; + } + return (ma_int32)(prediction >> shift); +} +static MA_INLINE ma_int32 ma_dr_flac__calculate_prediction_64(ma_uint32 order, ma_int32 shift, const ma_int32* coefficients, ma_int32* pDecodedSamples) +{ + ma_int64 prediction; + MA_DR_FLAC_ASSERT(order <= 32); +#ifndef MA_64BIT + if (order == 8) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += coefficients[3] * (ma_int64)pDecodedSamples[-4]; + prediction += coefficients[4] * (ma_int64)pDecodedSamples[-5]; + prediction += coefficients[5] * (ma_int64)pDecodedSamples[-6]; + prediction += coefficients[6] * (ma_int64)pDecodedSamples[-7]; + prediction += coefficients[7] * (ma_int64)pDecodedSamples[-8]; + } + else if (order == 7) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += coefficients[3] * (ma_int64)pDecodedSamples[-4]; + prediction += coefficients[4] * (ma_int64)pDecodedSamples[-5]; + prediction += coefficients[5] * (ma_int64)pDecodedSamples[-6]; + prediction += coefficients[6] * (ma_int64)pDecodedSamples[-7]; + } + else if (order == 3) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + } + else if (order == 6) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += coefficients[3] * (ma_int64)pDecodedSamples[-4]; + prediction += coefficients[4] * (ma_int64)pDecodedSamples[-5]; + prediction += coefficients[5] * (ma_int64)pDecodedSamples[-6]; + } + else if (order == 5) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += coefficients[3] * (ma_int64)pDecodedSamples[-4]; + prediction += coefficients[4] * (ma_int64)pDecodedSamples[-5]; + } + else if (order == 4) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += coefficients[3] * (ma_int64)pDecodedSamples[-4]; + } + else if (order == 12) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += coefficients[3] * (ma_int64)pDecodedSamples[-4]; + prediction += coefficients[4] * (ma_int64)pDecodedSamples[-5]; + prediction += coefficients[5] * (ma_int64)pDecodedSamples[-6]; + prediction += coefficients[6] * (ma_int64)pDecodedSamples[-7]; + prediction += coefficients[7] * (ma_int64)pDecodedSamples[-8]; + prediction += coefficients[8] * (ma_int64)pDecodedSamples[-9]; + prediction += coefficients[9] * (ma_int64)pDecodedSamples[-10]; + prediction += coefficients[10] * (ma_int64)pDecodedSamples[-11]; + prediction += coefficients[11] * 
(ma_int64)pDecodedSamples[-12]; + } + else if (order == 2) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + } + else if (order == 1) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + } + else if (order == 10) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += coefficients[3] * (ma_int64)pDecodedSamples[-4]; + prediction += coefficients[4] * (ma_int64)pDecodedSamples[-5]; + prediction += coefficients[5] * (ma_int64)pDecodedSamples[-6]; + prediction += coefficients[6] * (ma_int64)pDecodedSamples[-7]; + prediction += coefficients[7] * (ma_int64)pDecodedSamples[-8]; + prediction += coefficients[8] * (ma_int64)pDecodedSamples[-9]; + prediction += coefficients[9] * (ma_int64)pDecodedSamples[-10]; + } + else if (order == 9) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += coefficients[3] * (ma_int64)pDecodedSamples[-4]; + prediction += coefficients[4] * (ma_int64)pDecodedSamples[-5]; + prediction += coefficients[5] * (ma_int64)pDecodedSamples[-6]; + prediction += coefficients[6] * (ma_int64)pDecodedSamples[-7]; + prediction += coefficients[7] * (ma_int64)pDecodedSamples[-8]; + prediction += coefficients[8] * (ma_int64)pDecodedSamples[-9]; + } + else if (order == 11) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += coefficients[3] * (ma_int64)pDecodedSamples[-4]; + prediction += coefficients[4] * (ma_int64)pDecodedSamples[-5]; + prediction += coefficients[5] * (ma_int64)pDecodedSamples[-6]; + prediction += coefficients[6] * (ma_int64)pDecodedSamples[-7]; + prediction += coefficients[7] * (ma_int64)pDecodedSamples[-8]; + prediction += coefficients[8] * (ma_int64)pDecodedSamples[-9]; + prediction += coefficients[9] * (ma_int64)pDecodedSamples[-10]; + prediction += coefficients[10] * (ma_int64)pDecodedSamples[-11]; + } + else + { + int j; + prediction = 0; + for (j = 0; j < (int)order; ++j) { + prediction += coefficients[j] * (ma_int64)pDecodedSamples[-j-1]; + } + } +#endif +#ifdef MA_64BIT + prediction = 0; + switch (order) + { + case 32: prediction += coefficients[31] * (ma_int64)pDecodedSamples[-32]; + case 31: prediction += coefficients[30] * (ma_int64)pDecodedSamples[-31]; + case 30: prediction += coefficients[29] * (ma_int64)pDecodedSamples[-30]; + case 29: prediction += coefficients[28] * (ma_int64)pDecodedSamples[-29]; + case 28: prediction += coefficients[27] * (ma_int64)pDecodedSamples[-28]; + case 27: prediction += coefficients[26] * (ma_int64)pDecodedSamples[-27]; + case 26: prediction += coefficients[25] * (ma_int64)pDecodedSamples[-26]; + case 25: prediction += coefficients[24] * (ma_int64)pDecodedSamples[-25]; + case 24: prediction += coefficients[23] * (ma_int64)pDecodedSamples[-24]; + case 23: prediction += coefficients[22] * (ma_int64)pDecodedSamples[-23]; + case 22: prediction += coefficients[21] * (ma_int64)pDecodedSamples[-22]; + case 21: prediction += coefficients[20] * (ma_int64)pDecodedSamples[-21]; + case 20: prediction += coefficients[19] * (ma_int64)pDecodedSamples[-20]; + case 19: 
prediction += coefficients[18] * (ma_int64)pDecodedSamples[-19]; + case 18: prediction += coefficients[17] * (ma_int64)pDecodedSamples[-18]; + case 17: prediction += coefficients[16] * (ma_int64)pDecodedSamples[-17]; + case 16: prediction += coefficients[15] * (ma_int64)pDecodedSamples[-16]; + case 15: prediction += coefficients[14] * (ma_int64)pDecodedSamples[-15]; + case 14: prediction += coefficients[13] * (ma_int64)pDecodedSamples[-14]; + case 13: prediction += coefficients[12] * (ma_int64)pDecodedSamples[-13]; + case 12: prediction += coefficients[11] * (ma_int64)pDecodedSamples[-12]; + case 11: prediction += coefficients[10] * (ma_int64)pDecodedSamples[-11]; + case 10: prediction += coefficients[ 9] * (ma_int64)pDecodedSamples[-10]; + case 9: prediction += coefficients[ 8] * (ma_int64)pDecodedSamples[- 9]; + case 8: prediction += coefficients[ 7] * (ma_int64)pDecodedSamples[- 8]; + case 7: prediction += coefficients[ 6] * (ma_int64)pDecodedSamples[- 7]; + case 6: prediction += coefficients[ 5] * (ma_int64)pDecodedSamples[- 6]; + case 5: prediction += coefficients[ 4] * (ma_int64)pDecodedSamples[- 5]; + case 4: prediction += coefficients[ 3] * (ma_int64)pDecodedSamples[- 4]; + case 3: prediction += coefficients[ 2] * (ma_int64)pDecodedSamples[- 3]; + case 2: prediction += coefficients[ 1] * (ma_int64)pDecodedSamples[- 2]; + case 1: prediction += coefficients[ 0] * (ma_int64)pDecodedSamples[- 1]; + } +#endif + return (ma_int32)(prediction >> shift); +} +#if 0 +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__reference(ma_dr_flac_bs* bs, ma_uint32 bitsPerSample, ma_uint32 count, ma_uint8 riceParam, ma_uint32 lpcOrder, ma_int32 lpcShift, ma_uint32 lpcPrecision, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + ma_uint32 i; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pSamplesOut != NULL); + for (i = 0; i < count; ++i) { + ma_uint32 zeroCounter = 0; + for (;;) { + ma_uint8 bit; + if (!ma_dr_flac__read_uint8(bs, 1, &bit)) { + return MA_FALSE; + } + if (bit == 0) { + zeroCounter += 1; + } else { + break; + } + } + ma_uint32 decodedRice; + if (riceParam > 0) { + if (!ma_dr_flac__read_uint32(bs, riceParam, &decodedRice)) { + return MA_FALSE; + } + } else { + decodedRice = 0; + } + decodedRice |= (zeroCounter << riceParam); + if ((decodedRice & 0x01)) { + decodedRice = ~(decodedRice >> 1); + } else { + decodedRice = (decodedRice >> 1); + } + if (ma_dr_flac__use_64_bit_prediction(bitsPerSample, lpcOrder, lpcPrecision)) { + pSamplesOut[i] = decodedRice + ma_dr_flac__calculate_prediction_64(lpcOrder, lpcShift, coefficients, pSamplesOut + i); + } else { + pSamplesOut[i] = decodedRice + ma_dr_flac__calculate_prediction_32(lpcOrder, lpcShift, coefficients, pSamplesOut + i); + } + } + return MA_TRUE; +} +#endif +#if 0 +static ma_bool32 ma_dr_flac__read_rice_parts__reference(ma_dr_flac_bs* bs, ma_uint8 riceParam, ma_uint32* pZeroCounterOut, ma_uint32* pRiceParamPartOut) +{ + ma_uint32 zeroCounter = 0; + ma_uint32 decodedRice; + for (;;) { + ma_uint8 bit; + if (!ma_dr_flac__read_uint8(bs, 1, &bit)) { + return MA_FALSE; + } + if (bit == 0) { + zeroCounter += 1; + } else { + break; + } + } + if (riceParam > 0) { + if (!ma_dr_flac__read_uint32(bs, riceParam, &decodedRice)) { + return MA_FALSE; + } + } else { + decodedRice = 0; + } + *pZeroCounterOut = zeroCounter; + *pRiceParamPartOut = decodedRice; + return MA_TRUE; +} +#endif +#if 0 +static MA_INLINE ma_bool32 ma_dr_flac__read_rice_parts(ma_dr_flac_bs* bs, ma_uint8 riceParam, ma_uint32* pZeroCounterOut, ma_uint32* 
pRiceParamPartOut) +{ + ma_dr_flac_cache_t riceParamMask; + ma_uint32 zeroCounter; + ma_uint32 setBitOffsetPlus1; + ma_uint32 riceParamPart; + ma_uint32 riceLength; + MA_DR_FLAC_ASSERT(riceParam > 0); + riceParamMask = MA_DR_FLAC_CACHE_L1_SELECTION_MASK(riceParam); + zeroCounter = 0; + while (bs->cache == 0) { + zeroCounter += (ma_uint32)MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs); + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + } + setBitOffsetPlus1 = ma_dr_flac__clz(bs->cache); + zeroCounter += setBitOffsetPlus1; + setBitOffsetPlus1 += 1; + riceLength = setBitOffsetPlus1 + riceParam; + if (riceLength < MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)) { + riceParamPart = (ma_uint32)((bs->cache & (riceParamMask >> setBitOffsetPlus1)) >> MA_DR_FLAC_CACHE_L1_SELECTION_SHIFT(bs, riceLength)); + bs->consumedBits += riceLength; + bs->cache <<= riceLength; + } else { + ma_uint32 bitCountLo; + ma_dr_flac_cache_t resultHi; + bs->consumedBits += riceLength; + bs->cache <<= setBitOffsetPlus1 & (MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs)-1); + bitCountLo = bs->consumedBits - MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs); + resultHi = MA_DR_FLAC_CACHE_L1_SELECT_AND_SHIFT(bs, riceParam); + if (bs->nextL2Line < MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs)) { +#ifndef MA_DR_FLAC_NO_CRC + ma_dr_flac__update_crc16(bs); +#endif + bs->cache = ma_dr_flac__be2host__cache_line(bs->cacheL2[bs->nextL2Line++]); + bs->consumedBits = 0; +#ifndef MA_DR_FLAC_NO_CRC + bs->crc16Cache = bs->cache; +#endif + } else { + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + if (bitCountLo > MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)) { + return MA_FALSE; + } + } + riceParamPart = (ma_uint32)(resultHi | MA_DR_FLAC_CACHE_L1_SELECT_AND_SHIFT_SAFE(bs, bitCountLo)); + bs->consumedBits += bitCountLo; + bs->cache <<= bitCountLo; + } + pZeroCounterOut[0] = zeroCounter; + pRiceParamPartOut[0] = riceParamPart; + return MA_TRUE; +} +#endif +static MA_INLINE ma_bool32 ma_dr_flac__read_rice_parts_x1(ma_dr_flac_bs* bs, ma_uint8 riceParam, ma_uint32* pZeroCounterOut, ma_uint32* pRiceParamPartOut) +{ + ma_uint32 riceParamPlus1 = riceParam + 1; + ma_uint32 riceParamPlus1Shift = MA_DR_FLAC_CACHE_L1_SELECTION_SHIFT(bs, riceParamPlus1); + ma_uint32 riceParamPlus1MaxConsumedBits = MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs) - riceParamPlus1; + ma_dr_flac_cache_t bs_cache = bs->cache; + ma_uint32 bs_consumedBits = bs->consumedBits; + ma_uint32 lzcount = ma_dr_flac__clz(bs_cache); + if (lzcount < sizeof(bs_cache)*8) { + pZeroCounterOut[0] = lzcount; + extract_rice_param_part: + bs_cache <<= lzcount; + bs_consumedBits += lzcount; + if (bs_consumedBits <= riceParamPlus1MaxConsumedBits) { + pRiceParamPartOut[0] = (ma_uint32)(bs_cache >> riceParamPlus1Shift); + bs_cache <<= riceParamPlus1; + bs_consumedBits += riceParamPlus1; + } else { + ma_uint32 riceParamPartHi; + ma_uint32 riceParamPartLo; + ma_uint32 riceParamPartLoBitCount; + riceParamPartHi = (ma_uint32)(bs_cache >> riceParamPlus1Shift); + riceParamPartLoBitCount = bs_consumedBits - riceParamPlus1MaxConsumedBits; + MA_DR_FLAC_ASSERT(riceParamPartLoBitCount > 0 && riceParamPartLoBitCount < 32); + if (bs->nextL2Line < MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs)) { +#ifndef MA_DR_FLAC_NO_CRC + ma_dr_flac__update_crc16(bs); +#endif + bs_cache = ma_dr_flac__be2host__cache_line(bs->cacheL2[bs->nextL2Line++]); + bs_consumedBits = riceParamPartLoBitCount; +#ifndef MA_DR_FLAC_NO_CRC + bs->crc16Cache = bs_cache; +#endif + } else { + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + if (riceParamPartLoBitCount > 
MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)) { + return MA_FALSE; + } + bs_cache = bs->cache; + bs_consumedBits = bs->consumedBits + riceParamPartLoBitCount; + } + riceParamPartLo = (ma_uint32)(bs_cache >> (MA_DR_FLAC_CACHE_L1_SELECTION_SHIFT(bs, riceParamPartLoBitCount))); + pRiceParamPartOut[0] = riceParamPartHi | riceParamPartLo; + bs_cache <<= riceParamPartLoBitCount; + } + } else { + ma_uint32 zeroCounter = (ma_uint32)(MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs) - bs_consumedBits); + for (;;) { + if (bs->nextL2Line < MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs)) { +#ifndef MA_DR_FLAC_NO_CRC + ma_dr_flac__update_crc16(bs); +#endif + bs_cache = ma_dr_flac__be2host__cache_line(bs->cacheL2[bs->nextL2Line++]); + bs_consumedBits = 0; +#ifndef MA_DR_FLAC_NO_CRC + bs->crc16Cache = bs_cache; +#endif + } else { + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + bs_cache = bs->cache; + bs_consumedBits = bs->consumedBits; + } + lzcount = ma_dr_flac__clz(bs_cache); + zeroCounter += lzcount; + if (lzcount < sizeof(bs_cache)*8) { + break; + } + } + pZeroCounterOut[0] = zeroCounter; + goto extract_rice_param_part; + } + bs->cache = bs_cache; + bs->consumedBits = bs_consumedBits; + return MA_TRUE; +} +static MA_INLINE ma_bool32 ma_dr_flac__seek_rice_parts(ma_dr_flac_bs* bs, ma_uint8 riceParam) +{ + ma_uint32 riceParamPlus1 = riceParam + 1; + ma_uint32 riceParamPlus1MaxConsumedBits = MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs) - riceParamPlus1; + ma_dr_flac_cache_t bs_cache = bs->cache; + ma_uint32 bs_consumedBits = bs->consumedBits; + ma_uint32 lzcount = ma_dr_flac__clz(bs_cache); + if (lzcount < sizeof(bs_cache)*8) { + extract_rice_param_part: + bs_cache <<= lzcount; + bs_consumedBits += lzcount; + if (bs_consumedBits <= riceParamPlus1MaxConsumedBits) { + bs_cache <<= riceParamPlus1; + bs_consumedBits += riceParamPlus1; + } else { + ma_uint32 riceParamPartLoBitCount = bs_consumedBits - riceParamPlus1MaxConsumedBits; + MA_DR_FLAC_ASSERT(riceParamPartLoBitCount > 0 && riceParamPartLoBitCount < 32); + if (bs->nextL2Line < MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs)) { +#ifndef MA_DR_FLAC_NO_CRC + ma_dr_flac__update_crc16(bs); +#endif + bs_cache = ma_dr_flac__be2host__cache_line(bs->cacheL2[bs->nextL2Line++]); + bs_consumedBits = riceParamPartLoBitCount; +#ifndef MA_DR_FLAC_NO_CRC + bs->crc16Cache = bs_cache; +#endif + } else { + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + if (riceParamPartLoBitCount > MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)) { + return MA_FALSE; + } + bs_cache = bs->cache; + bs_consumedBits = bs->consumedBits + riceParamPartLoBitCount; + } + bs_cache <<= riceParamPartLoBitCount; + } + } else { + for (;;) { + if (bs->nextL2Line < MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs)) { +#ifndef MA_DR_FLAC_NO_CRC + ma_dr_flac__update_crc16(bs); +#endif + bs_cache = ma_dr_flac__be2host__cache_line(bs->cacheL2[bs->nextL2Line++]); + bs_consumedBits = 0; +#ifndef MA_DR_FLAC_NO_CRC + bs->crc16Cache = bs_cache; +#endif + } else { + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + bs_cache = bs->cache; + bs_consumedBits = bs->consumedBits; + } + lzcount = ma_dr_flac__clz(bs_cache); + if (lzcount < sizeof(bs_cache)*8) { + break; + } + } + goto extract_rice_param_part; + } + bs->cache = bs_cache; + bs->consumedBits = bs_consumedBits; + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__scalar_zeroorder(ma_dr_flac_bs* bs, ma_uint32 bitsPerSample, ma_uint32 count, ma_uint8 riceParam, ma_uint32 order, ma_int32 shift, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + ma_uint32 
t[2] = {0x00000000, 0xFFFFFFFF}; + ma_uint32 zeroCountPart0; + ma_uint32 riceParamPart0; + ma_uint32 riceParamMask; + ma_uint32 i; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pSamplesOut != NULL); + (void)bitsPerSample; + (void)order; + (void)shift; + (void)coefficients; + riceParamMask = (ma_uint32)~((~0UL) << riceParam); + i = 0; + while (i < count) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart0, &riceParamPart0)) { + return MA_FALSE; + } + riceParamPart0 &= riceParamMask; + riceParamPart0 |= (zeroCountPart0 << riceParam); + riceParamPart0 = (riceParamPart0 >> 1) ^ t[riceParamPart0 & 0x01]; + pSamplesOut[i] = riceParamPart0; + i += 1; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__scalar(ma_dr_flac_bs* bs, ma_uint32 bitsPerSample, ma_uint32 count, ma_uint8 riceParam, ma_uint32 lpcOrder, ma_int32 lpcShift, ma_uint32 lpcPrecision, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + ma_uint32 t[2] = {0x00000000, 0xFFFFFFFF}; + ma_uint32 zeroCountPart0 = 0; + ma_uint32 zeroCountPart1 = 0; + ma_uint32 zeroCountPart2 = 0; + ma_uint32 zeroCountPart3 = 0; + ma_uint32 riceParamPart0 = 0; + ma_uint32 riceParamPart1 = 0; + ma_uint32 riceParamPart2 = 0; + ma_uint32 riceParamPart3 = 0; + ma_uint32 riceParamMask; + const ma_int32* pSamplesOutEnd; + ma_uint32 i; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pSamplesOut != NULL); + if (lpcOrder == 0) { + return ma_dr_flac__decode_samples_with_residual__rice__scalar_zeroorder(bs, bitsPerSample, count, riceParam, lpcOrder, lpcShift, coefficients, pSamplesOut); + } + riceParamMask = (ma_uint32)~((~0UL) << riceParam); + pSamplesOutEnd = pSamplesOut + (count & ~3); + if (ma_dr_flac__use_64_bit_prediction(bitsPerSample, lpcOrder, lpcPrecision)) { + while (pSamplesOut < pSamplesOutEnd) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart0, &riceParamPart0) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart1, &riceParamPart1) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart2, &riceParamPart2) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart3, &riceParamPart3)) { + return MA_FALSE; + } + riceParamPart0 &= riceParamMask; + riceParamPart1 &= riceParamMask; + riceParamPart2 &= riceParamMask; + riceParamPart3 &= riceParamMask; + riceParamPart0 |= (zeroCountPart0 << riceParam); + riceParamPart1 |= (zeroCountPart1 << riceParam); + riceParamPart2 |= (zeroCountPart2 << riceParam); + riceParamPart3 |= (zeroCountPart3 << riceParam); + riceParamPart0 = (riceParamPart0 >> 1) ^ t[riceParamPart0 & 0x01]; + riceParamPart1 = (riceParamPart1 >> 1) ^ t[riceParamPart1 & 0x01]; + riceParamPart2 = (riceParamPart2 >> 1) ^ t[riceParamPart2 & 0x01]; + riceParamPart3 = (riceParamPart3 >> 1) ^ t[riceParamPart3 & 0x01]; + pSamplesOut[0] = riceParamPart0 + ma_dr_flac__calculate_prediction_64(lpcOrder, lpcShift, coefficients, pSamplesOut + 0); + pSamplesOut[1] = riceParamPart1 + ma_dr_flac__calculate_prediction_64(lpcOrder, lpcShift, coefficients, pSamplesOut + 1); + pSamplesOut[2] = riceParamPart2 + ma_dr_flac__calculate_prediction_64(lpcOrder, lpcShift, coefficients, pSamplesOut + 2); + pSamplesOut[3] = riceParamPart3 + ma_dr_flac__calculate_prediction_64(lpcOrder, lpcShift, coefficients, pSamplesOut + 3); + pSamplesOut += 4; + } + } else { + while (pSamplesOut < pSamplesOutEnd) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart0, &riceParamPart0) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, 
&zeroCountPart1, &riceParamPart1) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart2, &riceParamPart2) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart3, &riceParamPart3)) { + return MA_FALSE; + } + riceParamPart0 &= riceParamMask; + riceParamPart1 &= riceParamMask; + riceParamPart2 &= riceParamMask; + riceParamPart3 &= riceParamMask; + riceParamPart0 |= (zeroCountPart0 << riceParam); + riceParamPart1 |= (zeroCountPart1 << riceParam); + riceParamPart2 |= (zeroCountPart2 << riceParam); + riceParamPart3 |= (zeroCountPart3 << riceParam); + riceParamPart0 = (riceParamPart0 >> 1) ^ t[riceParamPart0 & 0x01]; + riceParamPart1 = (riceParamPart1 >> 1) ^ t[riceParamPart1 & 0x01]; + riceParamPart2 = (riceParamPart2 >> 1) ^ t[riceParamPart2 & 0x01]; + riceParamPart3 = (riceParamPart3 >> 1) ^ t[riceParamPart3 & 0x01]; + pSamplesOut[0] = riceParamPart0 + ma_dr_flac__calculate_prediction_32(lpcOrder, lpcShift, coefficients, pSamplesOut + 0); + pSamplesOut[1] = riceParamPart1 + ma_dr_flac__calculate_prediction_32(lpcOrder, lpcShift, coefficients, pSamplesOut + 1); + pSamplesOut[2] = riceParamPart2 + ma_dr_flac__calculate_prediction_32(lpcOrder, lpcShift, coefficients, pSamplesOut + 2); + pSamplesOut[3] = riceParamPart3 + ma_dr_flac__calculate_prediction_32(lpcOrder, lpcShift, coefficients, pSamplesOut + 3); + pSamplesOut += 4; + } + } + i = (count & ~3); + while (i < count) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart0, &riceParamPart0)) { + return MA_FALSE; + } + riceParamPart0 &= riceParamMask; + riceParamPart0 |= (zeroCountPart0 << riceParam); + riceParamPart0 = (riceParamPart0 >> 1) ^ t[riceParamPart0 & 0x01]; + if (ma_dr_flac__use_64_bit_prediction(bitsPerSample, lpcOrder, lpcPrecision)) { + pSamplesOut[0] = riceParamPart0 + ma_dr_flac__calculate_prediction_64(lpcOrder, lpcShift, coefficients, pSamplesOut + 0); + } else { + pSamplesOut[0] = riceParamPart0 + ma_dr_flac__calculate_prediction_32(lpcOrder, lpcShift, coefficients, pSamplesOut + 0); + } + i += 1; + pSamplesOut += 1; + } + return MA_TRUE; +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE __m128i ma_dr_flac__mm_packs_interleaved_epi32(__m128i a, __m128i b) +{ + __m128i r; + r = _mm_packs_epi32(a, b); + r = _mm_shuffle_epi32(r, _MM_SHUFFLE(3, 1, 2, 0)); + r = _mm_shufflehi_epi16(r, _MM_SHUFFLE(3, 1, 2, 0)); + r = _mm_shufflelo_epi16(r, _MM_SHUFFLE(3, 1, 2, 0)); + return r; +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_SSE41) +static MA_INLINE __m128i ma_dr_flac__mm_not_si128(__m128i a) +{ + return _mm_xor_si128(a, _mm_cmpeq_epi32(_mm_setzero_si128(), _mm_setzero_si128())); +} +static MA_INLINE __m128i ma_dr_flac__mm_hadd_epi32(__m128i x) +{ + __m128i x64 = _mm_add_epi32(x, _mm_shuffle_epi32(x, _MM_SHUFFLE(1, 0, 3, 2))); + __m128i x32 = _mm_shufflelo_epi16(x64, _MM_SHUFFLE(1, 0, 3, 2)); + return _mm_add_epi32(x64, x32); +} +static MA_INLINE __m128i ma_dr_flac__mm_hadd_epi64(__m128i x) +{ + return _mm_add_epi64(x, _mm_shuffle_epi32(x, _MM_SHUFFLE(1, 0, 3, 2))); +} +static MA_INLINE __m128i ma_dr_flac__mm_srai_epi64(__m128i x, int count) +{ + __m128i lo = _mm_srli_epi64(x, count); + __m128i hi = _mm_srai_epi32(x, count); + hi = _mm_and_si128(hi, _mm_set_epi32(0xFFFFFFFF, 0, 0xFFFFFFFF, 0)); + return _mm_or_si128(lo, hi); +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__sse41_32(ma_dr_flac_bs* bs, ma_uint32 count, ma_uint8 riceParam, ma_uint32 order, ma_int32 shift, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + int i; + ma_uint32 riceParamMask; + 
ma_int32* pDecodedSamples = pSamplesOut; + ma_int32* pDecodedSamplesEnd = pSamplesOut + (count & ~3); + ma_uint32 zeroCountParts0 = 0; + ma_uint32 zeroCountParts1 = 0; + ma_uint32 zeroCountParts2 = 0; + ma_uint32 zeroCountParts3 = 0; + ma_uint32 riceParamParts0 = 0; + ma_uint32 riceParamParts1 = 0; + ma_uint32 riceParamParts2 = 0; + ma_uint32 riceParamParts3 = 0; + __m128i coefficients128_0; + __m128i coefficients128_4; + __m128i coefficients128_8; + __m128i samples128_0; + __m128i samples128_4; + __m128i samples128_8; + __m128i riceParamMask128; + const ma_uint32 t[2] = {0x00000000, 0xFFFFFFFF}; + riceParamMask = (ma_uint32)~((~0UL) << riceParam); + riceParamMask128 = _mm_set1_epi32(riceParamMask); + coefficients128_0 = _mm_setzero_si128(); + coefficients128_4 = _mm_setzero_si128(); + coefficients128_8 = _mm_setzero_si128(); + samples128_0 = _mm_setzero_si128(); + samples128_4 = _mm_setzero_si128(); + samples128_8 = _mm_setzero_si128(); +#if 1 + { + int runningOrder = order; + if (runningOrder >= 4) { + coefficients128_0 = _mm_loadu_si128((const __m128i*)(coefficients + 0)); + samples128_0 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 4)); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: coefficients128_0 = _mm_set_epi32(0, coefficients[2], coefficients[1], coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], pSamplesOut[-2], pSamplesOut[-3], 0); break; + case 2: coefficients128_0 = _mm_set_epi32(0, 0, coefficients[1], coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], pSamplesOut[-2], 0, 0); break; + case 1: coefficients128_0 = _mm_set_epi32(0, 0, 0, coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], 0, 0, 0); break; + } + runningOrder = 0; + } + if (runningOrder >= 4) { + coefficients128_4 = _mm_loadu_si128((const __m128i*)(coefficients + 4)); + samples128_4 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 8)); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: coefficients128_4 = _mm_set_epi32(0, coefficients[6], coefficients[5], coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], pSamplesOut[-6], pSamplesOut[-7], 0); break; + case 2: coefficients128_4 = _mm_set_epi32(0, 0, coefficients[5], coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], pSamplesOut[-6], 0, 0); break; + case 1: coefficients128_4 = _mm_set_epi32(0, 0, 0, coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], 0, 0, 0); break; + } + runningOrder = 0; + } + if (runningOrder == 4) { + coefficients128_8 = _mm_loadu_si128((const __m128i*)(coefficients + 8)); + samples128_8 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 12)); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: coefficients128_8 = _mm_set_epi32(0, coefficients[10], coefficients[9], coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], pSamplesOut[-10], pSamplesOut[-11], 0); break; + case 2: coefficients128_8 = _mm_set_epi32(0, 0, coefficients[9], coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], pSamplesOut[-10], 0, 0); break; + case 1: coefficients128_8 = _mm_set_epi32(0, 0, 0, coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], 0, 0, 0); break; + } + runningOrder = 0; + } + coefficients128_0 = _mm_shuffle_epi32(coefficients128_0, _MM_SHUFFLE(0, 1, 2, 3)); + coefficients128_4 = _mm_shuffle_epi32(coefficients128_4, _MM_SHUFFLE(0, 1, 2, 3)); + coefficients128_8 = _mm_shuffle_epi32(coefficients128_8, _MM_SHUFFLE(0, 1, 2, 3)); + } +#else + switch (order) + { + case 12: 
((ma_int32*)&coefficients128_8)[0] = coefficients[11]; ((ma_int32*)&samples128_8)[0] = pDecodedSamples[-12]; + case 11: ((ma_int32*)&coefficients128_8)[1] = coefficients[10]; ((ma_int32*)&samples128_8)[1] = pDecodedSamples[-11]; + case 10: ((ma_int32*)&coefficients128_8)[2] = coefficients[ 9]; ((ma_int32*)&samples128_8)[2] = pDecodedSamples[-10]; + case 9: ((ma_int32*)&coefficients128_8)[3] = coefficients[ 8]; ((ma_int32*)&samples128_8)[3] = pDecodedSamples[- 9]; + case 8: ((ma_int32*)&coefficients128_4)[0] = coefficients[ 7]; ((ma_int32*)&samples128_4)[0] = pDecodedSamples[- 8]; + case 7: ((ma_int32*)&coefficients128_4)[1] = coefficients[ 6]; ((ma_int32*)&samples128_4)[1] = pDecodedSamples[- 7]; + case 6: ((ma_int32*)&coefficients128_4)[2] = coefficients[ 5]; ((ma_int32*)&samples128_4)[2] = pDecodedSamples[- 6]; + case 5: ((ma_int32*)&coefficients128_4)[3] = coefficients[ 4]; ((ma_int32*)&samples128_4)[3] = pDecodedSamples[- 5]; + case 4: ((ma_int32*)&coefficients128_0)[0] = coefficients[ 3]; ((ma_int32*)&samples128_0)[0] = pDecodedSamples[- 4]; + case 3: ((ma_int32*)&coefficients128_0)[1] = coefficients[ 2]; ((ma_int32*)&samples128_0)[1] = pDecodedSamples[- 3]; + case 2: ((ma_int32*)&coefficients128_0)[2] = coefficients[ 1]; ((ma_int32*)&samples128_0)[2] = pDecodedSamples[- 2]; + case 1: ((ma_int32*)&coefficients128_0)[3] = coefficients[ 0]; ((ma_int32*)&samples128_0)[3] = pDecodedSamples[- 1]; + } +#endif + while (pDecodedSamples < pDecodedSamplesEnd) { + __m128i prediction128; + __m128i zeroCountPart128; + __m128i riceParamPart128; + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts0, &riceParamParts0) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts1, &riceParamParts1) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts2, &riceParamParts2) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts3, &riceParamParts3)) { + return MA_FALSE; + } + zeroCountPart128 = _mm_set_epi32(zeroCountParts3, zeroCountParts2, zeroCountParts1, zeroCountParts0); + riceParamPart128 = _mm_set_epi32(riceParamParts3, riceParamParts2, riceParamParts1, riceParamParts0); + riceParamPart128 = _mm_and_si128(riceParamPart128, riceParamMask128); + riceParamPart128 = _mm_or_si128(riceParamPart128, _mm_slli_epi32(zeroCountPart128, riceParam)); + riceParamPart128 = _mm_xor_si128(_mm_srli_epi32(riceParamPart128, 1), _mm_add_epi32(ma_dr_flac__mm_not_si128(_mm_and_si128(riceParamPart128, _mm_set1_epi32(0x01))), _mm_set1_epi32(0x01))); + if (order <= 4) { + for (i = 0; i < 4; i += 1) { + prediction128 = _mm_mullo_epi32(coefficients128_0, samples128_0); + prediction128 = ma_dr_flac__mm_hadd_epi32(prediction128); + prediction128 = _mm_srai_epi32(prediction128, shift); + prediction128 = _mm_add_epi32(riceParamPart128, prediction128); + samples128_0 = _mm_alignr_epi8(prediction128, samples128_0, 4); + riceParamPart128 = _mm_alignr_epi8(_mm_setzero_si128(), riceParamPart128, 4); + } + } else if (order <= 8) { + for (i = 0; i < 4; i += 1) { + prediction128 = _mm_mullo_epi32(coefficients128_4, samples128_4); + prediction128 = _mm_add_epi32(prediction128, _mm_mullo_epi32(coefficients128_0, samples128_0)); + prediction128 = ma_dr_flac__mm_hadd_epi32(prediction128); + prediction128 = _mm_srai_epi32(prediction128, shift); + prediction128 = _mm_add_epi32(riceParamPart128, prediction128); + samples128_4 = _mm_alignr_epi8(samples128_0, samples128_4, 4); + samples128_0 = _mm_alignr_epi8(prediction128, samples128_0, 4); + riceParamPart128 = 
_mm_alignr_epi8(_mm_setzero_si128(), riceParamPart128, 4); + } + } else { + for (i = 0; i < 4; i += 1) { + prediction128 = _mm_mullo_epi32(coefficients128_8, samples128_8); + prediction128 = _mm_add_epi32(prediction128, _mm_mullo_epi32(coefficients128_4, samples128_4)); + prediction128 = _mm_add_epi32(prediction128, _mm_mullo_epi32(coefficients128_0, samples128_0)); + prediction128 = ma_dr_flac__mm_hadd_epi32(prediction128); + prediction128 = _mm_srai_epi32(prediction128, shift); + prediction128 = _mm_add_epi32(riceParamPart128, prediction128); + samples128_8 = _mm_alignr_epi8(samples128_4, samples128_8, 4); + samples128_4 = _mm_alignr_epi8(samples128_0, samples128_4, 4); + samples128_0 = _mm_alignr_epi8(prediction128, samples128_0, 4); + riceParamPart128 = _mm_alignr_epi8(_mm_setzero_si128(), riceParamPart128, 4); + } + } + _mm_storeu_si128((__m128i*)pDecodedSamples, samples128_0); + pDecodedSamples += 4; + } + i = (count & ~3); + while (i < (int)count) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts0, &riceParamParts0)) { + return MA_FALSE; + } + riceParamParts0 &= riceParamMask; + riceParamParts0 |= (zeroCountParts0 << riceParam); + riceParamParts0 = (riceParamParts0 >> 1) ^ t[riceParamParts0 & 0x01]; + pDecodedSamples[0] = riceParamParts0 + ma_dr_flac__calculate_prediction_32(order, shift, coefficients, pDecodedSamples); + i += 1; + pDecodedSamples += 1; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__sse41_64(ma_dr_flac_bs* bs, ma_uint32 count, ma_uint8 riceParam, ma_uint32 order, ma_int32 shift, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + int i; + ma_uint32 riceParamMask; + ma_int32* pDecodedSamples = pSamplesOut; + ma_int32* pDecodedSamplesEnd = pSamplesOut + (count & ~3); + ma_uint32 zeroCountParts0 = 0; + ma_uint32 zeroCountParts1 = 0; + ma_uint32 zeroCountParts2 = 0; + ma_uint32 zeroCountParts3 = 0; + ma_uint32 riceParamParts0 = 0; + ma_uint32 riceParamParts1 = 0; + ma_uint32 riceParamParts2 = 0; + ma_uint32 riceParamParts3 = 0; + __m128i coefficients128_0; + __m128i coefficients128_4; + __m128i coefficients128_8; + __m128i samples128_0; + __m128i samples128_4; + __m128i samples128_8; + __m128i prediction128; + __m128i riceParamMask128; + const ma_uint32 t[2] = {0x00000000, 0xFFFFFFFF}; + MA_DR_FLAC_ASSERT(order <= 12); + riceParamMask = (ma_uint32)~((~0UL) << riceParam); + riceParamMask128 = _mm_set1_epi32(riceParamMask); + prediction128 = _mm_setzero_si128(); + coefficients128_0 = _mm_setzero_si128(); + coefficients128_4 = _mm_setzero_si128(); + coefficients128_8 = _mm_setzero_si128(); + samples128_0 = _mm_setzero_si128(); + samples128_4 = _mm_setzero_si128(); + samples128_8 = _mm_setzero_si128(); +#if 1 + { + int runningOrder = order; + if (runningOrder >= 4) { + coefficients128_0 = _mm_loadu_si128((const __m128i*)(coefficients + 0)); + samples128_0 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 4)); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: coefficients128_0 = _mm_set_epi32(0, coefficients[2], coefficients[1], coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], pSamplesOut[-2], pSamplesOut[-3], 0); break; + case 2: coefficients128_0 = _mm_set_epi32(0, 0, coefficients[1], coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], pSamplesOut[-2], 0, 0); break; + case 1: coefficients128_0 = _mm_set_epi32(0, 0, 0, coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], 0, 0, 0); break; + } + runningOrder = 0; + } + if (runningOrder >= 4) { + 
coefficients128_4 = _mm_loadu_si128((const __m128i*)(coefficients + 4)); + samples128_4 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 8)); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: coefficients128_4 = _mm_set_epi32(0, coefficients[6], coefficients[5], coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], pSamplesOut[-6], pSamplesOut[-7], 0); break; + case 2: coefficients128_4 = _mm_set_epi32(0, 0, coefficients[5], coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], pSamplesOut[-6], 0, 0); break; + case 1: coefficients128_4 = _mm_set_epi32(0, 0, 0, coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], 0, 0, 0); break; + } + runningOrder = 0; + } + if (runningOrder == 4) { + coefficients128_8 = _mm_loadu_si128((const __m128i*)(coefficients + 8)); + samples128_8 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 12)); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: coefficients128_8 = _mm_set_epi32(0, coefficients[10], coefficients[9], coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], pSamplesOut[-10], pSamplesOut[-11], 0); break; + case 2: coefficients128_8 = _mm_set_epi32(0, 0, coefficients[9], coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], pSamplesOut[-10], 0, 0); break; + case 1: coefficients128_8 = _mm_set_epi32(0, 0, 0, coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], 0, 0, 0); break; + } + runningOrder = 0; + } + coefficients128_0 = _mm_shuffle_epi32(coefficients128_0, _MM_SHUFFLE(0, 1, 2, 3)); + coefficients128_4 = _mm_shuffle_epi32(coefficients128_4, _MM_SHUFFLE(0, 1, 2, 3)); + coefficients128_8 = _mm_shuffle_epi32(coefficients128_8, _MM_SHUFFLE(0, 1, 2, 3)); + } +#else + switch (order) + { + case 12: ((ma_int32*)&coefficients128_8)[0] = coefficients[11]; ((ma_int32*)&samples128_8)[0] = pDecodedSamples[-12]; + case 11: ((ma_int32*)&coefficients128_8)[1] = coefficients[10]; ((ma_int32*)&samples128_8)[1] = pDecodedSamples[-11]; + case 10: ((ma_int32*)&coefficients128_8)[2] = coefficients[ 9]; ((ma_int32*)&samples128_8)[2] = pDecodedSamples[-10]; + case 9: ((ma_int32*)&coefficients128_8)[3] = coefficients[ 8]; ((ma_int32*)&samples128_8)[3] = pDecodedSamples[- 9]; + case 8: ((ma_int32*)&coefficients128_4)[0] = coefficients[ 7]; ((ma_int32*)&samples128_4)[0] = pDecodedSamples[- 8]; + case 7: ((ma_int32*)&coefficients128_4)[1] = coefficients[ 6]; ((ma_int32*)&samples128_4)[1] = pDecodedSamples[- 7]; + case 6: ((ma_int32*)&coefficients128_4)[2] = coefficients[ 5]; ((ma_int32*)&samples128_4)[2] = pDecodedSamples[- 6]; + case 5: ((ma_int32*)&coefficients128_4)[3] = coefficients[ 4]; ((ma_int32*)&samples128_4)[3] = pDecodedSamples[- 5]; + case 4: ((ma_int32*)&coefficients128_0)[0] = coefficients[ 3]; ((ma_int32*)&samples128_0)[0] = pDecodedSamples[- 4]; + case 3: ((ma_int32*)&coefficients128_0)[1] = coefficients[ 2]; ((ma_int32*)&samples128_0)[1] = pDecodedSamples[- 3]; + case 2: ((ma_int32*)&coefficients128_0)[2] = coefficients[ 1]; ((ma_int32*)&samples128_0)[2] = pDecodedSamples[- 2]; + case 1: ((ma_int32*)&coefficients128_0)[3] = coefficients[ 0]; ((ma_int32*)&samples128_0)[3] = pDecodedSamples[- 1]; + } +#endif + while (pDecodedSamples < pDecodedSamplesEnd) { + __m128i zeroCountPart128; + __m128i riceParamPart128; + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts0, &riceParamParts0) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts1, &riceParamParts1) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts2, 
&riceParamParts2) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts3, &riceParamParts3)) { + return MA_FALSE; + } + zeroCountPart128 = _mm_set_epi32(zeroCountParts3, zeroCountParts2, zeroCountParts1, zeroCountParts0); + riceParamPart128 = _mm_set_epi32(riceParamParts3, riceParamParts2, riceParamParts1, riceParamParts0); + riceParamPart128 = _mm_and_si128(riceParamPart128, riceParamMask128); + riceParamPart128 = _mm_or_si128(riceParamPart128, _mm_slli_epi32(zeroCountPart128, riceParam)); + riceParamPart128 = _mm_xor_si128(_mm_srli_epi32(riceParamPart128, 1), _mm_add_epi32(ma_dr_flac__mm_not_si128(_mm_and_si128(riceParamPart128, _mm_set1_epi32(1))), _mm_set1_epi32(1))); + for (i = 0; i < 4; i += 1) { + prediction128 = _mm_xor_si128(prediction128, prediction128); + switch (order) + { + case 12: + case 11: prediction128 = _mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_8, _MM_SHUFFLE(1, 1, 0, 0)), _mm_shuffle_epi32(samples128_8, _MM_SHUFFLE(1, 1, 0, 0)))); + case 10: + case 9: prediction128 = _mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_8, _MM_SHUFFLE(3, 3, 2, 2)), _mm_shuffle_epi32(samples128_8, _MM_SHUFFLE(3, 3, 2, 2)))); + case 8: + case 7: prediction128 = _mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_4, _MM_SHUFFLE(1, 1, 0, 0)), _mm_shuffle_epi32(samples128_4, _MM_SHUFFLE(1, 1, 0, 0)))); + case 6: + case 5: prediction128 = _mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_4, _MM_SHUFFLE(3, 3, 2, 2)), _mm_shuffle_epi32(samples128_4, _MM_SHUFFLE(3, 3, 2, 2)))); + case 4: + case 3: prediction128 = _mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_0, _MM_SHUFFLE(1, 1, 0, 0)), _mm_shuffle_epi32(samples128_0, _MM_SHUFFLE(1, 1, 0, 0)))); + case 2: + case 1: prediction128 = _mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_0, _MM_SHUFFLE(3, 3, 2, 2)), _mm_shuffle_epi32(samples128_0, _MM_SHUFFLE(3, 3, 2, 2)))); + } + prediction128 = ma_dr_flac__mm_hadd_epi64(prediction128); + prediction128 = ma_dr_flac__mm_srai_epi64(prediction128, shift); + prediction128 = _mm_add_epi32(riceParamPart128, prediction128); + samples128_8 = _mm_alignr_epi8(samples128_4, samples128_8, 4); + samples128_4 = _mm_alignr_epi8(samples128_0, samples128_4, 4); + samples128_0 = _mm_alignr_epi8(prediction128, samples128_0, 4); + riceParamPart128 = _mm_alignr_epi8(_mm_setzero_si128(), riceParamPart128, 4); + } + _mm_storeu_si128((__m128i*)pDecodedSamples, samples128_0); + pDecodedSamples += 4; + } + i = (count & ~3); + while (i < (int)count) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts0, &riceParamParts0)) { + return MA_FALSE; + } + riceParamParts0 &= riceParamMask; + riceParamParts0 |= (zeroCountParts0 << riceParam); + riceParamParts0 = (riceParamParts0 >> 1) ^ t[riceParamParts0 & 0x01]; + pDecodedSamples[0] = riceParamParts0 + ma_dr_flac__calculate_prediction_64(order, shift, coefficients, pDecodedSamples); + i += 1; + pDecodedSamples += 1; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__sse41(ma_dr_flac_bs* bs, ma_uint32 bitsPerSample, ma_uint32 count, ma_uint8 riceParam, ma_uint32 lpcOrder, ma_int32 lpcShift, ma_uint32 lpcPrecision, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pSamplesOut != NULL); + if (lpcOrder > 0 && lpcOrder <= 12) { + if (ma_dr_flac__use_64_bit_prediction(bitsPerSample, lpcOrder, 
lpcPrecision)) { + return ma_dr_flac__decode_samples_with_residual__rice__sse41_64(bs, count, riceParam, lpcOrder, lpcShift, coefficients, pSamplesOut); + } else { + return ma_dr_flac__decode_samples_with_residual__rice__sse41_32(bs, count, riceParam, lpcOrder, lpcShift, coefficients, pSamplesOut); + } + } else { + return ma_dr_flac__decode_samples_with_residual__rice__scalar(bs, bitsPerSample, count, riceParam, lpcOrder, lpcShift, lpcPrecision, coefficients, pSamplesOut); + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac__vst2q_s32(ma_int32* p, int32x4x2_t x) +{ + vst1q_s32(p+0, x.val[0]); + vst1q_s32(p+4, x.val[1]); +} +static MA_INLINE void ma_dr_flac__vst2q_u32(ma_uint32* p, uint32x4x2_t x) +{ + vst1q_u32(p+0, x.val[0]); + vst1q_u32(p+4, x.val[1]); +} +static MA_INLINE void ma_dr_flac__vst2q_f32(float* p, float32x4x2_t x) +{ + vst1q_f32(p+0, x.val[0]); + vst1q_f32(p+4, x.val[1]); +} +static MA_INLINE void ma_dr_flac__vst2q_s16(ma_int16* p, int16x4x2_t x) +{ + vst1q_s16(p, vcombine_s16(x.val[0], x.val[1])); +} +static MA_INLINE void ma_dr_flac__vst2q_u16(ma_uint16* p, uint16x4x2_t x) +{ + vst1q_u16(p, vcombine_u16(x.val[0], x.val[1])); +} +static MA_INLINE int32x4_t ma_dr_flac__vdupq_n_s32x4(ma_int32 x3, ma_int32 x2, ma_int32 x1, ma_int32 x0) +{ + ma_int32 x[4]; + x[3] = x3; + x[2] = x2; + x[1] = x1; + x[0] = x0; + return vld1q_s32(x); +} +static MA_INLINE int32x4_t ma_dr_flac__valignrq_s32_1(int32x4_t a, int32x4_t b) +{ + return vextq_s32(b, a, 1); +} +static MA_INLINE uint32x4_t ma_dr_flac__valignrq_u32_1(uint32x4_t a, uint32x4_t b) +{ + return vextq_u32(b, a, 1); +} +static MA_INLINE int32x2_t ma_dr_flac__vhaddq_s32(int32x4_t x) +{ + int32x2_t r = vadd_s32(vget_high_s32(x), vget_low_s32(x)); + return vpadd_s32(r, r); +} +static MA_INLINE int64x1_t ma_dr_flac__vhaddq_s64(int64x2_t x) +{ + return vadd_s64(vget_high_s64(x), vget_low_s64(x)); +} +static MA_INLINE int32x4_t ma_dr_flac__vrevq_s32(int32x4_t x) +{ + return vrev64q_s32(vcombine_s32(vget_high_s32(x), vget_low_s32(x))); +} +static MA_INLINE int32x4_t ma_dr_flac__vnotq_s32(int32x4_t x) +{ + return veorq_s32(x, vdupq_n_s32(0xFFFFFFFF)); +} +static MA_INLINE uint32x4_t ma_dr_flac__vnotq_u32(uint32x4_t x) +{ + return veorq_u32(x, vdupq_n_u32(0xFFFFFFFF)); +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__neon_32(ma_dr_flac_bs* bs, ma_uint32 count, ma_uint8 riceParam, ma_uint32 order, ma_int32 shift, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + int i; + ma_uint32 riceParamMask; + ma_int32* pDecodedSamples = pSamplesOut; + ma_int32* pDecodedSamplesEnd = pSamplesOut + (count & ~3); + ma_uint32 zeroCountParts[4]; + ma_uint32 riceParamParts[4]; + int32x4_t coefficients128_0; + int32x4_t coefficients128_4; + int32x4_t coefficients128_8; + int32x4_t samples128_0; + int32x4_t samples128_4; + int32x4_t samples128_8; + uint32x4_t riceParamMask128; + int32x4_t riceParam128; + int32x2_t shift64; + uint32x4_t one128; + const ma_uint32 t[2] = {0x00000000, 0xFFFFFFFF}; + riceParamMask = (ma_uint32)~((~0UL) << riceParam); + riceParamMask128 = vdupq_n_u32(riceParamMask); + riceParam128 = vdupq_n_s32(riceParam); + shift64 = vdup_n_s32(-shift); + one128 = vdupq_n_u32(1); + { + int runningOrder = order; + ma_int32 tempC[4] = {0, 0, 0, 0}; + ma_int32 tempS[4] = {0, 0, 0, 0}; + if (runningOrder >= 4) { + coefficients128_0 = vld1q_s32(coefficients + 0); + samples128_0 = vld1q_s32(pSamplesOut - 4); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: tempC[2] = 
coefficients[2]; tempS[1] = pSamplesOut[-3]; + case 2: tempC[1] = coefficients[1]; tempS[2] = pSamplesOut[-2]; + case 1: tempC[0] = coefficients[0]; tempS[3] = pSamplesOut[-1]; + } + coefficients128_0 = vld1q_s32(tempC); + samples128_0 = vld1q_s32(tempS); + runningOrder = 0; + } + if (runningOrder >= 4) { + coefficients128_4 = vld1q_s32(coefficients + 4); + samples128_4 = vld1q_s32(pSamplesOut - 8); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: tempC[2] = coefficients[6]; tempS[1] = pSamplesOut[-7]; + case 2: tempC[1] = coefficients[5]; tempS[2] = pSamplesOut[-6]; + case 1: tempC[0] = coefficients[4]; tempS[3] = pSamplesOut[-5]; + } + coefficients128_4 = vld1q_s32(tempC); + samples128_4 = vld1q_s32(tempS); + runningOrder = 0; + } + if (runningOrder == 4) { + coefficients128_8 = vld1q_s32(coefficients + 8); + samples128_8 = vld1q_s32(pSamplesOut - 12); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: tempC[2] = coefficients[10]; tempS[1] = pSamplesOut[-11]; + case 2: tempC[1] = coefficients[ 9]; tempS[2] = pSamplesOut[-10]; + case 1: tempC[0] = coefficients[ 8]; tempS[3] = pSamplesOut[- 9]; + } + coefficients128_8 = vld1q_s32(tempC); + samples128_8 = vld1q_s32(tempS); + runningOrder = 0; + } + coefficients128_0 = ma_dr_flac__vrevq_s32(coefficients128_0); + coefficients128_4 = ma_dr_flac__vrevq_s32(coefficients128_4); + coefficients128_8 = ma_dr_flac__vrevq_s32(coefficients128_8); + } + while (pDecodedSamples < pDecodedSamplesEnd) { + int32x4_t prediction128; + int32x2_t prediction64; + uint32x4_t zeroCountPart128; + uint32x4_t riceParamPart128; + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[0], &riceParamParts[0]) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[1], &riceParamParts[1]) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[2], &riceParamParts[2]) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[3], &riceParamParts[3])) { + return MA_FALSE; + } + zeroCountPart128 = vld1q_u32(zeroCountParts); + riceParamPart128 = vld1q_u32(riceParamParts); + riceParamPart128 = vandq_u32(riceParamPart128, riceParamMask128); + riceParamPart128 = vorrq_u32(riceParamPart128, vshlq_u32(zeroCountPart128, riceParam128)); + riceParamPart128 = veorq_u32(vshrq_n_u32(riceParamPart128, 1), vaddq_u32(ma_dr_flac__vnotq_u32(vandq_u32(riceParamPart128, one128)), one128)); + if (order <= 4) { + for (i = 0; i < 4; i += 1) { + prediction128 = vmulq_s32(coefficients128_0, samples128_0); + prediction64 = ma_dr_flac__vhaddq_s32(prediction128); + prediction64 = vshl_s32(prediction64, shift64); + prediction64 = vadd_s32(prediction64, vget_low_s32(vreinterpretq_s32_u32(riceParamPart128))); + samples128_0 = ma_dr_flac__valignrq_s32_1(vcombine_s32(prediction64, vdup_n_s32(0)), samples128_0); + riceParamPart128 = ma_dr_flac__valignrq_u32_1(vdupq_n_u32(0), riceParamPart128); + } + } else if (order <= 8) { + for (i = 0; i < 4; i += 1) { + prediction128 = vmulq_s32(coefficients128_4, samples128_4); + prediction128 = vmlaq_s32(prediction128, coefficients128_0, samples128_0); + prediction64 = ma_dr_flac__vhaddq_s32(prediction128); + prediction64 = vshl_s32(prediction64, shift64); + prediction64 = vadd_s32(prediction64, vget_low_s32(vreinterpretq_s32_u32(riceParamPart128))); + samples128_4 = ma_dr_flac__valignrq_s32_1(samples128_0, samples128_4); + samples128_0 = ma_dr_flac__valignrq_s32_1(vcombine_s32(prediction64, vdup_n_s32(0)), samples128_0); + riceParamPart128 = ma_dr_flac__valignrq_u32_1(vdupq_n_u32(0), 
riceParamPart128); + } + } else { + for (i = 0; i < 4; i += 1) { + prediction128 = vmulq_s32(coefficients128_8, samples128_8); + prediction128 = vmlaq_s32(prediction128, coefficients128_4, samples128_4); + prediction128 = vmlaq_s32(prediction128, coefficients128_0, samples128_0); + prediction64 = ma_dr_flac__vhaddq_s32(prediction128); + prediction64 = vshl_s32(prediction64, shift64); + prediction64 = vadd_s32(prediction64, vget_low_s32(vreinterpretq_s32_u32(riceParamPart128))); + samples128_8 = ma_dr_flac__valignrq_s32_1(samples128_4, samples128_8); + samples128_4 = ma_dr_flac__valignrq_s32_1(samples128_0, samples128_4); + samples128_0 = ma_dr_flac__valignrq_s32_1(vcombine_s32(prediction64, vdup_n_s32(0)), samples128_0); + riceParamPart128 = ma_dr_flac__valignrq_u32_1(vdupq_n_u32(0), riceParamPart128); + } + } + vst1q_s32(pDecodedSamples, samples128_0); + pDecodedSamples += 4; + } + i = (count & ~3); + while (i < (int)count) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[0], &riceParamParts[0])) { + return MA_FALSE; + } + riceParamParts[0] &= riceParamMask; + riceParamParts[0] |= (zeroCountParts[0] << riceParam); + riceParamParts[0] = (riceParamParts[0] >> 1) ^ t[riceParamParts[0] & 0x01]; + pDecodedSamples[0] = riceParamParts[0] + ma_dr_flac__calculate_prediction_32(order, shift, coefficients, pDecodedSamples); + i += 1; + pDecodedSamples += 1; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__neon_64(ma_dr_flac_bs* bs, ma_uint32 count, ma_uint8 riceParam, ma_uint32 order, ma_int32 shift, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + int i; + ma_uint32 riceParamMask; + ma_int32* pDecodedSamples = pSamplesOut; + ma_int32* pDecodedSamplesEnd = pSamplesOut + (count & ~3); + ma_uint32 zeroCountParts[4]; + ma_uint32 riceParamParts[4]; + int32x4_t coefficients128_0; + int32x4_t coefficients128_4; + int32x4_t coefficients128_8; + int32x4_t samples128_0; + int32x4_t samples128_4; + int32x4_t samples128_8; + uint32x4_t riceParamMask128; + int32x4_t riceParam128; + int64x1_t shift64; + uint32x4_t one128; + int64x2_t prediction128 = { 0 }; + uint32x4_t zeroCountPart128; + uint32x4_t riceParamPart128; + const ma_uint32 t[2] = {0x00000000, 0xFFFFFFFF}; + riceParamMask = (ma_uint32)~((~0UL) << riceParam); + riceParamMask128 = vdupq_n_u32(riceParamMask); + riceParam128 = vdupq_n_s32(riceParam); + shift64 = vdup_n_s64(-shift); + one128 = vdupq_n_u32(1); + { + int runningOrder = order; + ma_int32 tempC[4] = {0, 0, 0, 0}; + ma_int32 tempS[4] = {0, 0, 0, 0}; + if (runningOrder >= 4) { + coefficients128_0 = vld1q_s32(coefficients + 0); + samples128_0 = vld1q_s32(pSamplesOut - 4); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: tempC[2] = coefficients[2]; tempS[1] = pSamplesOut[-3]; + case 2: tempC[1] = coefficients[1]; tempS[2] = pSamplesOut[-2]; + case 1: tempC[0] = coefficients[0]; tempS[3] = pSamplesOut[-1]; + } + coefficients128_0 = vld1q_s32(tempC); + samples128_0 = vld1q_s32(tempS); + runningOrder = 0; + } + if (runningOrder >= 4) { + coefficients128_4 = vld1q_s32(coefficients + 4); + samples128_4 = vld1q_s32(pSamplesOut - 8); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: tempC[2] = coefficients[6]; tempS[1] = pSamplesOut[-7]; + case 2: tempC[1] = coefficients[5]; tempS[2] = pSamplesOut[-6]; + case 1: tempC[0] = coefficients[4]; tempS[3] = pSamplesOut[-5]; + } + coefficients128_4 = vld1q_s32(tempC); + samples128_4 = vld1q_s32(tempS); + runningOrder = 0; + } + if (runningOrder == 4) { + 
coefficients128_8 = vld1q_s32(coefficients + 8); + samples128_8 = vld1q_s32(pSamplesOut - 12); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: tempC[2] = coefficients[10]; tempS[1] = pSamplesOut[-11]; + case 2: tempC[1] = coefficients[ 9]; tempS[2] = pSamplesOut[-10]; + case 1: tempC[0] = coefficients[ 8]; tempS[3] = pSamplesOut[- 9]; + } + coefficients128_8 = vld1q_s32(tempC); + samples128_8 = vld1q_s32(tempS); + runningOrder = 0; + } + coefficients128_0 = ma_dr_flac__vrevq_s32(coefficients128_0); + coefficients128_4 = ma_dr_flac__vrevq_s32(coefficients128_4); + coefficients128_8 = ma_dr_flac__vrevq_s32(coefficients128_8); + } + while (pDecodedSamples < pDecodedSamplesEnd) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[0], &riceParamParts[0]) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[1], &riceParamParts[1]) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[2], &riceParamParts[2]) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[3], &riceParamParts[3])) { + return MA_FALSE; + } + zeroCountPart128 = vld1q_u32(zeroCountParts); + riceParamPart128 = vld1q_u32(riceParamParts); + riceParamPart128 = vandq_u32(riceParamPart128, riceParamMask128); + riceParamPart128 = vorrq_u32(riceParamPart128, vshlq_u32(zeroCountPart128, riceParam128)); + riceParamPart128 = veorq_u32(vshrq_n_u32(riceParamPart128, 1), vaddq_u32(ma_dr_flac__vnotq_u32(vandq_u32(riceParamPart128, one128)), one128)); + for (i = 0; i < 4; i += 1) { + int64x1_t prediction64; + prediction128 = veorq_s64(prediction128, prediction128); + switch (order) + { + case 12: + case 11: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_low_s32(coefficients128_8), vget_low_s32(samples128_8))); + case 10: + case 9: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_high_s32(coefficients128_8), vget_high_s32(samples128_8))); + case 8: + case 7: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_low_s32(coefficients128_4), vget_low_s32(samples128_4))); + case 6: + case 5: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_high_s32(coefficients128_4), vget_high_s32(samples128_4))); + case 4: + case 3: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_low_s32(coefficients128_0), vget_low_s32(samples128_0))); + case 2: + case 1: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_high_s32(coefficients128_0), vget_high_s32(samples128_0))); + } + prediction64 = ma_dr_flac__vhaddq_s64(prediction128); + prediction64 = vshl_s64(prediction64, shift64); + prediction64 = vadd_s64(prediction64, vdup_n_s64(vgetq_lane_u32(riceParamPart128, 0))); + samples128_8 = ma_dr_flac__valignrq_s32_1(samples128_4, samples128_8); + samples128_4 = ma_dr_flac__valignrq_s32_1(samples128_0, samples128_4); + samples128_0 = ma_dr_flac__valignrq_s32_1(vcombine_s32(vreinterpret_s32_s64(prediction64), vdup_n_s32(0)), samples128_0); + riceParamPart128 = ma_dr_flac__valignrq_u32_1(vdupq_n_u32(0), riceParamPart128); + } + vst1q_s32(pDecodedSamples, samples128_0); + pDecodedSamples += 4; + } + i = (count & ~3); + while (i < (int)count) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[0], &riceParamParts[0])) { + return MA_FALSE; + } + riceParamParts[0] &= riceParamMask; + riceParamParts[0] |= (zeroCountParts[0] << riceParam); + riceParamParts[0] = (riceParamParts[0] >> 1) ^ t[riceParamParts[0] & 0x01]; + pDecodedSamples[0] = riceParamParts[0] + ma_dr_flac__calculate_prediction_64(order, shift, coefficients, pDecodedSamples); + i += 1; 
+ pDecodedSamples += 1; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__neon(ma_dr_flac_bs* bs, ma_uint32 bitsPerSample, ma_uint32 count, ma_uint8 riceParam, ma_uint32 lpcOrder, ma_int32 lpcShift, ma_uint32 lpcPrecision, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pSamplesOut != NULL); + if (lpcOrder > 0 && lpcOrder <= 12) { + if (ma_dr_flac__use_64_bit_prediction(bitsPerSample, lpcOrder, lpcPrecision)) { + return ma_dr_flac__decode_samples_with_residual__rice__neon_64(bs, count, riceParam, lpcOrder, lpcShift, coefficients, pSamplesOut); + } else { + return ma_dr_flac__decode_samples_with_residual__rice__neon_32(bs, count, riceParam, lpcOrder, lpcShift, coefficients, pSamplesOut); + } + } else { + return ma_dr_flac__decode_samples_with_residual__rice__scalar(bs, bitsPerSample, count, riceParam, lpcOrder, lpcShift, lpcPrecision, coefficients, pSamplesOut); + } +} +#endif +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice(ma_dr_flac_bs* bs, ma_uint32 bitsPerSample, ma_uint32 count, ma_uint8 riceParam, ma_uint32 lpcOrder, ma_int32 lpcShift, ma_uint32 lpcPrecision, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE41) + if (ma_dr_flac__gIsSSE41Supported) { + return ma_dr_flac__decode_samples_with_residual__rice__sse41(bs, bitsPerSample, count, riceParam, lpcOrder, lpcShift, lpcPrecision, coefficients, pSamplesOut); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported) { + return ma_dr_flac__decode_samples_with_residual__rice__neon(bs, bitsPerSample, count, riceParam, lpcOrder, lpcShift, lpcPrecision, coefficients, pSamplesOut); + } else +#endif + { +#if 0 + return ma_dr_flac__decode_samples_with_residual__rice__reference(bs, bitsPerSample, count, riceParam, lpcOrder, lpcShift, lpcPrecision, coefficients, pSamplesOut); +#else + return ma_dr_flac__decode_samples_with_residual__rice__scalar(bs, bitsPerSample, count, riceParam, lpcOrder, lpcShift, lpcPrecision, coefficients, pSamplesOut); +#endif + } +} +static ma_bool32 ma_dr_flac__read_and_seek_residual__rice(ma_dr_flac_bs* bs, ma_uint32 count, ma_uint8 riceParam) +{ + ma_uint32 i; + MA_DR_FLAC_ASSERT(bs != NULL); + for (i = 0; i < count; ++i) { + if (!ma_dr_flac__seek_rice_parts(bs, riceParam)) { + return MA_FALSE; + } + } + return MA_TRUE; +} +#if defined(__clang__) +__attribute__((no_sanitize("signed-integer-overflow"))) +#endif +static ma_bool32 ma_dr_flac__decode_samples_with_residual__unencoded(ma_dr_flac_bs* bs, ma_uint32 bitsPerSample, ma_uint32 count, ma_uint8 unencodedBitsPerSample, ma_uint32 lpcOrder, ma_int32 lpcShift, ma_uint32 lpcPrecision, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + ma_uint32 i; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(unencodedBitsPerSample <= 31); + MA_DR_FLAC_ASSERT(pSamplesOut != NULL); + for (i = 0; i < count; ++i) { + if (unencodedBitsPerSample > 0) { + if (!ma_dr_flac__read_int32(bs, unencodedBitsPerSample, pSamplesOut + i)) { + return MA_FALSE; + } + } else { + pSamplesOut[i] = 0; + } + if (ma_dr_flac__use_64_bit_prediction(bitsPerSample, lpcOrder, lpcPrecision)) { + pSamplesOut[i] += ma_dr_flac__calculate_prediction_64(lpcOrder, lpcShift, coefficients, pSamplesOut + i); + } else { + pSamplesOut[i] += ma_dr_flac__calculate_prediction_32(lpcOrder, lpcShift, coefficients, pSamplesOut + i); + } + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual(ma_dr_flac_bs* 
bs, ma_uint32 bitsPerSample, ma_uint32 blockSize, ma_uint32 lpcOrder, ma_int32 lpcShift, ma_uint32 lpcPrecision, const ma_int32* coefficients, ma_int32* pDecodedSamples) +{ + ma_uint8 residualMethod; + ma_uint8 partitionOrder; + ma_uint32 samplesInPartition; + ma_uint32 partitionsRemaining; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(blockSize != 0); + MA_DR_FLAC_ASSERT(pDecodedSamples != NULL); + if (!ma_dr_flac__read_uint8(bs, 2, &residualMethod)) { + return MA_FALSE; + } + if (residualMethod != MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE && residualMethod != MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2) { + return MA_FALSE; + } + pDecodedSamples += lpcOrder; + if (!ma_dr_flac__read_uint8(bs, 4, &partitionOrder)) { + return MA_FALSE; + } + if (partitionOrder > 8) { + return MA_FALSE; + } + if ((blockSize / (1 << partitionOrder)) < lpcOrder) { + return MA_FALSE; + } + samplesInPartition = (blockSize / (1 << partitionOrder)) - lpcOrder; + partitionsRemaining = (1 << partitionOrder); + for (;;) { + ma_uint8 riceParam = 0; + if (residualMethod == MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE) { + if (!ma_dr_flac__read_uint8(bs, 4, &riceParam)) { + return MA_FALSE; + } + if (riceParam == 15) { + riceParam = 0xFF; + } + } else if (residualMethod == MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2) { + if (!ma_dr_flac__read_uint8(bs, 5, &riceParam)) { + return MA_FALSE; + } + if (riceParam == 31) { + riceParam = 0xFF; + } + } + if (riceParam != 0xFF) { + if (!ma_dr_flac__decode_samples_with_residual__rice(bs, bitsPerSample, samplesInPartition, riceParam, lpcOrder, lpcShift, lpcPrecision, coefficients, pDecodedSamples)) { + return MA_FALSE; + } + } else { + ma_uint8 unencodedBitsPerSample = 0; + if (!ma_dr_flac__read_uint8(bs, 5, &unencodedBitsPerSample)) { + return MA_FALSE; + } + if (!ma_dr_flac__decode_samples_with_residual__unencoded(bs, bitsPerSample, samplesInPartition, unencodedBitsPerSample, lpcOrder, lpcShift, lpcPrecision, coefficients, pDecodedSamples)) { + return MA_FALSE; + } + } + pDecodedSamples += samplesInPartition; + if (partitionsRemaining == 1) { + break; + } + partitionsRemaining -= 1; + if (partitionOrder != 0) { + samplesInPartition = blockSize / (1 << partitionOrder); + } + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__read_and_seek_residual(ma_dr_flac_bs* bs, ma_uint32 blockSize, ma_uint32 order) +{ + ma_uint8 residualMethod; + ma_uint8 partitionOrder; + ma_uint32 samplesInPartition; + ma_uint32 partitionsRemaining; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(blockSize != 0); + if (!ma_dr_flac__read_uint8(bs, 2, &residualMethod)) { + return MA_FALSE; + } + if (residualMethod != MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE && residualMethod != MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2) { + return MA_FALSE; + } + if (!ma_dr_flac__read_uint8(bs, 4, &partitionOrder)) { + return MA_FALSE; + } + if (partitionOrder > 8) { + return MA_FALSE; + } + if ((blockSize / (1 << partitionOrder)) <= order) { + return MA_FALSE; + } + samplesInPartition = (blockSize / (1 << partitionOrder)) - order; + partitionsRemaining = (1 << partitionOrder); + for (;;) + { + ma_uint8 riceParam = 0; + if (residualMethod == MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE) { + if (!ma_dr_flac__read_uint8(bs, 4, &riceParam)) { + return MA_FALSE; + } + if (riceParam == 15) { + riceParam = 0xFF; + } + } else if (residualMethod == MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2) { + if (!ma_dr_flac__read_uint8(bs, 5, &riceParam)) { + return 
MA_FALSE; + } + if (riceParam == 31) { + riceParam = 0xFF; + } + } + if (riceParam != 0xFF) { + if (!ma_dr_flac__read_and_seek_residual__rice(bs, samplesInPartition, riceParam)) { + return MA_FALSE; + } + } else { + ma_uint8 unencodedBitsPerSample = 0; + if (!ma_dr_flac__read_uint8(bs, 5, &unencodedBitsPerSample)) { + return MA_FALSE; + } + if (!ma_dr_flac__seek_bits(bs, unencodedBitsPerSample * samplesInPartition)) { + return MA_FALSE; + } + } + if (partitionsRemaining == 1) { + break; + } + partitionsRemaining -= 1; + samplesInPartition = blockSize / (1 << partitionOrder); + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples__constant(ma_dr_flac_bs* bs, ma_uint32 blockSize, ma_uint32 subframeBitsPerSample, ma_int32* pDecodedSamples) +{ + ma_uint32 i; + ma_int32 sample; + if (!ma_dr_flac__read_int32(bs, subframeBitsPerSample, &sample)) { + return MA_FALSE; + } + for (i = 0; i < blockSize; ++i) { + pDecodedSamples[i] = sample; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples__verbatim(ma_dr_flac_bs* bs, ma_uint32 blockSize, ma_uint32 subframeBitsPerSample, ma_int32* pDecodedSamples) +{ + ma_uint32 i; + for (i = 0; i < blockSize; ++i) { + ma_int32 sample; + if (!ma_dr_flac__read_int32(bs, subframeBitsPerSample, &sample)) { + return MA_FALSE; + } + pDecodedSamples[i] = sample; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples__fixed(ma_dr_flac_bs* bs, ma_uint32 blockSize, ma_uint32 subframeBitsPerSample, ma_uint8 lpcOrder, ma_int32* pDecodedSamples) +{ + ma_uint32 i; + static ma_int32 lpcCoefficientsTable[5][4] = { + {0, 0, 0, 0}, + {1, 0, 0, 0}, + {2, -1, 0, 0}, + {3, -3, 1, 0}, + {4, -6, 4, -1} + }; + for (i = 0; i < lpcOrder; ++i) { + ma_int32 sample; + if (!ma_dr_flac__read_int32(bs, subframeBitsPerSample, &sample)) { + return MA_FALSE; + } + pDecodedSamples[i] = sample; + } + if (!ma_dr_flac__decode_samples_with_residual(bs, subframeBitsPerSample, blockSize, lpcOrder, 0, 4, lpcCoefficientsTable[lpcOrder], pDecodedSamples)) { + return MA_FALSE; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples__lpc(ma_dr_flac_bs* bs, ma_uint32 blockSize, ma_uint32 bitsPerSample, ma_uint8 lpcOrder, ma_int32* pDecodedSamples) +{ + ma_uint8 i; + ma_uint8 lpcPrecision; + ma_int8 lpcShift; + ma_int32 coefficients[32]; + for (i = 0; i < lpcOrder; ++i) { + ma_int32 sample; + if (!ma_dr_flac__read_int32(bs, bitsPerSample, &sample)) { + return MA_FALSE; + } + pDecodedSamples[i] = sample; + } + if (!ma_dr_flac__read_uint8(bs, 4, &lpcPrecision)) { + return MA_FALSE; + } + if (lpcPrecision == 15) { + return MA_FALSE; + } + lpcPrecision += 1; + if (!ma_dr_flac__read_int8(bs, 5, &lpcShift)) { + return MA_FALSE; + } + if (lpcShift < 0) { + return MA_FALSE; + } + MA_DR_FLAC_ZERO_MEMORY(coefficients, sizeof(coefficients)); + for (i = 0; i < lpcOrder; ++i) { + if (!ma_dr_flac__read_int32(bs, lpcPrecision, coefficients + i)) { + return MA_FALSE; + } + } + if (!ma_dr_flac__decode_samples_with_residual(bs, bitsPerSample, blockSize, lpcOrder, lpcShift, lpcPrecision, coefficients, pDecodedSamples)) { + return MA_FALSE; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__read_next_flac_frame_header(ma_dr_flac_bs* bs, ma_uint8 streaminfoBitsPerSample, ma_dr_flac_frame_header* header) +{ + const ma_uint32 sampleRateTable[12] = {0, 88200, 176400, 192000, 8000, 16000, 22050, 24000, 32000, 44100, 48000, 96000}; + const ma_uint8 bitsPerSampleTable[8] = {0, 8, 12, (ma_uint8)-1, 16, 20, 24, (ma_uint8)-1}; + MA_DR_FLAC_ASSERT(bs != NULL); + 
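+ /* crc8 is seeded with 0xCE, the running CRC-8 of the 14-bit sync code already consumed; candidate headers that fail validation (reserved bits, out-of-range fields, or the CRC-8 check when enabled) are discarded and the sync scan continues. */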
MA_DR_FLAC_ASSERT(header != NULL); + for (;;) { + ma_uint8 crc8 = 0xCE; + ma_uint8 reserved = 0; + ma_uint8 blockingStrategy = 0; + ma_uint8 blockSize = 0; + ma_uint8 sampleRate = 0; + ma_uint8 channelAssignment = 0; + ma_uint8 bitsPerSample = 0; + ma_bool32 isVariableBlockSize; + if (!ma_dr_flac__find_and_seek_to_next_sync_code(bs)) { + return MA_FALSE; + } + if (!ma_dr_flac__read_uint8(bs, 1, &reserved)) { + return MA_FALSE; + } + if (reserved == 1) { + continue; + } + crc8 = ma_dr_flac_crc8(crc8, reserved, 1); + if (!ma_dr_flac__read_uint8(bs, 1, &blockingStrategy)) { + return MA_FALSE; + } + crc8 = ma_dr_flac_crc8(crc8, blockingStrategy, 1); + if (!ma_dr_flac__read_uint8(bs, 4, &blockSize)) { + return MA_FALSE; + } + if (blockSize == 0) { + continue; + } + crc8 = ma_dr_flac_crc8(crc8, blockSize, 4); + if (!ma_dr_flac__read_uint8(bs, 4, &sampleRate)) { + return MA_FALSE; + } + crc8 = ma_dr_flac_crc8(crc8, sampleRate, 4); + if (!ma_dr_flac__read_uint8(bs, 4, &channelAssignment)) { + return MA_FALSE; + } + if (channelAssignment > 10) { + continue; + } + crc8 = ma_dr_flac_crc8(crc8, channelAssignment, 4); + if (!ma_dr_flac__read_uint8(bs, 3, &bitsPerSample)) { + return MA_FALSE; + } + if (bitsPerSample == 3 || bitsPerSample == 7) { + continue; + } + crc8 = ma_dr_flac_crc8(crc8, bitsPerSample, 3); + if (!ma_dr_flac__read_uint8(bs, 1, &reserved)) { + return MA_FALSE; + } + if (reserved == 1) { + continue; + } + crc8 = ma_dr_flac_crc8(crc8, reserved, 1); + isVariableBlockSize = blockingStrategy == 1; + if (isVariableBlockSize) { + ma_uint64 pcmFrameNumber; + ma_result result = ma_dr_flac__read_utf8_coded_number(bs, &pcmFrameNumber, &crc8); + if (result != MA_SUCCESS) { + if (result == MA_AT_END) { + return MA_FALSE; + } else { + continue; + } + } + header->flacFrameNumber = 0; + header->pcmFrameNumber = pcmFrameNumber; + } else { + ma_uint64 flacFrameNumber = 0; + ma_result result = ma_dr_flac__read_utf8_coded_number(bs, &flacFrameNumber, &crc8); + if (result != MA_SUCCESS) { + if (result == MA_AT_END) { + return MA_FALSE; + } else { + continue; + } + } + header->flacFrameNumber = (ma_uint32)flacFrameNumber; + header->pcmFrameNumber = 0; + } + MA_DR_FLAC_ASSERT(blockSize > 0); + if (blockSize == 1) { + header->blockSizeInPCMFrames = 192; + } else if (blockSize <= 5) { + MA_DR_FLAC_ASSERT(blockSize >= 2); + header->blockSizeInPCMFrames = 576 * (1 << (blockSize - 2)); + } else if (blockSize == 6) { + if (!ma_dr_flac__read_uint16(bs, 8, &header->blockSizeInPCMFrames)) { + return MA_FALSE; + } + crc8 = ma_dr_flac_crc8(crc8, header->blockSizeInPCMFrames, 8); + header->blockSizeInPCMFrames += 1; + } else if (blockSize == 7) { + if (!ma_dr_flac__read_uint16(bs, 16, &header->blockSizeInPCMFrames)) { + return MA_FALSE; + } + crc8 = ma_dr_flac_crc8(crc8, header->blockSizeInPCMFrames, 16); + if (header->blockSizeInPCMFrames == 0xFFFF) { + return MA_FALSE; + } + header->blockSizeInPCMFrames += 1; + } else { + MA_DR_FLAC_ASSERT(blockSize >= 8); + header->blockSizeInPCMFrames = 256 * (1 << (blockSize - 8)); + } + if (sampleRate <= 11) { + header->sampleRate = sampleRateTable[sampleRate]; + } else if (sampleRate == 12) { + if (!ma_dr_flac__read_uint32(bs, 8, &header->sampleRate)) { + return MA_FALSE; + } + crc8 = ma_dr_flac_crc8(crc8, header->sampleRate, 8); + header->sampleRate *= 1000; + } else if (sampleRate == 13) { + if (!ma_dr_flac__read_uint32(bs, 16, &header->sampleRate)) { + return MA_FALSE; + } + crc8 = ma_dr_flac_crc8(crc8, header->sampleRate, 16); + } else if (sampleRate == 14) { + if 
(!ma_dr_flac__read_uint32(bs, 16, &header->sampleRate)) { + return MA_FALSE; + } + crc8 = ma_dr_flac_crc8(crc8, header->sampleRate, 16); + header->sampleRate *= 10; + } else { + continue; + } + header->channelAssignment = channelAssignment; + header->bitsPerSample = bitsPerSampleTable[bitsPerSample]; + if (header->bitsPerSample == 0) { + header->bitsPerSample = streaminfoBitsPerSample; + } + if (header->bitsPerSample != streaminfoBitsPerSample) { + return MA_FALSE; + } + if (!ma_dr_flac__read_uint8(bs, 8, &header->crc8)) { + return MA_FALSE; + } +#ifndef MA_DR_FLAC_NO_CRC + if (header->crc8 != crc8) { + continue; + } +#endif + return MA_TRUE; + } +} +static ma_bool32 ma_dr_flac__read_subframe_header(ma_dr_flac_bs* bs, ma_dr_flac_subframe* pSubframe) +{ + ma_uint8 header; + int type; + if (!ma_dr_flac__read_uint8(bs, 8, &header)) { + return MA_FALSE; + } + if ((header & 0x80) != 0) { + return MA_FALSE; + } + type = (header & 0x7E) >> 1; + if (type == 0) { + pSubframe->subframeType = MA_DR_FLAC_SUBFRAME_CONSTANT; + } else if (type == 1) { + pSubframe->subframeType = MA_DR_FLAC_SUBFRAME_VERBATIM; + } else { + if ((type & 0x20) != 0) { + pSubframe->subframeType = MA_DR_FLAC_SUBFRAME_LPC; + pSubframe->lpcOrder = (ma_uint8)(type & 0x1F) + 1; + } else if ((type & 0x08) != 0) { + pSubframe->subframeType = MA_DR_FLAC_SUBFRAME_FIXED; + pSubframe->lpcOrder = (ma_uint8)(type & 0x07); + if (pSubframe->lpcOrder > 4) { + pSubframe->subframeType = MA_DR_FLAC_SUBFRAME_RESERVED; + pSubframe->lpcOrder = 0; + } + } else { + pSubframe->subframeType = MA_DR_FLAC_SUBFRAME_RESERVED; + } + } + if (pSubframe->subframeType == MA_DR_FLAC_SUBFRAME_RESERVED) { + return MA_FALSE; + } + pSubframe->wastedBitsPerSample = 0; + if ((header & 0x01) == 1) { + unsigned int wastedBitsPerSample; + if (!ma_dr_flac__seek_past_next_set_bit(bs, &wastedBitsPerSample)) { + return MA_FALSE; + } + pSubframe->wastedBitsPerSample = (ma_uint8)wastedBitsPerSample + 1; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_subframe(ma_dr_flac_bs* bs, ma_dr_flac_frame* frame, int subframeIndex, ma_int32* pDecodedSamplesOut) +{ + ma_dr_flac_subframe* pSubframe; + ma_uint32 subframeBitsPerSample; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(frame != NULL); + pSubframe = frame->subframes + subframeIndex; + if (!ma_dr_flac__read_subframe_header(bs, pSubframe)) { + return MA_FALSE; + } + subframeBitsPerSample = frame->header.bitsPerSample; + if ((frame->header.channelAssignment == MA_DR_FLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE || frame->header.channelAssignment == MA_DR_FLAC_CHANNEL_ASSIGNMENT_MID_SIDE) && subframeIndex == 1) { + subframeBitsPerSample += 1; + } else if (frame->header.channelAssignment == MA_DR_FLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE && subframeIndex == 0) { + subframeBitsPerSample += 1; + } + if (subframeBitsPerSample > 32) { + return MA_FALSE; + } + if (pSubframe->wastedBitsPerSample >= subframeBitsPerSample) { + return MA_FALSE; + } + subframeBitsPerSample -= pSubframe->wastedBitsPerSample; + pSubframe->pSamplesS32 = pDecodedSamplesOut; + switch (pSubframe->subframeType) + { + case MA_DR_FLAC_SUBFRAME_CONSTANT: + { + ma_dr_flac__decode_samples__constant(bs, frame->header.blockSizeInPCMFrames, subframeBitsPerSample, pSubframe->pSamplesS32); + } break; + case MA_DR_FLAC_SUBFRAME_VERBATIM: + { + ma_dr_flac__decode_samples__verbatim(bs, frame->header.blockSizeInPCMFrames, subframeBitsPerSample, pSubframe->pSamplesS32); + } break; + case MA_DR_FLAC_SUBFRAME_FIXED: + { + ma_dr_flac__decode_samples__fixed(bs, 
frame->header.blockSizeInPCMFrames, subframeBitsPerSample, pSubframe->lpcOrder, pSubframe->pSamplesS32); + } break; + case MA_DR_FLAC_SUBFRAME_LPC: + { + ma_dr_flac__decode_samples__lpc(bs, frame->header.blockSizeInPCMFrames, subframeBitsPerSample, pSubframe->lpcOrder, pSubframe->pSamplesS32); + } break; + default: return MA_FALSE; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__seek_subframe(ma_dr_flac_bs* bs, ma_dr_flac_frame* frame, int subframeIndex) +{ + ma_dr_flac_subframe* pSubframe; + ma_uint32 subframeBitsPerSample; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(frame != NULL); + pSubframe = frame->subframes + subframeIndex; + if (!ma_dr_flac__read_subframe_header(bs, pSubframe)) { + return MA_FALSE; + } + subframeBitsPerSample = frame->header.bitsPerSample; + if ((frame->header.channelAssignment == MA_DR_FLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE || frame->header.channelAssignment == MA_DR_FLAC_CHANNEL_ASSIGNMENT_MID_SIDE) && subframeIndex == 1) { + subframeBitsPerSample += 1; + } else if (frame->header.channelAssignment == MA_DR_FLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE && subframeIndex == 0) { + subframeBitsPerSample += 1; + } + if (pSubframe->wastedBitsPerSample >= subframeBitsPerSample) { + return MA_FALSE; + } + subframeBitsPerSample -= pSubframe->wastedBitsPerSample; + pSubframe->pSamplesS32 = NULL; + switch (pSubframe->subframeType) + { + case MA_DR_FLAC_SUBFRAME_CONSTANT: + { + if (!ma_dr_flac__seek_bits(bs, subframeBitsPerSample)) { + return MA_FALSE; + } + } break; + case MA_DR_FLAC_SUBFRAME_VERBATIM: + { + unsigned int bitsToSeek = frame->header.blockSizeInPCMFrames * subframeBitsPerSample; + if (!ma_dr_flac__seek_bits(bs, bitsToSeek)) { + return MA_FALSE; + } + } break; + case MA_DR_FLAC_SUBFRAME_FIXED: + { + unsigned int bitsToSeek = pSubframe->lpcOrder * subframeBitsPerSample; + if (!ma_dr_flac__seek_bits(bs, bitsToSeek)) { + return MA_FALSE; + } + if (!ma_dr_flac__read_and_seek_residual(bs, frame->header.blockSizeInPCMFrames, pSubframe->lpcOrder)) { + return MA_FALSE; + } + } break; + case MA_DR_FLAC_SUBFRAME_LPC: + { + ma_uint8 lpcPrecision; + unsigned int bitsToSeek = pSubframe->lpcOrder * subframeBitsPerSample; + if (!ma_dr_flac__seek_bits(bs, bitsToSeek)) { + return MA_FALSE; + } + if (!ma_dr_flac__read_uint8(bs, 4, &lpcPrecision)) { + return MA_FALSE; + } + if (lpcPrecision == 15) { + return MA_FALSE; + } + lpcPrecision += 1; + bitsToSeek = (pSubframe->lpcOrder * lpcPrecision) + 5; + if (!ma_dr_flac__seek_bits(bs, bitsToSeek)) { + return MA_FALSE; + } + if (!ma_dr_flac__read_and_seek_residual(bs, frame->header.blockSizeInPCMFrames, pSubframe->lpcOrder)) { + return MA_FALSE; + } + } break; + default: return MA_FALSE; + } + return MA_TRUE; +} +static MA_INLINE ma_uint8 ma_dr_flac__get_channel_count_from_channel_assignment(ma_int8 channelAssignment) +{ + ma_uint8 lookup[] = {1, 2, 3, 4, 5, 6, 7, 8, 2, 2, 2}; + MA_DR_FLAC_ASSERT(channelAssignment <= 10); + return lookup[channelAssignment]; +} +static ma_result ma_dr_flac__decode_flac_frame(ma_dr_flac* pFlac) +{ + int channelCount; + int i; + ma_uint8 paddingSizeInBits; + ma_uint16 desiredCRC16; +#ifndef MA_DR_FLAC_NO_CRC + ma_uint16 actualCRC16; +#endif + MA_DR_FLAC_ZERO_MEMORY(pFlac->currentFLACFrame.subframes, sizeof(pFlac->currentFLACFrame.subframes)); + if (pFlac->currentFLACFrame.header.blockSizeInPCMFrames > pFlac->maxBlockSizeInPCMFrames) { + return MA_ERROR; + } + channelCount = ma_dr_flac__get_channel_count_from_channel_assignment(pFlac->currentFLACFrame.header.channelAssignment); + if (channelCount != 
(int)pFlac->channels) { + return MA_ERROR; + } + for (i = 0; i < channelCount; ++i) { + if (!ma_dr_flac__decode_subframe(&pFlac->bs, &pFlac->currentFLACFrame, i, pFlac->pDecodedSamples + (pFlac->currentFLACFrame.header.blockSizeInPCMFrames * i))) { + return MA_ERROR; + } + } + paddingSizeInBits = (ma_uint8)(MA_DR_FLAC_CACHE_L1_BITS_REMAINING(&pFlac->bs) & 7); + if (paddingSizeInBits > 0) { + ma_uint8 padding = 0; + if (!ma_dr_flac__read_uint8(&pFlac->bs, paddingSizeInBits, &padding)) { + return MA_AT_END; + } + } +#ifndef MA_DR_FLAC_NO_CRC + actualCRC16 = ma_dr_flac__flush_crc16(&pFlac->bs); +#endif + if (!ma_dr_flac__read_uint16(&pFlac->bs, 16, &desiredCRC16)) { + return MA_AT_END; + } +#ifndef MA_DR_FLAC_NO_CRC + if (actualCRC16 != desiredCRC16) { + return MA_CRC_MISMATCH; + } +#endif + pFlac->currentFLACFrame.pcmFramesRemaining = pFlac->currentFLACFrame.header.blockSizeInPCMFrames; + return MA_SUCCESS; +} +static ma_result ma_dr_flac__seek_flac_frame(ma_dr_flac* pFlac) +{ + int channelCount; + int i; + ma_uint16 desiredCRC16; +#ifndef MA_DR_FLAC_NO_CRC + ma_uint16 actualCRC16; +#endif + channelCount = ma_dr_flac__get_channel_count_from_channel_assignment(pFlac->currentFLACFrame.header.channelAssignment); + for (i = 0; i < channelCount; ++i) { + if (!ma_dr_flac__seek_subframe(&pFlac->bs, &pFlac->currentFLACFrame, i)) { + return MA_ERROR; + } + } + if (!ma_dr_flac__seek_bits(&pFlac->bs, MA_DR_FLAC_CACHE_L1_BITS_REMAINING(&pFlac->bs) & 7)) { + return MA_ERROR; + } +#ifndef MA_DR_FLAC_NO_CRC + actualCRC16 = ma_dr_flac__flush_crc16(&pFlac->bs); +#endif + if (!ma_dr_flac__read_uint16(&pFlac->bs, 16, &desiredCRC16)) { + return MA_AT_END; + } +#ifndef MA_DR_FLAC_NO_CRC + if (actualCRC16 != desiredCRC16) { + return MA_CRC_MISMATCH; + } +#endif + return MA_SUCCESS; +} +static ma_bool32 ma_dr_flac__read_and_decode_next_flac_frame(ma_dr_flac* pFlac) +{ + MA_DR_FLAC_ASSERT(pFlac != NULL); + for (;;) { + ma_result result; + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + return MA_FALSE; + } + result = ma_dr_flac__decode_flac_frame(pFlac); + if (result != MA_SUCCESS) { + if (result == MA_CRC_MISMATCH) { + continue; + } else { + return MA_FALSE; + } + } + return MA_TRUE; + } +} +static void ma_dr_flac__get_pcm_frame_range_of_current_flac_frame(ma_dr_flac* pFlac, ma_uint64* pFirstPCMFrame, ma_uint64* pLastPCMFrame) +{ + ma_uint64 firstPCMFrame; + ma_uint64 lastPCMFrame; + MA_DR_FLAC_ASSERT(pFlac != NULL); + firstPCMFrame = pFlac->currentFLACFrame.header.pcmFrameNumber; + if (firstPCMFrame == 0) { + firstPCMFrame = ((ma_uint64)pFlac->currentFLACFrame.header.flacFrameNumber) * pFlac->maxBlockSizeInPCMFrames; + } + lastPCMFrame = firstPCMFrame + pFlac->currentFLACFrame.header.blockSizeInPCMFrames; + if (lastPCMFrame > 0) { + lastPCMFrame -= 1; + } + if (pFirstPCMFrame) { + *pFirstPCMFrame = firstPCMFrame; + } + if (pLastPCMFrame) { + *pLastPCMFrame = lastPCMFrame; + } +} +static ma_bool32 ma_dr_flac__seek_to_first_frame(ma_dr_flac* pFlac) +{ + ma_bool32 result; + MA_DR_FLAC_ASSERT(pFlac != NULL); + result = ma_dr_flac__seek_to_byte(&pFlac->bs, pFlac->firstFLACFramePosInBytes); + MA_DR_FLAC_ZERO_MEMORY(&pFlac->currentFLACFrame, sizeof(pFlac->currentFLACFrame)); + pFlac->currentPCMFrame = 0; + return result; +} +static MA_INLINE ma_result ma_dr_flac__seek_to_next_flac_frame(ma_dr_flac* pFlac) +{ + MA_DR_FLAC_ASSERT(pFlac != NULL); + return ma_dr_flac__seek_flac_frame(pFlac); +} +static ma_uint64 
ma_dr_flac__seek_forward_by_pcm_frames(ma_dr_flac* pFlac, ma_uint64 pcmFramesToSeek) +{ + ma_uint64 pcmFramesRead = 0; + while (pcmFramesToSeek > 0) { + if (pFlac->currentFLACFrame.pcmFramesRemaining == 0) { + if (!ma_dr_flac__read_and_decode_next_flac_frame(pFlac)) { + break; + } + } else { + if (pFlac->currentFLACFrame.pcmFramesRemaining > pcmFramesToSeek) { + pcmFramesRead += pcmFramesToSeek; + pFlac->currentFLACFrame.pcmFramesRemaining -= (ma_uint32)pcmFramesToSeek; + pcmFramesToSeek = 0; + } else { + pcmFramesRead += pFlac->currentFLACFrame.pcmFramesRemaining; + pcmFramesToSeek -= pFlac->currentFLACFrame.pcmFramesRemaining; + pFlac->currentFLACFrame.pcmFramesRemaining = 0; + } + } + } + pFlac->currentPCMFrame += pcmFramesRead; + return pcmFramesRead; +} +static ma_bool32 ma_dr_flac__seek_to_pcm_frame__brute_force(ma_dr_flac* pFlac, ma_uint64 pcmFrameIndex) +{ + ma_bool32 isMidFrame = MA_FALSE; + ma_uint64 runningPCMFrameCount; + MA_DR_FLAC_ASSERT(pFlac != NULL); + if (pcmFrameIndex >= pFlac->currentPCMFrame) { + runningPCMFrameCount = pFlac->currentPCMFrame; + if (pFlac->currentPCMFrame == 0 && pFlac->currentFLACFrame.pcmFramesRemaining == 0) { + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + return MA_FALSE; + } + } else { + isMidFrame = MA_TRUE; + } + } else { + runningPCMFrameCount = 0; + if (!ma_dr_flac__seek_to_first_frame(pFlac)) { + return MA_FALSE; + } + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + return MA_FALSE; + } + } + for (;;) { + ma_uint64 pcmFrameCountInThisFLACFrame; + ma_uint64 firstPCMFrameInFLACFrame = 0; + ma_uint64 lastPCMFrameInFLACFrame = 0; + ma_dr_flac__get_pcm_frame_range_of_current_flac_frame(pFlac, &firstPCMFrameInFLACFrame, &lastPCMFrameInFLACFrame); + pcmFrameCountInThisFLACFrame = (lastPCMFrameInFLACFrame - firstPCMFrameInFLACFrame) + 1; + if (pcmFrameIndex < (runningPCMFrameCount + pcmFrameCountInThisFLACFrame)) { + ma_uint64 pcmFramesToDecode = pcmFrameIndex - runningPCMFrameCount; + if (!isMidFrame) { + ma_result result = ma_dr_flac__decode_flac_frame(pFlac); + if (result == MA_SUCCESS) { + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, pcmFramesToDecode) == pcmFramesToDecode; + } else { + if (result == MA_CRC_MISMATCH) { + goto next_iteration; + } else { + return MA_FALSE; + } + } + } else { + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, pcmFramesToDecode) == pcmFramesToDecode; + } + } else { + if (!isMidFrame) { + ma_result result = ma_dr_flac__seek_to_next_flac_frame(pFlac); + if (result == MA_SUCCESS) { + runningPCMFrameCount += pcmFrameCountInThisFLACFrame; + } else { + if (result == MA_CRC_MISMATCH) { + goto next_iteration; + } else { + return MA_FALSE; + } + } + } else { + runningPCMFrameCount += pFlac->currentFLACFrame.pcmFramesRemaining; + pFlac->currentFLACFrame.pcmFramesRemaining = 0; + isMidFrame = MA_FALSE; + } + if (pcmFrameIndex == pFlac->totalPCMFrameCount && runningPCMFrameCount == pFlac->totalPCMFrameCount) { + return MA_TRUE; + } + } + next_iteration: + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + return MA_FALSE; + } + } +} +#if !defined(MA_DR_FLAC_NO_CRC) +#define MA_DR_FLAC_BINARY_SEARCH_APPROX_COMPRESSION_RATIO 0.6f +static ma_bool32 ma_dr_flac__seek_to_approximate_flac_frame_to_byte(ma_dr_flac* pFlac, ma_uint64 targetByte, ma_uint64 rangeLo, ma_uint64 rangeHi, ma_uint64* pLastSuccessfulSeekOffset) +{ + 
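+ /* Seek to targetByte and try to decode a frame from there; on failure, retry from the midpoint of [rangeLo, rangeHi], shrinking rangeHi each time, and give up once the target stops moving. */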
MA_DR_FLAC_ASSERT(pFlac != NULL); + MA_DR_FLAC_ASSERT(pLastSuccessfulSeekOffset != NULL); + MA_DR_FLAC_ASSERT(targetByte >= rangeLo); + MA_DR_FLAC_ASSERT(targetByte <= rangeHi); + *pLastSuccessfulSeekOffset = pFlac->firstFLACFramePosInBytes; + for (;;) { + ma_uint64 lastTargetByte = targetByte; + if (!ma_dr_flac__seek_to_byte(&pFlac->bs, targetByte)) { + if (targetByte == 0) { + ma_dr_flac__seek_to_first_frame(pFlac); + return MA_FALSE; + } + targetByte = rangeLo + ((rangeHi - rangeLo)/2); + rangeHi = targetByte; + } else { + MA_DR_FLAC_ZERO_MEMORY(&pFlac->currentFLACFrame, sizeof(pFlac->currentFLACFrame)); +#if 1 + if (!ma_dr_flac__read_and_decode_next_flac_frame(pFlac)) { + targetByte = rangeLo + ((rangeHi - rangeLo)/2); + rangeHi = targetByte; + } else { + break; + } +#else + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + targetByte = rangeLo + ((rangeHi - rangeLo)/2); + rangeHi = targetByte; + } else { + break; + } +#endif + } + if(targetByte == lastTargetByte) { + return MA_FALSE; + } + } + ma_dr_flac__get_pcm_frame_range_of_current_flac_frame(pFlac, &pFlac->currentPCMFrame, NULL); + MA_DR_FLAC_ASSERT(targetByte <= rangeHi); + *pLastSuccessfulSeekOffset = targetByte; + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_flac_frame_and_seek_forward_by_pcm_frames(ma_dr_flac* pFlac, ma_uint64 offset) +{ +#if 0 + if (ma_dr_flac__decode_flac_frame(pFlac) != MA_SUCCESS) { + if (ma_dr_flac__read_and_decode_next_flac_frame(pFlac) == MA_FALSE) { + return MA_FALSE; + } + } +#endif + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, offset) == offset; +} +static ma_bool32 ma_dr_flac__seek_to_pcm_frame__binary_search_internal(ma_dr_flac* pFlac, ma_uint64 pcmFrameIndex, ma_uint64 byteRangeLo, ma_uint64 byteRangeHi) +{ + ma_uint64 targetByte; + ma_uint64 pcmRangeLo = pFlac->totalPCMFrameCount; + ma_uint64 pcmRangeHi = 0; + ma_uint64 lastSuccessfulSeekOffset = (ma_uint64)-1; + ma_uint64 closestSeekOffsetBeforeTargetPCMFrame = byteRangeLo; + ma_uint32 seekForwardThreshold = (pFlac->maxBlockSizeInPCMFrames != 0) ? 
pFlac->maxBlockSizeInPCMFrames*2 : 4096; + targetByte = byteRangeLo + (ma_uint64)(((ma_int64)((pcmFrameIndex - pFlac->currentPCMFrame) * pFlac->channels * pFlac->bitsPerSample)/8.0f) * MA_DR_FLAC_BINARY_SEARCH_APPROX_COMPRESSION_RATIO); + if (targetByte > byteRangeHi) { + targetByte = byteRangeHi; + } + for (;;) { + if (ma_dr_flac__seek_to_approximate_flac_frame_to_byte(pFlac, targetByte, byteRangeLo, byteRangeHi, &lastSuccessfulSeekOffset)) { + ma_uint64 newPCMRangeLo; + ma_uint64 newPCMRangeHi; + ma_dr_flac__get_pcm_frame_range_of_current_flac_frame(pFlac, &newPCMRangeLo, &newPCMRangeHi); + if (pcmRangeLo == newPCMRangeLo) { + if (!ma_dr_flac__seek_to_approximate_flac_frame_to_byte(pFlac, closestSeekOffsetBeforeTargetPCMFrame, closestSeekOffsetBeforeTargetPCMFrame, byteRangeHi, &lastSuccessfulSeekOffset)) { + break; + } + if (ma_dr_flac__decode_flac_frame_and_seek_forward_by_pcm_frames(pFlac, pcmFrameIndex - pFlac->currentPCMFrame)) { + return MA_TRUE; + } else { + break; + } + } + pcmRangeLo = newPCMRangeLo; + pcmRangeHi = newPCMRangeHi; + if (pcmRangeLo <= pcmFrameIndex && pcmRangeHi >= pcmFrameIndex) { + if (ma_dr_flac__decode_flac_frame_and_seek_forward_by_pcm_frames(pFlac, pcmFrameIndex - pFlac->currentPCMFrame) ) { + return MA_TRUE; + } else { + break; + } + } else { + const float approxCompressionRatio = (ma_int64)(lastSuccessfulSeekOffset - pFlac->firstFLACFramePosInBytes) / ((ma_int64)(pcmRangeLo * pFlac->channels * pFlac->bitsPerSample)/8.0f); + if (pcmRangeLo > pcmFrameIndex) { + byteRangeHi = lastSuccessfulSeekOffset; + if (byteRangeLo > byteRangeHi) { + byteRangeLo = byteRangeHi; + } + targetByte = byteRangeLo + ((byteRangeHi - byteRangeLo) / 2); + if (targetByte < byteRangeLo) { + targetByte = byteRangeLo; + } + } else { + if ((pcmFrameIndex - pcmRangeLo) < seekForwardThreshold) { + if (ma_dr_flac__decode_flac_frame_and_seek_forward_by_pcm_frames(pFlac, pcmFrameIndex - pFlac->currentPCMFrame)) { + return MA_TRUE; + } else { + break; + } + } else { + byteRangeLo = lastSuccessfulSeekOffset; + if (byteRangeHi < byteRangeLo) { + byteRangeHi = byteRangeLo; + } + targetByte = lastSuccessfulSeekOffset + (ma_uint64)(((ma_int64)((pcmFrameIndex-pcmRangeLo) * pFlac->channels * pFlac->bitsPerSample)/8.0f) * approxCompressionRatio); + if (targetByte > byteRangeHi) { + targetByte = byteRangeHi; + } + if (closestSeekOffsetBeforeTargetPCMFrame < lastSuccessfulSeekOffset) { + closestSeekOffsetBeforeTargetPCMFrame = lastSuccessfulSeekOffset; + } + } + } + } + } else { + break; + } + } + ma_dr_flac__seek_to_first_frame(pFlac); + return MA_FALSE; +} +static ma_bool32 ma_dr_flac__seek_to_pcm_frame__binary_search(ma_dr_flac* pFlac, ma_uint64 pcmFrameIndex) +{ + ma_uint64 byteRangeLo; + ma_uint64 byteRangeHi; + ma_uint32 seekForwardThreshold = (pFlac->maxBlockSizeInPCMFrames != 0) ? 
pFlac->maxBlockSizeInPCMFrames*2 : 4096; + if (ma_dr_flac__seek_to_first_frame(pFlac) == MA_FALSE) { + return MA_FALSE; + } + if (pcmFrameIndex < seekForwardThreshold) { + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, pcmFrameIndex) == pcmFrameIndex; + } + byteRangeLo = pFlac->firstFLACFramePosInBytes; + byteRangeHi = pFlac->firstFLACFramePosInBytes + (ma_uint64)((ma_int64)(pFlac->totalPCMFrameCount * pFlac->channels * pFlac->bitsPerSample)/8.0f); + return ma_dr_flac__seek_to_pcm_frame__binary_search_internal(pFlac, pcmFrameIndex, byteRangeLo, byteRangeHi); +} +#endif +static ma_bool32 ma_dr_flac__seek_to_pcm_frame__seek_table(ma_dr_flac* pFlac, ma_uint64 pcmFrameIndex) +{ + ma_uint32 iClosestSeekpoint = 0; + ma_bool32 isMidFrame = MA_FALSE; + ma_uint64 runningPCMFrameCount; + ma_uint32 iSeekpoint; + MA_DR_FLAC_ASSERT(pFlac != NULL); + if (pFlac->pSeekpoints == NULL || pFlac->seekpointCount == 0) { + return MA_FALSE; + } + if (pFlac->pSeekpoints[0].firstPCMFrame > pcmFrameIndex) { + return MA_FALSE; + } + for (iSeekpoint = 0; iSeekpoint < pFlac->seekpointCount; ++iSeekpoint) { + if (pFlac->pSeekpoints[iSeekpoint].firstPCMFrame >= pcmFrameIndex) { + break; + } + iClosestSeekpoint = iSeekpoint; + } + if (pFlac->pSeekpoints[iClosestSeekpoint].pcmFrameCount == 0 || pFlac->pSeekpoints[iClosestSeekpoint].pcmFrameCount > pFlac->maxBlockSizeInPCMFrames) { + return MA_FALSE; + } + if (pFlac->pSeekpoints[iClosestSeekpoint].firstPCMFrame > pFlac->totalPCMFrameCount && pFlac->totalPCMFrameCount > 0) { + return MA_FALSE; + } +#if !defined(MA_DR_FLAC_NO_CRC) + if (pFlac->totalPCMFrameCount > 0) { + ma_uint64 byteRangeLo; + ma_uint64 byteRangeHi; + byteRangeHi = pFlac->firstFLACFramePosInBytes + (ma_uint64)((ma_int64)(pFlac->totalPCMFrameCount * pFlac->channels * pFlac->bitsPerSample)/8.0f); + byteRangeLo = pFlac->firstFLACFramePosInBytes + pFlac->pSeekpoints[iClosestSeekpoint].flacFrameOffset; + if (iClosestSeekpoint < pFlac->seekpointCount-1) { + ma_uint32 iNextSeekpoint = iClosestSeekpoint + 1; + if (pFlac->pSeekpoints[iClosestSeekpoint].flacFrameOffset >= pFlac->pSeekpoints[iNextSeekpoint].flacFrameOffset || pFlac->pSeekpoints[iNextSeekpoint].pcmFrameCount == 0) { + return MA_FALSE; + } + if (pFlac->pSeekpoints[iNextSeekpoint].firstPCMFrame != (((ma_uint64)0xFFFFFFFF << 32) | 0xFFFFFFFF)) { + byteRangeHi = pFlac->firstFLACFramePosInBytes + pFlac->pSeekpoints[iNextSeekpoint].flacFrameOffset - 1; + } + } + if (ma_dr_flac__seek_to_byte(&pFlac->bs, pFlac->firstFLACFramePosInBytes + pFlac->pSeekpoints[iClosestSeekpoint].flacFrameOffset)) { + if (ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + ma_dr_flac__get_pcm_frame_range_of_current_flac_frame(pFlac, &pFlac->currentPCMFrame, NULL); + if (ma_dr_flac__seek_to_pcm_frame__binary_search_internal(pFlac, pcmFrameIndex, byteRangeLo, byteRangeHi)) { + return MA_TRUE; + } + } + } + } +#endif + if (pcmFrameIndex >= pFlac->currentPCMFrame && pFlac->pSeekpoints[iClosestSeekpoint].firstPCMFrame <= pFlac->currentPCMFrame) { + runningPCMFrameCount = pFlac->currentPCMFrame; + if (pFlac->currentPCMFrame == 0 && pFlac->currentFLACFrame.pcmFramesRemaining == 0) { + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + return MA_FALSE; + } + } else { + isMidFrame = MA_TRUE; + } + } else { + runningPCMFrameCount = pFlac->pSeekpoints[iClosestSeekpoint].firstPCMFrame; + if (!ma_dr_flac__seek_to_byte(&pFlac->bs, pFlac->firstFLACFramePosInBytes + 
pFlac->pSeekpoints[iClosestSeekpoint].flacFrameOffset)) { + return MA_FALSE; + } + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + return MA_FALSE; + } + } + for (;;) { + ma_uint64 pcmFrameCountInThisFLACFrame; + ma_uint64 firstPCMFrameInFLACFrame = 0; + ma_uint64 lastPCMFrameInFLACFrame = 0; + ma_dr_flac__get_pcm_frame_range_of_current_flac_frame(pFlac, &firstPCMFrameInFLACFrame, &lastPCMFrameInFLACFrame); + pcmFrameCountInThisFLACFrame = (lastPCMFrameInFLACFrame - firstPCMFrameInFLACFrame) + 1; + if (pcmFrameIndex < (runningPCMFrameCount + pcmFrameCountInThisFLACFrame)) { + ma_uint64 pcmFramesToDecode = pcmFrameIndex - runningPCMFrameCount; + if (!isMidFrame) { + ma_result result = ma_dr_flac__decode_flac_frame(pFlac); + if (result == MA_SUCCESS) { + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, pcmFramesToDecode) == pcmFramesToDecode; + } else { + if (result == MA_CRC_MISMATCH) { + goto next_iteration; + } else { + return MA_FALSE; + } + } + } else { + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, pcmFramesToDecode) == pcmFramesToDecode; + } + } else { + if (!isMidFrame) { + ma_result result = ma_dr_flac__seek_to_next_flac_frame(pFlac); + if (result == MA_SUCCESS) { + runningPCMFrameCount += pcmFrameCountInThisFLACFrame; + } else { + if (result == MA_CRC_MISMATCH) { + goto next_iteration; + } else { + return MA_FALSE; + } + } + } else { + runningPCMFrameCount += pFlac->currentFLACFrame.pcmFramesRemaining; + pFlac->currentFLACFrame.pcmFramesRemaining = 0; + isMidFrame = MA_FALSE; + } + if (pcmFrameIndex == pFlac->totalPCMFrameCount && runningPCMFrameCount == pFlac->totalPCMFrameCount) { + return MA_TRUE; + } + } + next_iteration: + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + return MA_FALSE; + } + } +} +#ifndef MA_DR_FLAC_NO_OGG +typedef struct +{ + ma_uint8 capturePattern[4]; + ma_uint8 structureVersion; + ma_uint8 headerType; + ma_uint64 granulePosition; + ma_uint32 serialNumber; + ma_uint32 sequenceNumber; + ma_uint32 checksum; + ma_uint8 segmentCount; + ma_uint8 segmentTable[255]; +} ma_dr_flac_ogg_page_header; +#endif +typedef struct +{ + ma_dr_flac_read_proc onRead; + ma_dr_flac_seek_proc onSeek; + ma_dr_flac_meta_proc onMeta; + ma_dr_flac_container container; + void* pUserData; + void* pUserDataMD; + ma_uint32 sampleRate; + ma_uint8 channels; + ma_uint8 bitsPerSample; + ma_uint64 totalPCMFrameCount; + ma_uint16 maxBlockSizeInPCMFrames; + ma_uint64 runningFilePos; + ma_bool32 hasStreamInfoBlock; + ma_bool32 hasMetadataBlocks; + ma_dr_flac_bs bs; + ma_dr_flac_frame_header firstFrameHeader; +#ifndef MA_DR_FLAC_NO_OGG + ma_uint32 oggSerial; + ma_uint64 oggFirstBytePos; + ma_dr_flac_ogg_page_header oggBosHeader; +#endif +} ma_dr_flac_init_info; +static MA_INLINE void ma_dr_flac__decode_block_header(ma_uint32 blockHeader, ma_uint8* isLastBlock, ma_uint8* blockType, ma_uint32* blockSize) +{ + blockHeader = ma_dr_flac__be2host_32(blockHeader); + *isLastBlock = (ma_uint8)((blockHeader & 0x80000000UL) >> 31); + *blockType = (ma_uint8)((blockHeader & 0x7F000000UL) >> 24); + *blockSize = (blockHeader & 0x00FFFFFFUL); +} +static MA_INLINE ma_bool32 ma_dr_flac__read_and_decode_block_header(ma_dr_flac_read_proc onRead, void* pUserData, ma_uint8* isLastBlock, ma_uint8* blockType, ma_uint32* blockSize) +{ + ma_uint32 blockHeader; + *blockSize = 0; + if (onRead(pUserData, &blockHeader, 4) != 4) { + return MA_FALSE; + } + 
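+ /* The metadata block header is a single big-endian 32-bit word: 1 bit last-block flag, 7 bits block type, 24 bits block size. */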
ma_dr_flac__decode_block_header(blockHeader, isLastBlock, blockType, blockSize); + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__read_streaminfo(ma_dr_flac_read_proc onRead, void* pUserData, ma_dr_flac_streaminfo* pStreamInfo) +{ + ma_uint32 blockSizes; + ma_uint64 frameSizes = 0; + ma_uint64 importantProps; + ma_uint8 md5[16]; + if (onRead(pUserData, &blockSizes, 4) != 4) { + return MA_FALSE; + } + if (onRead(pUserData, &frameSizes, 6) != 6) { + return MA_FALSE; + } + if (onRead(pUserData, &importantProps, 8) != 8) { + return MA_FALSE; + } + if (onRead(pUserData, md5, sizeof(md5)) != sizeof(md5)) { + return MA_FALSE; + } + blockSizes = ma_dr_flac__be2host_32(blockSizes); + frameSizes = ma_dr_flac__be2host_64(frameSizes); + importantProps = ma_dr_flac__be2host_64(importantProps); + pStreamInfo->minBlockSizeInPCMFrames = (ma_uint16)((blockSizes & 0xFFFF0000) >> 16); + pStreamInfo->maxBlockSizeInPCMFrames = (ma_uint16) (blockSizes & 0x0000FFFF); + pStreamInfo->minFrameSizeInPCMFrames = (ma_uint32)((frameSizes & (((ma_uint64)0x00FFFFFF << 16) << 24)) >> 40); + pStreamInfo->maxFrameSizeInPCMFrames = (ma_uint32)((frameSizes & (((ma_uint64)0x00FFFFFF << 16) << 0)) >> 16); + pStreamInfo->sampleRate = (ma_uint32)((importantProps & (((ma_uint64)0x000FFFFF << 16) << 28)) >> 44); + pStreamInfo->channels = (ma_uint8 )((importantProps & (((ma_uint64)0x0000000E << 16) << 24)) >> 41) + 1; + pStreamInfo->bitsPerSample = (ma_uint8 )((importantProps & (((ma_uint64)0x0000001F << 16) << 20)) >> 36) + 1; + pStreamInfo->totalPCMFrameCount = ((importantProps & ((((ma_uint64)0x0000000F << 16) << 16) | 0xFFFFFFFF))); + MA_DR_FLAC_COPY_MEMORY(pStreamInfo->md5, md5, sizeof(md5)); + return MA_TRUE; +} +static void* ma_dr_flac__malloc_default(size_t sz, void* pUserData) +{ + (void)pUserData; + return MA_DR_FLAC_MALLOC(sz); +} +static void* ma_dr_flac__realloc_default(void* p, size_t sz, void* pUserData) +{ + (void)pUserData; + return MA_DR_FLAC_REALLOC(p, sz); +} +static void ma_dr_flac__free_default(void* p, void* pUserData) +{ + (void)pUserData; + MA_DR_FLAC_FREE(p); +} +static void* ma_dr_flac__malloc_from_callbacks(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + if (pAllocationCallbacks->onMalloc != NULL) { + return pAllocationCallbacks->onMalloc(sz, pAllocationCallbacks->pUserData); + } + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(NULL, sz, pAllocationCallbacks->pUserData); + } + return NULL; +} +static void* ma_dr_flac__realloc_from_callbacks(void* p, size_t szNew, size_t szOld, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(p, szNew, pAllocationCallbacks->pUserData); + } + if (pAllocationCallbacks->onMalloc != NULL && pAllocationCallbacks->onFree != NULL) { + void* p2; + p2 = pAllocationCallbacks->onMalloc(szNew, pAllocationCallbacks->pUserData); + if (p2 == NULL) { + return NULL; + } + if (p != NULL) { + MA_DR_FLAC_COPY_MEMORY(p2, p, szOld); + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } + return p2; + } + return NULL; +} +static void ma_dr_flac__free_from_callbacks(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (p == NULL || pAllocationCallbacks == NULL) { + return; + } + if (pAllocationCallbacks->onFree != NULL) { + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } 
+} +static ma_bool32 ma_dr_flac__read_and_decode_metadata(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, void* pUserData, void* pUserDataMD, ma_uint64* pFirstFramePos, ma_uint64* pSeektablePos, ma_uint32* pSeekpointCount, ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_uint64 runningFilePos = 42; + ma_uint64 seektablePos = 0; + ma_uint32 seektableSize = 0; + for (;;) { + ma_dr_flac_metadata metadata; + ma_uint8 isLastBlock = 0; + ma_uint8 blockType; + ma_uint32 blockSize; + if (ma_dr_flac__read_and_decode_block_header(onRead, pUserData, &isLastBlock, &blockType, &blockSize) == MA_FALSE) { + return MA_FALSE; + } + runningFilePos += 4; + metadata.type = blockType; + metadata.pRawData = NULL; + metadata.rawDataSize = 0; + switch (blockType) + { + case MA_DR_FLAC_METADATA_BLOCK_TYPE_APPLICATION: + { + if (blockSize < 4) { + return MA_FALSE; + } + if (onMeta) { + void* pRawData = ma_dr_flac__malloc_from_callbacks(blockSize, pAllocationCallbacks); + if (pRawData == NULL) { + return MA_FALSE; + } + if (onRead(pUserData, pRawData, blockSize) != blockSize) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.pRawData = pRawData; + metadata.rawDataSize = blockSize; + metadata.data.application.id = ma_dr_flac__be2host_32(*(ma_uint32*)pRawData); + metadata.data.application.pData = (const void*)((ma_uint8*)pRawData + sizeof(ma_uint32)); + metadata.data.application.dataSize = blockSize - sizeof(ma_uint32); + onMeta(pUserDataMD, &metadata); + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + } + } break; + case MA_DR_FLAC_METADATA_BLOCK_TYPE_SEEKTABLE: + { + seektablePos = runningFilePos; + seektableSize = blockSize; + if (onMeta) { + ma_uint32 seekpointCount; + ma_uint32 iSeekpoint; + void* pRawData; + seekpointCount = blockSize/MA_DR_FLAC_SEEKPOINT_SIZE_IN_BYTES; + pRawData = ma_dr_flac__malloc_from_callbacks(seekpointCount * sizeof(ma_dr_flac_seekpoint), pAllocationCallbacks); + if (pRawData == NULL) { + return MA_FALSE; + } + for (iSeekpoint = 0; iSeekpoint < seekpointCount; ++iSeekpoint) { + ma_dr_flac_seekpoint* pSeekpoint = (ma_dr_flac_seekpoint*)pRawData + iSeekpoint; + if (onRead(pUserData, pSeekpoint, MA_DR_FLAC_SEEKPOINT_SIZE_IN_BYTES) != MA_DR_FLAC_SEEKPOINT_SIZE_IN_BYTES) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + pSeekpoint->firstPCMFrame = ma_dr_flac__be2host_64(pSeekpoint->firstPCMFrame); + pSeekpoint->flacFrameOffset = ma_dr_flac__be2host_64(pSeekpoint->flacFrameOffset); + pSeekpoint->pcmFrameCount = ma_dr_flac__be2host_16(pSeekpoint->pcmFrameCount); + } + metadata.pRawData = pRawData; + metadata.rawDataSize = blockSize; + metadata.data.seektable.seekpointCount = seekpointCount; + metadata.data.seektable.pSeekpoints = (const ma_dr_flac_seekpoint*)pRawData; + onMeta(pUserDataMD, &metadata); + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + } + } break; + case MA_DR_FLAC_METADATA_BLOCK_TYPE_VORBIS_COMMENT: + { + if (blockSize < 8) { + return MA_FALSE; + } + if (onMeta) { + void* pRawData; + const char* pRunningData; + const char* pRunningDataEnd; + ma_uint32 i; + pRawData = ma_dr_flac__malloc_from_callbacks(blockSize, pAllocationCallbacks); + if (pRawData == NULL) { + return MA_FALSE; + } + if (onRead(pUserData, pRawData, blockSize) != blockSize) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.pRawData = pRawData; + metadata.rawDataSize = blockSize; + 
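+ /* Vorbis comment layout: 32-bit vendor length, vendor string, 32-bit comment count, then one length-prefixed comment per entry; all integers are little-endian. */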
pRunningData = (const char*)pRawData; + pRunningDataEnd = (const char*)pRawData + blockSize; + metadata.data.vorbis_comment.vendorLength = ma_dr_flac__le2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + if ((pRunningDataEnd - pRunningData) - 4 < (ma_int64)metadata.data.vorbis_comment.vendorLength) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.data.vorbis_comment.vendor = pRunningData; pRunningData += metadata.data.vorbis_comment.vendorLength; + metadata.data.vorbis_comment.commentCount = ma_dr_flac__le2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + if ((pRunningDataEnd - pRunningData) / sizeof(ma_uint32) < metadata.data.vorbis_comment.commentCount) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.data.vorbis_comment.pComments = pRunningData; + for (i = 0; i < metadata.data.vorbis_comment.commentCount; ++i) { + ma_uint32 commentLength; + if (pRunningDataEnd - pRunningData < 4) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + commentLength = ma_dr_flac__le2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + if (pRunningDataEnd - pRunningData < (ma_int64)commentLength) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + pRunningData += commentLength; + } + onMeta(pUserDataMD, &metadata); + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + } + } break; + case MA_DR_FLAC_METADATA_BLOCK_TYPE_CUESHEET: + { + if (blockSize < 396) { + return MA_FALSE; + } + if (onMeta) { + void* pRawData; + const char* pRunningData; + const char* pRunningDataEnd; + size_t bufferSize; + ma_uint8 iTrack; + ma_uint8 iIndex; + void* pTrackData; + pRawData = ma_dr_flac__malloc_from_callbacks(blockSize, pAllocationCallbacks); + if (pRawData == NULL) { + return MA_FALSE; + } + if (onRead(pUserData, pRawData, blockSize) != blockSize) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.pRawData = pRawData; + metadata.rawDataSize = blockSize; + pRunningData = (const char*)pRawData; + pRunningDataEnd = (const char*)pRawData + blockSize; + MA_DR_FLAC_COPY_MEMORY(metadata.data.cuesheet.catalog, pRunningData, 128); pRunningData += 128; + metadata.data.cuesheet.leadInSampleCount = ma_dr_flac__be2host_64(*(const ma_uint64*)pRunningData); pRunningData += 8; + metadata.data.cuesheet.isCD = (pRunningData[0] & 0x80) != 0; pRunningData += 259; + metadata.data.cuesheet.trackCount = pRunningData[0]; pRunningData += 1; + metadata.data.cuesheet.pTrackData = NULL; + { + const char* pRunningDataSaved = pRunningData; + bufferSize = metadata.data.cuesheet.trackCount * MA_DR_FLAC_CUESHEET_TRACK_SIZE_IN_BYTES; + for (iTrack = 0; iTrack < metadata.data.cuesheet.trackCount; ++iTrack) { + ma_uint8 indexCount; + ma_uint32 indexPointSize; + if (pRunningDataEnd - pRunningData < MA_DR_FLAC_CUESHEET_TRACK_SIZE_IN_BYTES) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + pRunningData += 35; + indexCount = pRunningData[0]; + pRunningData += 1; + bufferSize += indexCount * sizeof(ma_dr_flac_cuesheet_track_index); + indexPointSize = indexCount * MA_DR_FLAC_CUESHEET_TRACK_INDEX_SIZE_IN_BYTES; + if (pRunningDataEnd - pRunningData < (ma_int64)indexPointSize) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + pRunningData += indexPointSize; + } + pRunningData = pRunningDataSaved; + } + { + 
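+ /* Second pass: copy each track record and its index points into the contiguous buffer sized by the validation pass above, byte-swapping index offsets to host order. */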
char* pRunningTrackData; + pTrackData = ma_dr_flac__malloc_from_callbacks(bufferSize, pAllocationCallbacks); + if (pTrackData == NULL) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + pRunningTrackData = (char*)pTrackData; + for (iTrack = 0; iTrack < metadata.data.cuesheet.trackCount; ++iTrack) { + ma_uint8 indexCount; + MA_DR_FLAC_COPY_MEMORY(pRunningTrackData, pRunningData, MA_DR_FLAC_CUESHEET_TRACK_SIZE_IN_BYTES); + pRunningData += MA_DR_FLAC_CUESHEET_TRACK_SIZE_IN_BYTES-1; + pRunningTrackData += MA_DR_FLAC_CUESHEET_TRACK_SIZE_IN_BYTES-1; + indexCount = pRunningData[0]; + pRunningData += 1; + pRunningTrackData += 1; + for (iIndex = 0; iIndex < indexCount; ++iIndex) { + ma_dr_flac_cuesheet_track_index* pTrackIndex = (ma_dr_flac_cuesheet_track_index*)pRunningTrackData; + MA_DR_FLAC_COPY_MEMORY(pRunningTrackData, pRunningData, MA_DR_FLAC_CUESHEET_TRACK_INDEX_SIZE_IN_BYTES); + pRunningData += MA_DR_FLAC_CUESHEET_TRACK_INDEX_SIZE_IN_BYTES; + pRunningTrackData += sizeof(ma_dr_flac_cuesheet_track_index); + pTrackIndex->offset = ma_dr_flac__be2host_64(pTrackIndex->offset); + } + } + metadata.data.cuesheet.pTrackData = pTrackData; + } + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + pRawData = NULL; + onMeta(pUserDataMD, &metadata); + ma_dr_flac__free_from_callbacks(pTrackData, pAllocationCallbacks); + pTrackData = NULL; + } + } break; + case MA_DR_FLAC_METADATA_BLOCK_TYPE_PICTURE: + { + if (blockSize < 32) { + return MA_FALSE; + } + if (onMeta) { + void* pRawData; + const char* pRunningData; + const char* pRunningDataEnd; + pRawData = ma_dr_flac__malloc_from_callbacks(blockSize, pAllocationCallbacks); + if (pRawData == NULL) { + return MA_FALSE; + } + if (onRead(pUserData, pRawData, blockSize) != blockSize) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.pRawData = pRawData; + metadata.rawDataSize = blockSize; + pRunningData = (const char*)pRawData; + pRunningDataEnd = (const char*)pRawData + blockSize; + metadata.data.picture.type = ma_dr_flac__be2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + metadata.data.picture.mimeLength = ma_dr_flac__be2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + if ((pRunningDataEnd - pRunningData) - 24 < (ma_int64)metadata.data.picture.mimeLength) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.data.picture.mime = pRunningData; pRunningData += metadata.data.picture.mimeLength; + metadata.data.picture.descriptionLength = ma_dr_flac__be2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + if ((pRunningDataEnd - pRunningData) - 20 < (ma_int64)metadata.data.picture.descriptionLength) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.data.picture.description = pRunningData; pRunningData += metadata.data.picture.descriptionLength; + metadata.data.picture.width = ma_dr_flac__be2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + metadata.data.picture.height = ma_dr_flac__be2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + metadata.data.picture.colorDepth = ma_dr_flac__be2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + metadata.data.picture.indexColorCount = ma_dr_flac__be2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + metadata.data.picture.pictureDataSize = ma_dr_flac__be2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + metadata.data.picture.pPictureData = (const 
ma_uint8*)pRunningData; + if (pRunningDataEnd - pRunningData < (ma_int64)metadata.data.picture.pictureDataSize) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + onMeta(pUserDataMD, &metadata); + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + } + } break; + case MA_DR_FLAC_METADATA_BLOCK_TYPE_PADDING: + { + if (onMeta) { + metadata.data.padding.unused = 0; + if (!onSeek(pUserData, blockSize, ma_dr_flac_seek_origin_current)) { + isLastBlock = MA_TRUE; + } else { + onMeta(pUserDataMD, &metadata); + } + } + } break; + case MA_DR_FLAC_METADATA_BLOCK_TYPE_INVALID: + { + if (onMeta) { + if (!onSeek(pUserData, blockSize, ma_dr_flac_seek_origin_current)) { + isLastBlock = MA_TRUE; + } + } + } break; + default: + { + if (onMeta) { + void* pRawData = ma_dr_flac__malloc_from_callbacks(blockSize, pAllocationCallbacks); + if (pRawData == NULL) { + return MA_FALSE; + } + if (onRead(pUserData, pRawData, blockSize) != blockSize) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.pRawData = pRawData; + metadata.rawDataSize = blockSize; + onMeta(pUserDataMD, &metadata); + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + } + } break; + } + if (onMeta == NULL && blockSize > 0) { + if (!onSeek(pUserData, blockSize, ma_dr_flac_seek_origin_current)) { + isLastBlock = MA_TRUE; + } + } + runningFilePos += blockSize; + if (isLastBlock) { + break; + } + } + *pSeektablePos = seektablePos; + *pSeekpointCount = seektableSize / MA_DR_FLAC_SEEKPOINT_SIZE_IN_BYTES; + *pFirstFramePos = runningFilePos; + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__init_private__native(ma_dr_flac_init_info* pInit, ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, void* pUserData, void* pUserDataMD, ma_bool32 relaxed) +{ + ma_uint8 isLastBlock; + ma_uint8 blockType; + ma_uint32 blockSize; + (void)onSeek; + pInit->container = ma_dr_flac_container_native; + if (!ma_dr_flac__read_and_decode_block_header(onRead, pUserData, &isLastBlock, &blockType, &blockSize)) { + return MA_FALSE; + } + if (blockType != MA_DR_FLAC_METADATA_BLOCK_TYPE_STREAMINFO || blockSize != 34) { + if (!relaxed) { + return MA_FALSE; + } else { + pInit->hasStreamInfoBlock = MA_FALSE; + pInit->hasMetadataBlocks = MA_FALSE; + if (!ma_dr_flac__read_next_flac_frame_header(&pInit->bs, 0, &pInit->firstFrameHeader)) { + return MA_FALSE; + } + if (pInit->firstFrameHeader.bitsPerSample == 0) { + return MA_FALSE; + } + pInit->sampleRate = pInit->firstFrameHeader.sampleRate; + pInit->channels = ma_dr_flac__get_channel_count_from_channel_assignment(pInit->firstFrameHeader.channelAssignment); + pInit->bitsPerSample = pInit->firstFrameHeader.bitsPerSample; + pInit->maxBlockSizeInPCMFrames = 65535; + return MA_TRUE; + } + } else { + ma_dr_flac_streaminfo streaminfo; + if (!ma_dr_flac__read_streaminfo(onRead, pUserData, &streaminfo)) { + return MA_FALSE; + } + pInit->hasStreamInfoBlock = MA_TRUE; + pInit->sampleRate = streaminfo.sampleRate; + pInit->channels = streaminfo.channels; + pInit->bitsPerSample = streaminfo.bitsPerSample; + pInit->totalPCMFrameCount = streaminfo.totalPCMFrameCount; + pInit->maxBlockSizeInPCMFrames = streaminfo.maxBlockSizeInPCMFrames; + pInit->hasMetadataBlocks = !isLastBlock; + if (onMeta) { + ma_dr_flac_metadata metadata; + metadata.type = MA_DR_FLAC_METADATA_BLOCK_TYPE_STREAMINFO; + metadata.pRawData = NULL; + metadata.rawDataSize = 0; + metadata.data.streaminfo = streaminfo; + 
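+            /*
+            STREAMINFO is reported through the same onMeta callback as every other block,
+            but with pRawData left NULL because its fields have already been decoded into
+            metadata.data.streaminfo. A minimal callback might look like the sketch below
+            (illustrative only; my_on_meta and MyUserData are hypothetical application-side
+            names):
+
+                static void my_on_meta(void* pUserData, ma_dr_flac_metadata* pMetadata)
+                {
+                    MyUserData* pMyData = (MyUserData*)pUserData;
+                    if (pMetadata->type == MA_DR_FLAC_METADATA_BLOCK_TYPE_STREAMINFO) {
+                        pMyData->sampleRate = pMetadata->data.streaminfo.sampleRate;
+                        pMyData->channels   = pMetadata->data.streaminfo.channels;
+                    }
+                }
+            */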
onMeta(pUserDataMD, &metadata); + } + return MA_TRUE; + } +} +#ifndef MA_DR_FLAC_NO_OGG +#define MA_DR_FLAC_OGG_MAX_PAGE_SIZE 65307 +#define MA_DR_FLAC_OGG_CAPTURE_PATTERN_CRC32 1605413199 +typedef enum +{ + ma_dr_flac_ogg_recover_on_crc_mismatch, + ma_dr_flac_ogg_fail_on_crc_mismatch +} ma_dr_flac_ogg_crc_mismatch_recovery; +#ifndef MA_DR_FLAC_NO_CRC +static ma_uint32 ma_dr_flac__crc32_table[] = { + 0x00000000L, 0x04C11DB7L, 0x09823B6EL, 0x0D4326D9L, + 0x130476DCL, 0x17C56B6BL, 0x1A864DB2L, 0x1E475005L, + 0x2608EDB8L, 0x22C9F00FL, 0x2F8AD6D6L, 0x2B4BCB61L, + 0x350C9B64L, 0x31CD86D3L, 0x3C8EA00AL, 0x384FBDBDL, + 0x4C11DB70L, 0x48D0C6C7L, 0x4593E01EL, 0x4152FDA9L, + 0x5F15ADACL, 0x5BD4B01BL, 0x569796C2L, 0x52568B75L, + 0x6A1936C8L, 0x6ED82B7FL, 0x639B0DA6L, 0x675A1011L, + 0x791D4014L, 0x7DDC5DA3L, 0x709F7B7AL, 0x745E66CDL, + 0x9823B6E0L, 0x9CE2AB57L, 0x91A18D8EL, 0x95609039L, + 0x8B27C03CL, 0x8FE6DD8BL, 0x82A5FB52L, 0x8664E6E5L, + 0xBE2B5B58L, 0xBAEA46EFL, 0xB7A96036L, 0xB3687D81L, + 0xAD2F2D84L, 0xA9EE3033L, 0xA4AD16EAL, 0xA06C0B5DL, + 0xD4326D90L, 0xD0F37027L, 0xDDB056FEL, 0xD9714B49L, + 0xC7361B4CL, 0xC3F706FBL, 0xCEB42022L, 0xCA753D95L, + 0xF23A8028L, 0xF6FB9D9FL, 0xFBB8BB46L, 0xFF79A6F1L, + 0xE13EF6F4L, 0xE5FFEB43L, 0xE8BCCD9AL, 0xEC7DD02DL, + 0x34867077L, 0x30476DC0L, 0x3D044B19L, 0x39C556AEL, + 0x278206ABL, 0x23431B1CL, 0x2E003DC5L, 0x2AC12072L, + 0x128E9DCFL, 0x164F8078L, 0x1B0CA6A1L, 0x1FCDBB16L, + 0x018AEB13L, 0x054BF6A4L, 0x0808D07DL, 0x0CC9CDCAL, + 0x7897AB07L, 0x7C56B6B0L, 0x71159069L, 0x75D48DDEL, + 0x6B93DDDBL, 0x6F52C06CL, 0x6211E6B5L, 0x66D0FB02L, + 0x5E9F46BFL, 0x5A5E5B08L, 0x571D7DD1L, 0x53DC6066L, + 0x4D9B3063L, 0x495A2DD4L, 0x44190B0DL, 0x40D816BAL, + 0xACA5C697L, 0xA864DB20L, 0xA527FDF9L, 0xA1E6E04EL, + 0xBFA1B04BL, 0xBB60ADFCL, 0xB6238B25L, 0xB2E29692L, + 0x8AAD2B2FL, 0x8E6C3698L, 0x832F1041L, 0x87EE0DF6L, + 0x99A95DF3L, 0x9D684044L, 0x902B669DL, 0x94EA7B2AL, + 0xE0B41DE7L, 0xE4750050L, 0xE9362689L, 0xEDF73B3EL, + 0xF3B06B3BL, 0xF771768CL, 0xFA325055L, 0xFEF34DE2L, + 0xC6BCF05FL, 0xC27DEDE8L, 0xCF3ECB31L, 0xCBFFD686L, + 0xD5B88683L, 0xD1799B34L, 0xDC3ABDEDL, 0xD8FBA05AL, + 0x690CE0EEL, 0x6DCDFD59L, 0x608EDB80L, 0x644FC637L, + 0x7A089632L, 0x7EC98B85L, 0x738AAD5CL, 0x774BB0EBL, + 0x4F040D56L, 0x4BC510E1L, 0x46863638L, 0x42472B8FL, + 0x5C007B8AL, 0x58C1663DL, 0x558240E4L, 0x51435D53L, + 0x251D3B9EL, 0x21DC2629L, 0x2C9F00F0L, 0x285E1D47L, + 0x36194D42L, 0x32D850F5L, 0x3F9B762CL, 0x3B5A6B9BL, + 0x0315D626L, 0x07D4CB91L, 0x0A97ED48L, 0x0E56F0FFL, + 0x1011A0FAL, 0x14D0BD4DL, 0x19939B94L, 0x1D528623L, + 0xF12F560EL, 0xF5EE4BB9L, 0xF8AD6D60L, 0xFC6C70D7L, + 0xE22B20D2L, 0xE6EA3D65L, 0xEBA91BBCL, 0xEF68060BL, + 0xD727BBB6L, 0xD3E6A601L, 0xDEA580D8L, 0xDA649D6FL, + 0xC423CD6AL, 0xC0E2D0DDL, 0xCDA1F604L, 0xC960EBB3L, + 0xBD3E8D7EL, 0xB9FF90C9L, 0xB4BCB610L, 0xB07DABA7L, + 0xAE3AFBA2L, 0xAAFBE615L, 0xA7B8C0CCL, 0xA379DD7BL, + 0x9B3660C6L, 0x9FF77D71L, 0x92B45BA8L, 0x9675461FL, + 0x8832161AL, 0x8CF30BADL, 0x81B02D74L, 0x857130C3L, + 0x5D8A9099L, 0x594B8D2EL, 0x5408ABF7L, 0x50C9B640L, + 0x4E8EE645L, 0x4A4FFBF2L, 0x470CDD2BL, 0x43CDC09CL, + 0x7B827D21L, 0x7F436096L, 0x7200464FL, 0x76C15BF8L, + 0x68860BFDL, 0x6C47164AL, 0x61043093L, 0x65C52D24L, + 0x119B4BE9L, 0x155A565EL, 0x18197087L, 0x1CD86D30L, + 0x029F3D35L, 0x065E2082L, 0x0B1D065BL, 0x0FDC1BECL, + 0x3793A651L, 0x3352BBE6L, 0x3E119D3FL, 0x3AD08088L, + 0x2497D08DL, 0x2056CD3AL, 0x2D15EBE3L, 0x29D4F654L, + 0xC5A92679L, 0xC1683BCEL, 0xCC2B1D17L, 0xC8EA00A0L, + 0xD6AD50A5L, 0xD26C4D12L, 0xDF2F6BCBL, 0xDBEE767CL, + 0xE3A1CBC1L, 
0xE760D676L, 0xEA23F0AFL, 0xEEE2ED18L, + 0xF0A5BD1DL, 0xF464A0AAL, 0xF9278673L, 0xFDE69BC4L, + 0x89B8FD09L, 0x8D79E0BEL, 0x803AC667L, 0x84FBDBD0L, + 0x9ABC8BD5L, 0x9E7D9662L, 0x933EB0BBL, 0x97FFAD0CL, + 0xAFB010B1L, 0xAB710D06L, 0xA6322BDFL, 0xA2F33668L, + 0xBCB4666DL, 0xB8757BDAL, 0xB5365D03L, 0xB1F740B4L +}; +#endif +static MA_INLINE ma_uint32 ma_dr_flac_crc32_byte(ma_uint32 crc32, ma_uint8 data) +{ +#ifndef MA_DR_FLAC_NO_CRC + return (crc32 << 8) ^ ma_dr_flac__crc32_table[(ma_uint8)((crc32 >> 24) & 0xFF) ^ data]; +#else + (void)data; + return crc32; +#endif +} +#if 0 +static MA_INLINE ma_uint32 ma_dr_flac_crc32_uint32(ma_uint32 crc32, ma_uint32 data) +{ + crc32 = ma_dr_flac_crc32_byte(crc32, (ma_uint8)((data >> 24) & 0xFF)); + crc32 = ma_dr_flac_crc32_byte(crc32, (ma_uint8)((data >> 16) & 0xFF)); + crc32 = ma_dr_flac_crc32_byte(crc32, (ma_uint8)((data >> 8) & 0xFF)); + crc32 = ma_dr_flac_crc32_byte(crc32, (ma_uint8)((data >> 0) & 0xFF)); + return crc32; +} +static MA_INLINE ma_uint32 ma_dr_flac_crc32_uint64(ma_uint32 crc32, ma_uint64 data) +{ + crc32 = ma_dr_flac_crc32_uint32(crc32, (ma_uint32)((data >> 32) & 0xFFFFFFFF)); + crc32 = ma_dr_flac_crc32_uint32(crc32, (ma_uint32)((data >> 0) & 0xFFFFFFFF)); + return crc32; +} +#endif +static MA_INLINE ma_uint32 ma_dr_flac_crc32_buffer(ma_uint32 crc32, ma_uint8* pData, ma_uint32 dataSize) +{ + ma_uint32 i; + for (i = 0; i < dataSize; ++i) { + crc32 = ma_dr_flac_crc32_byte(crc32, pData[i]); + } + return crc32; +} +static MA_INLINE ma_bool32 ma_dr_flac_ogg__is_capture_pattern(ma_uint8 pattern[4]) +{ + return pattern[0] == 'O' && pattern[1] == 'g' && pattern[2] == 'g' && pattern[3] == 'S'; +} +static MA_INLINE ma_uint32 ma_dr_flac_ogg__get_page_header_size(ma_dr_flac_ogg_page_header* pHeader) +{ + return 27 + pHeader->segmentCount; +} +static MA_INLINE ma_uint32 ma_dr_flac_ogg__get_page_body_size(ma_dr_flac_ogg_page_header* pHeader) +{ + ma_uint32 pageBodySize = 0; + int i; + for (i = 0; i < pHeader->segmentCount; ++i) { + pageBodySize += pHeader->segmentTable[i]; + } + return pageBodySize; +} +static ma_result ma_dr_flac_ogg__read_page_header_after_capture_pattern(ma_dr_flac_read_proc onRead, void* pUserData, ma_dr_flac_ogg_page_header* pHeader, ma_uint32* pBytesRead, ma_uint32* pCRC32) +{ + ma_uint8 data[23]; + ma_uint32 i; + MA_DR_FLAC_ASSERT(*pCRC32 == MA_DR_FLAC_OGG_CAPTURE_PATTERN_CRC32); + if (onRead(pUserData, data, 23) != 23) { + return MA_AT_END; + } + *pBytesRead += 23; + pHeader->capturePattern[0] = 'O'; + pHeader->capturePattern[1] = 'g'; + pHeader->capturePattern[2] = 'g'; + pHeader->capturePattern[3] = 'S'; + pHeader->structureVersion = data[0]; + pHeader->headerType = data[1]; + MA_DR_FLAC_COPY_MEMORY(&pHeader->granulePosition, &data[ 2], 8); + MA_DR_FLAC_COPY_MEMORY(&pHeader->serialNumber, &data[10], 4); + MA_DR_FLAC_COPY_MEMORY(&pHeader->sequenceNumber, &data[14], 4); + MA_DR_FLAC_COPY_MEMORY(&pHeader->checksum, &data[18], 4); + pHeader->segmentCount = data[22]; + data[18] = 0; + data[19] = 0; + data[20] = 0; + data[21] = 0; + for (i = 0; i < 23; ++i) { + *pCRC32 = ma_dr_flac_crc32_byte(*pCRC32, data[i]); + } + if (onRead(pUserData, pHeader->segmentTable, pHeader->segmentCount) != pHeader->segmentCount) { + return MA_AT_END; + } + *pBytesRead += pHeader->segmentCount; + for (i = 0; i < pHeader->segmentCount; ++i) { + *pCRC32 = ma_dr_flac_crc32_byte(*pCRC32, pHeader->segmentTable[i]); + } + return MA_SUCCESS; +} +static ma_result ma_dr_flac_ogg__read_page_header(ma_dr_flac_read_proc onRead, void* pUserData, 
ma_dr_flac_ogg_page_header* pHeader, ma_uint32* pBytesRead, ma_uint32* pCRC32) +{ + ma_uint8 id[4]; + *pBytesRead = 0; + if (onRead(pUserData, id, 4) != 4) { + return MA_AT_END; + } + *pBytesRead += 4; + for (;;) { + if (ma_dr_flac_ogg__is_capture_pattern(id)) { + ma_result result; + *pCRC32 = MA_DR_FLAC_OGG_CAPTURE_PATTERN_CRC32; + result = ma_dr_flac_ogg__read_page_header_after_capture_pattern(onRead, pUserData, pHeader, pBytesRead, pCRC32); + if (result == MA_SUCCESS) { + return MA_SUCCESS; + } else { + if (result == MA_CRC_MISMATCH) { + continue; + } else { + return result; + } + } + } else { + id[0] = id[1]; + id[1] = id[2]; + id[2] = id[3]; + if (onRead(pUserData, &id[3], 1) != 1) { + return MA_AT_END; + } + *pBytesRead += 1; + } + } +} +typedef struct +{ + ma_dr_flac_read_proc onRead; + ma_dr_flac_seek_proc onSeek; + void* pUserData; + ma_uint64 currentBytePos; + ma_uint64 firstBytePos; + ma_uint32 serialNumber; + ma_dr_flac_ogg_page_header bosPageHeader; + ma_dr_flac_ogg_page_header currentPageHeader; + ma_uint32 bytesRemainingInPage; + ma_uint32 pageDataSize; + ma_uint8 pageData[MA_DR_FLAC_OGG_MAX_PAGE_SIZE]; +} ma_dr_flac_oggbs; +static size_t ma_dr_flac_oggbs__read_physical(ma_dr_flac_oggbs* oggbs, void* bufferOut, size_t bytesToRead) +{ + size_t bytesActuallyRead = oggbs->onRead(oggbs->pUserData, bufferOut, bytesToRead); + oggbs->currentBytePos += bytesActuallyRead; + return bytesActuallyRead; +} +static ma_bool32 ma_dr_flac_oggbs__seek_physical(ma_dr_flac_oggbs* oggbs, ma_uint64 offset, ma_dr_flac_seek_origin origin) +{ + if (origin == ma_dr_flac_seek_origin_start) { + if (offset <= 0x7FFFFFFF) { + if (!oggbs->onSeek(oggbs->pUserData, (int)offset, ma_dr_flac_seek_origin_start)) { + return MA_FALSE; + } + oggbs->currentBytePos = offset; + return MA_TRUE; + } else { + if (!oggbs->onSeek(oggbs->pUserData, 0x7FFFFFFF, ma_dr_flac_seek_origin_start)) { + return MA_FALSE; + } + oggbs->currentBytePos = offset; + return ma_dr_flac_oggbs__seek_physical(oggbs, offset - 0x7FFFFFFF, ma_dr_flac_seek_origin_current); + } + } else { + while (offset > 0x7FFFFFFF) { + if (!oggbs->onSeek(oggbs->pUserData, 0x7FFFFFFF, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + oggbs->currentBytePos += 0x7FFFFFFF; + offset -= 0x7FFFFFFF; + } + if (!oggbs->onSeek(oggbs->pUserData, (int)offset, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + oggbs->currentBytePos += offset; + return MA_TRUE; + } +} +static ma_bool32 ma_dr_flac_oggbs__goto_next_page(ma_dr_flac_oggbs* oggbs, ma_dr_flac_ogg_crc_mismatch_recovery recoveryMethod) +{ + ma_dr_flac_ogg_page_header header; + for (;;) { + ma_uint32 crc32 = 0; + ma_uint32 bytesRead; + ma_uint32 pageBodySize; +#ifndef MA_DR_FLAC_NO_CRC + ma_uint32 actualCRC32; +#endif + if (ma_dr_flac_ogg__read_page_header(oggbs->onRead, oggbs->pUserData, &header, &bytesRead, &crc32) != MA_SUCCESS) { + return MA_FALSE; + } + oggbs->currentBytePos += bytesRead; + pageBodySize = ma_dr_flac_ogg__get_page_body_size(&header); + if (pageBodySize > MA_DR_FLAC_OGG_MAX_PAGE_SIZE) { + continue; + } + if (header.serialNumber != oggbs->serialNumber) { + if (pageBodySize > 0 && !ma_dr_flac_oggbs__seek_physical(oggbs, pageBodySize, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + continue; + } + if (ma_dr_flac_oggbs__read_physical(oggbs, oggbs->pageData, pageBodySize) != pageBodySize) { + return MA_FALSE; + } + oggbs->pageDataSize = pageBodySize; +#ifndef MA_DR_FLAC_NO_CRC + actualCRC32 = ma_dr_flac_crc32_buffer(crc32, oggbs->pageData, oggbs->pageDataSize); + if 
(actualCRC32 != header.checksum) { + if (recoveryMethod == ma_dr_flac_ogg_recover_on_crc_mismatch) { + continue; + } else { + ma_dr_flac_oggbs__goto_next_page(oggbs, ma_dr_flac_ogg_recover_on_crc_mismatch); + return MA_FALSE; + } + } +#else + (void)recoveryMethod; +#endif + oggbs->currentPageHeader = header; + oggbs->bytesRemainingInPage = pageBodySize; + return MA_TRUE; + } +} +#if 0 +static ma_uint8 ma_dr_flac_oggbs__get_current_segment_index(ma_dr_flac_oggbs* oggbs, ma_uint8* pBytesRemainingInSeg) +{ + ma_uint32 bytesConsumedInPage = ma_dr_flac_ogg__get_page_body_size(&oggbs->currentPageHeader) - oggbs->bytesRemainingInPage; + ma_uint8 iSeg = 0; + ma_uint32 iByte = 0; + while (iByte < bytesConsumedInPage) { + ma_uint8 segmentSize = oggbs->currentPageHeader.segmentTable[iSeg]; + if (iByte + segmentSize > bytesConsumedInPage) { + break; + } else { + iSeg += 1; + iByte += segmentSize; + } + } + *pBytesRemainingInSeg = oggbs->currentPageHeader.segmentTable[iSeg] - (ma_uint8)(bytesConsumedInPage - iByte); + return iSeg; +} +static ma_bool32 ma_dr_flac_oggbs__seek_to_next_packet(ma_dr_flac_oggbs* oggbs) +{ + for (;;) { + ma_bool32 atEndOfPage = MA_FALSE; + ma_uint8 bytesRemainingInSeg; + ma_uint8 iFirstSeg = ma_dr_flac_oggbs__get_current_segment_index(oggbs, &bytesRemainingInSeg); + ma_uint32 bytesToEndOfPacketOrPage = bytesRemainingInSeg; + for (ma_uint8 iSeg = iFirstSeg; iSeg < oggbs->currentPageHeader.segmentCount; ++iSeg) { + ma_uint8 segmentSize = oggbs->currentPageHeader.segmentTable[iSeg]; + if (segmentSize < 255) { + if (iSeg == oggbs->currentPageHeader.segmentCount-1) { + atEndOfPage = MA_TRUE; + } + break; + } + bytesToEndOfPacketOrPage += segmentSize; + } + ma_dr_flac_oggbs__seek_physical(oggbs, bytesToEndOfPacketOrPage, ma_dr_flac_seek_origin_current); + oggbs->bytesRemainingInPage -= bytesToEndOfPacketOrPage; + if (atEndOfPage) { + if (!ma_dr_flac_oggbs__goto_next_page(oggbs)) { + return MA_FALSE; + } + if ((oggbs->currentPageHeader.headerType & 0x01) == 0) { + return MA_TRUE; + } + } else { + return MA_TRUE; + } + } +} +static ma_bool32 ma_dr_flac_oggbs__seek_to_next_frame(ma_dr_flac_oggbs* oggbs) +{ + return ma_dr_flac_oggbs__seek_to_next_packet(oggbs); +} +#endif +static size_t ma_dr_flac__on_read_ogg(void* pUserData, void* bufferOut, size_t bytesToRead) +{ + ma_dr_flac_oggbs* oggbs = (ma_dr_flac_oggbs*)pUserData; + ma_uint8* pRunningBufferOut = (ma_uint8*)bufferOut; + size_t bytesRead = 0; + MA_DR_FLAC_ASSERT(oggbs != NULL); + MA_DR_FLAC_ASSERT(pRunningBufferOut != NULL); + while (bytesRead < bytesToRead) { + size_t bytesRemainingToRead = bytesToRead - bytesRead; + if (oggbs->bytesRemainingInPage >= bytesRemainingToRead) { + MA_DR_FLAC_COPY_MEMORY(pRunningBufferOut, oggbs->pageData + (oggbs->pageDataSize - oggbs->bytesRemainingInPage), bytesRemainingToRead); + bytesRead += bytesRemainingToRead; + oggbs->bytesRemainingInPage -= (ma_uint32)bytesRemainingToRead; + break; + } + if (oggbs->bytesRemainingInPage > 0) { + MA_DR_FLAC_COPY_MEMORY(pRunningBufferOut, oggbs->pageData + (oggbs->pageDataSize - oggbs->bytesRemainingInPage), oggbs->bytesRemainingInPage); + bytesRead += oggbs->bytesRemainingInPage; + pRunningBufferOut += oggbs->bytesRemainingInPage; + oggbs->bytesRemainingInPage = 0; + } + MA_DR_FLAC_ASSERT(bytesRemainingToRead > 0); + if (!ma_dr_flac_oggbs__goto_next_page(oggbs, ma_dr_flac_ogg_recover_on_crc_mismatch)) { + break; + } + } + return bytesRead; +} +static ma_bool32 ma_dr_flac__on_seek_ogg(void* pUserData, int offset, ma_dr_flac_seek_origin origin) +{ + 
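+    /*
+    Seeking at the Ogg layer is emulated rather than forwarded straight to the client's
+    onSeek: byte offsets here refer to the logical FLAC stream, which is spread across
+    Ogg page bodies. A seek from the start rewinds to the first data page and then
+    re-issues the request as a relative seek, and relative seeks consume
+    bytesRemainingInPage and pull in further pages with ma_dr_flac_oggbs__goto_next_page
+    until the offset is exhausted.
+    */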
ma_dr_flac_oggbs* oggbs = (ma_dr_flac_oggbs*)pUserData; + int bytesSeeked = 0; + MA_DR_FLAC_ASSERT(oggbs != NULL); + MA_DR_FLAC_ASSERT(offset >= 0); + if (origin == ma_dr_flac_seek_origin_start) { + if (!ma_dr_flac_oggbs__seek_physical(oggbs, (int)oggbs->firstBytePos, ma_dr_flac_seek_origin_start)) { + return MA_FALSE; + } + if (!ma_dr_flac_oggbs__goto_next_page(oggbs, ma_dr_flac_ogg_fail_on_crc_mismatch)) { + return MA_FALSE; + } + return ma_dr_flac__on_seek_ogg(pUserData, offset, ma_dr_flac_seek_origin_current); + } + MA_DR_FLAC_ASSERT(origin == ma_dr_flac_seek_origin_current); + while (bytesSeeked < offset) { + int bytesRemainingToSeek = offset - bytesSeeked; + MA_DR_FLAC_ASSERT(bytesRemainingToSeek >= 0); + if (oggbs->bytesRemainingInPage >= (size_t)bytesRemainingToSeek) { + bytesSeeked += bytesRemainingToSeek; + (void)bytesSeeked; + oggbs->bytesRemainingInPage -= bytesRemainingToSeek; + break; + } + if (oggbs->bytesRemainingInPage > 0) { + bytesSeeked += (int)oggbs->bytesRemainingInPage; + oggbs->bytesRemainingInPage = 0; + } + MA_DR_FLAC_ASSERT(bytesRemainingToSeek > 0); + if (!ma_dr_flac_oggbs__goto_next_page(oggbs, ma_dr_flac_ogg_fail_on_crc_mismatch)) { + return MA_FALSE; + } + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac_ogg__seek_to_pcm_frame(ma_dr_flac* pFlac, ma_uint64 pcmFrameIndex) +{ + ma_dr_flac_oggbs* oggbs = (ma_dr_flac_oggbs*)pFlac->_oggbs; + ma_uint64 originalBytePos; + ma_uint64 runningGranulePosition; + ma_uint64 runningFrameBytePos; + ma_uint64 runningPCMFrameCount; + MA_DR_FLAC_ASSERT(oggbs != NULL); + originalBytePos = oggbs->currentBytePos; + if (!ma_dr_flac__seek_to_byte(&pFlac->bs, pFlac->firstFLACFramePosInBytes)) { + return MA_FALSE; + } + oggbs->bytesRemainingInPage = 0; + runningGranulePosition = 0; + for (;;) { + if (!ma_dr_flac_oggbs__goto_next_page(oggbs, ma_dr_flac_ogg_recover_on_crc_mismatch)) { + ma_dr_flac_oggbs__seek_physical(oggbs, originalBytePos, ma_dr_flac_seek_origin_start); + return MA_FALSE; + } + runningFrameBytePos = oggbs->currentBytePos - ma_dr_flac_ogg__get_page_header_size(&oggbs->currentPageHeader) - oggbs->pageDataSize; + if (oggbs->currentPageHeader.granulePosition >= pcmFrameIndex) { + break; + } + if ((oggbs->currentPageHeader.headerType & 0x01) == 0) { + if (oggbs->currentPageHeader.segmentTable[0] >= 2) { + ma_uint8 firstBytesInPage[2]; + firstBytesInPage[0] = oggbs->pageData[0]; + firstBytesInPage[1] = oggbs->pageData[1]; + if ((firstBytesInPage[0] == 0xFF) && (firstBytesInPage[1] & 0xFC) == 0xF8) { + runningGranulePosition = oggbs->currentPageHeader.granulePosition; + } + continue; + } + } + } + if (!ma_dr_flac_oggbs__seek_physical(oggbs, runningFrameBytePos, ma_dr_flac_seek_origin_start)) { + return MA_FALSE; + } + if (!ma_dr_flac_oggbs__goto_next_page(oggbs, ma_dr_flac_ogg_recover_on_crc_mismatch)) { + return MA_FALSE; + } + runningPCMFrameCount = runningGranulePosition; + for (;;) { + ma_uint64 firstPCMFrameInFLACFrame = 0; + ma_uint64 lastPCMFrameInFLACFrame = 0; + ma_uint64 pcmFrameCountInThisFrame; + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + return MA_FALSE; + } + ma_dr_flac__get_pcm_frame_range_of_current_flac_frame(pFlac, &firstPCMFrameInFLACFrame, &lastPCMFrameInFLACFrame); + pcmFrameCountInThisFrame = (lastPCMFrameInFLACFrame - firstPCMFrameInFLACFrame) + 1; + if (pcmFrameIndex == pFlac->totalPCMFrameCount && (runningPCMFrameCount + pcmFrameCountInThisFrame) == pFlac->totalPCMFrameCount) { + ma_result result = 
ma_dr_flac__decode_flac_frame(pFlac); + if (result == MA_SUCCESS) { + pFlac->currentPCMFrame = pcmFrameIndex; + pFlac->currentFLACFrame.pcmFramesRemaining = 0; + return MA_TRUE; + } else { + return MA_FALSE; + } + } + if (pcmFrameIndex < (runningPCMFrameCount + pcmFrameCountInThisFrame)) { + ma_result result = ma_dr_flac__decode_flac_frame(pFlac); + if (result == MA_SUCCESS) { + ma_uint64 pcmFramesToDecode = (size_t)(pcmFrameIndex - runningPCMFrameCount); + if (pcmFramesToDecode == 0) { + return MA_TRUE; + } + pFlac->currentPCMFrame = runningPCMFrameCount; + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, pcmFramesToDecode) == pcmFramesToDecode; + } else { + if (result == MA_CRC_MISMATCH) { + continue; + } else { + return MA_FALSE; + } + } + } else { + ma_result result = ma_dr_flac__seek_to_next_flac_frame(pFlac); + if (result == MA_SUCCESS) { + runningPCMFrameCount += pcmFrameCountInThisFrame; + } else { + if (result == MA_CRC_MISMATCH) { + continue; + } else { + return MA_FALSE; + } + } + } + } +} +static ma_bool32 ma_dr_flac__init_private__ogg(ma_dr_flac_init_info* pInit, ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, void* pUserData, void* pUserDataMD, ma_bool32 relaxed) +{ + ma_dr_flac_ogg_page_header header; + ma_uint32 crc32 = MA_DR_FLAC_OGG_CAPTURE_PATTERN_CRC32; + ma_uint32 bytesRead = 0; + (void)relaxed; + pInit->container = ma_dr_flac_container_ogg; + pInit->oggFirstBytePos = 0; + if (ma_dr_flac_ogg__read_page_header_after_capture_pattern(onRead, pUserData, &header, &bytesRead, &crc32) != MA_SUCCESS) { + return MA_FALSE; + } + pInit->runningFilePos += bytesRead; + for (;;) { + int pageBodySize; + if ((header.headerType & 0x02) == 0) { + return MA_FALSE; + } + pageBodySize = ma_dr_flac_ogg__get_page_body_size(&header); + if (pageBodySize == 51) { + ma_uint32 bytesRemainingInPage = pageBodySize; + ma_uint8 packetType; + if (onRead(pUserData, &packetType, 1) != 1) { + return MA_FALSE; + } + bytesRemainingInPage -= 1; + if (packetType == 0x7F) { + ma_uint8 sig[4]; + if (onRead(pUserData, sig, 4) != 4) { + return MA_FALSE; + } + bytesRemainingInPage -= 4; + if (sig[0] == 'F' && sig[1] == 'L' && sig[2] == 'A' && sig[3] == 'C') { + ma_uint8 mappingVersion[2]; + if (onRead(pUserData, mappingVersion, 2) != 2) { + return MA_FALSE; + } + if (mappingVersion[0] != 1) { + return MA_FALSE; + } + if (!onSeek(pUserData, 2, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + if (onRead(pUserData, sig, 4) != 4) { + return MA_FALSE; + } + if (sig[0] == 'f' && sig[1] == 'L' && sig[2] == 'a' && sig[3] == 'C') { + ma_dr_flac_streaminfo streaminfo; + ma_uint8 isLastBlock; + ma_uint8 blockType; + ma_uint32 blockSize; + if (!ma_dr_flac__read_and_decode_block_header(onRead, pUserData, &isLastBlock, &blockType, &blockSize)) { + return MA_FALSE; + } + if (blockType != MA_DR_FLAC_METADATA_BLOCK_TYPE_STREAMINFO || blockSize != 34) { + return MA_FALSE; + } + if (ma_dr_flac__read_streaminfo(onRead, pUserData, &streaminfo)) { + pInit->hasStreamInfoBlock = MA_TRUE; + pInit->sampleRate = streaminfo.sampleRate; + pInit->channels = streaminfo.channels; + pInit->bitsPerSample = streaminfo.bitsPerSample; + pInit->totalPCMFrameCount = streaminfo.totalPCMFrameCount; + pInit->maxBlockSizeInPCMFrames = streaminfo.maxBlockSizeInPCMFrames; + pInit->hasMetadataBlocks = !isLastBlock; + if (onMeta) { + ma_dr_flac_metadata metadata; + metadata.type = MA_DR_FLAC_METADATA_BLOCK_TYPE_STREAMINFO; + metadata.pRawData = NULL; + metadata.rawDataSize = 0; + metadata.data.streaminfo = 
streaminfo; + onMeta(pUserDataMD, &metadata); + } + pInit->runningFilePos += pageBodySize; + pInit->oggFirstBytePos = pInit->runningFilePos - 79; + pInit->oggSerial = header.serialNumber; + pInit->oggBosHeader = header; + break; + } else { + return MA_FALSE; + } + } else { + return MA_FALSE; + } + } else { + if (!onSeek(pUserData, bytesRemainingInPage, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + } + } else { + if (!onSeek(pUserData, bytesRemainingInPage, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + } + } else { + if (!onSeek(pUserData, pageBodySize, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + } + pInit->runningFilePos += pageBodySize; + if (ma_dr_flac_ogg__read_page_header(onRead, pUserData, &header, &bytesRead, &crc32) != MA_SUCCESS) { + return MA_FALSE; + } + pInit->runningFilePos += bytesRead; + } + pInit->hasMetadataBlocks = MA_TRUE; + return MA_TRUE; +} +#endif +static ma_bool32 ma_dr_flac__init_private(ma_dr_flac_init_info* pInit, ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, ma_dr_flac_container container, void* pUserData, void* pUserDataMD) +{ + ma_bool32 relaxed; + ma_uint8 id[4]; + if (pInit == NULL || onRead == NULL || onSeek == NULL) { + return MA_FALSE; + } + MA_DR_FLAC_ZERO_MEMORY(pInit, sizeof(*pInit)); + pInit->onRead = onRead; + pInit->onSeek = onSeek; + pInit->onMeta = onMeta; + pInit->container = container; + pInit->pUserData = pUserData; + pInit->pUserDataMD = pUserDataMD; + pInit->bs.onRead = onRead; + pInit->bs.onSeek = onSeek; + pInit->bs.pUserData = pUserData; + ma_dr_flac__reset_cache(&pInit->bs); + relaxed = container != ma_dr_flac_container_unknown; + for (;;) { + if (onRead(pUserData, id, 4) != 4) { + return MA_FALSE; + } + pInit->runningFilePos += 4; + if (id[0] == 'I' && id[1] == 'D' && id[2] == '3') { + ma_uint8 header[6]; + ma_uint8 flags; + ma_uint32 headerSize; + if (onRead(pUserData, header, 6) != 6) { + return MA_FALSE; + } + pInit->runningFilePos += 6; + flags = header[1]; + MA_DR_FLAC_COPY_MEMORY(&headerSize, header+2, 4); + headerSize = ma_dr_flac__unsynchsafe_32(ma_dr_flac__be2host_32(headerSize)); + if (flags & 0x10) { + headerSize += 10; + } + if (!onSeek(pUserData, headerSize, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + pInit->runningFilePos += headerSize; + } else { + break; + } + } + if (id[0] == 'f' && id[1] == 'L' && id[2] == 'a' && id[3] == 'C') { + return ma_dr_flac__init_private__native(pInit, onRead, onSeek, onMeta, pUserData, pUserDataMD, relaxed); + } +#ifndef MA_DR_FLAC_NO_OGG + if (id[0] == 'O' && id[1] == 'g' && id[2] == 'g' && id[3] == 'S') { + return ma_dr_flac__init_private__ogg(pInit, onRead, onSeek, onMeta, pUserData, pUserDataMD, relaxed); + } +#endif + if (relaxed) { + if (container == ma_dr_flac_container_native) { + return ma_dr_flac__init_private__native(pInit, onRead, onSeek, onMeta, pUserData, pUserDataMD, relaxed); + } +#ifndef MA_DR_FLAC_NO_OGG + if (container == ma_dr_flac_container_ogg) { + return ma_dr_flac__init_private__ogg(pInit, onRead, onSeek, onMeta, pUserData, pUserDataMD, relaxed); + } +#endif + } + return MA_FALSE; +} +static void ma_dr_flac__init_from_info(ma_dr_flac* pFlac, const ma_dr_flac_init_info* pInit) +{ + MA_DR_FLAC_ASSERT(pFlac != NULL); + MA_DR_FLAC_ASSERT(pInit != NULL); + MA_DR_FLAC_ZERO_MEMORY(pFlac, sizeof(*pFlac)); + pFlac->bs = pInit->bs; + pFlac->onMeta = pInit->onMeta; + pFlac->pUserDataMD = pInit->pUserDataMD; + pFlac->maxBlockSizeInPCMFrames = pInit->maxBlockSizeInPCMFrames; + 
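+    /*
+    maxBlockSizeInPCMFrames drives the size of the decoded-sample buffer allocated by
+    ma_dr_flac_open_with_metadata_private() below: the block size is rounded up to a
+    whole number of SIMD vectors per channel. For example, assuming a 64-byte vector
+    (16 ma_int32 samples), a 4096-frame block needs 4096/16 = 256 vectors, i.e. 256*64
+    bytes per channel. When no STREAMINFO block is present, the relaxed native path
+    above falls back to the worst case of 65535 frames.
+    */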
pFlac->sampleRate = pInit->sampleRate; + pFlac->channels = (ma_uint8)pInit->channels; + pFlac->bitsPerSample = (ma_uint8)pInit->bitsPerSample; + pFlac->totalPCMFrameCount = pInit->totalPCMFrameCount; + pFlac->container = pInit->container; +} +static ma_dr_flac* ma_dr_flac_open_with_metadata_private(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, ma_dr_flac_container container, void* pUserData, void* pUserDataMD, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac_init_info init; + ma_uint32 allocationSize; + ma_uint32 wholeSIMDVectorCountPerChannel; + ma_uint32 decodedSamplesAllocationSize; +#ifndef MA_DR_FLAC_NO_OGG + ma_dr_flac_oggbs* pOggbs = NULL; +#endif + ma_uint64 firstFramePos; + ma_uint64 seektablePos; + ma_uint32 seekpointCount; + ma_allocation_callbacks allocationCallbacks; + ma_dr_flac* pFlac; + ma_dr_flac__init_cpu_caps(); + if (!ma_dr_flac__init_private(&init, onRead, onSeek, onMeta, container, pUserData, pUserDataMD)) { + return NULL; + } + if (pAllocationCallbacks != NULL) { + allocationCallbacks = *pAllocationCallbacks; + if (allocationCallbacks.onFree == NULL || (allocationCallbacks.onMalloc == NULL && allocationCallbacks.onRealloc == NULL)) { + return NULL; + } + } else { + allocationCallbacks.pUserData = NULL; + allocationCallbacks.onMalloc = ma_dr_flac__malloc_default; + allocationCallbacks.onRealloc = ma_dr_flac__realloc_default; + allocationCallbacks.onFree = ma_dr_flac__free_default; + } + allocationSize = sizeof(ma_dr_flac); + if ((init.maxBlockSizeInPCMFrames % (MA_DR_FLAC_MAX_SIMD_VECTOR_SIZE / sizeof(ma_int32))) == 0) { + wholeSIMDVectorCountPerChannel = (init.maxBlockSizeInPCMFrames / (MA_DR_FLAC_MAX_SIMD_VECTOR_SIZE / sizeof(ma_int32))); + } else { + wholeSIMDVectorCountPerChannel = (init.maxBlockSizeInPCMFrames / (MA_DR_FLAC_MAX_SIMD_VECTOR_SIZE / sizeof(ma_int32))) + 1; + } + decodedSamplesAllocationSize = wholeSIMDVectorCountPerChannel * MA_DR_FLAC_MAX_SIMD_VECTOR_SIZE * init.channels; + allocationSize += decodedSamplesAllocationSize; + allocationSize += MA_DR_FLAC_MAX_SIMD_VECTOR_SIZE; +#ifndef MA_DR_FLAC_NO_OGG + if (init.container == ma_dr_flac_container_ogg) { + allocationSize += sizeof(ma_dr_flac_oggbs); + pOggbs = (ma_dr_flac_oggbs*)ma_dr_flac__malloc_from_callbacks(sizeof(*pOggbs), &allocationCallbacks); + if (pOggbs == NULL) { + return NULL; + } + MA_DR_FLAC_ZERO_MEMORY(pOggbs, sizeof(*pOggbs)); + pOggbs->onRead = onRead; + pOggbs->onSeek = onSeek; + pOggbs->pUserData = pUserData; + pOggbs->currentBytePos = init.oggFirstBytePos; + pOggbs->firstBytePos = init.oggFirstBytePos; + pOggbs->serialNumber = init.oggSerial; + pOggbs->bosPageHeader = init.oggBosHeader; + pOggbs->bytesRemainingInPage = 0; + } +#endif + firstFramePos = 42; + seektablePos = 0; + seekpointCount = 0; + if (init.hasMetadataBlocks) { + ma_dr_flac_read_proc onReadOverride = onRead; + ma_dr_flac_seek_proc onSeekOverride = onSeek; + void* pUserDataOverride = pUserData; +#ifndef MA_DR_FLAC_NO_OGG + if (init.container == ma_dr_flac_container_ogg) { + onReadOverride = ma_dr_flac__on_read_ogg; + onSeekOverride = ma_dr_flac__on_seek_ogg; + pUserDataOverride = (void*)pOggbs; + } +#endif + if (!ma_dr_flac__read_and_decode_metadata(onReadOverride, onSeekOverride, onMeta, pUserDataOverride, pUserDataMD, &firstFramePos, &seektablePos, &seekpointCount, &allocationCallbacks)) { +#ifndef MA_DR_FLAC_NO_OGG + ma_dr_flac__free_from_callbacks(pOggbs, &allocationCallbacks); +#endif + return NULL; + } + allocationSize += seekpointCount * 
sizeof(ma_dr_flac_seekpoint);
+    }
+    pFlac = (ma_dr_flac*)ma_dr_flac__malloc_from_callbacks(allocationSize, &allocationCallbacks);
+    if (pFlac == NULL) {
+#ifndef MA_DR_FLAC_NO_OGG
+        ma_dr_flac__free_from_callbacks(pOggbs, &allocationCallbacks);
+#endif
+        return NULL;
+    }
+    ma_dr_flac__init_from_info(pFlac, &init);
+    pFlac->allocationCallbacks = allocationCallbacks;
+    pFlac->pDecodedSamples = (ma_int32*)ma_dr_flac_align((size_t)pFlac->pExtraData, MA_DR_FLAC_MAX_SIMD_VECTOR_SIZE);
+#ifndef MA_DR_FLAC_NO_OGG
+    if (init.container == ma_dr_flac_container_ogg) {
+        ma_dr_flac_oggbs* pInternalOggbs = (ma_dr_flac_oggbs*)((ma_uint8*)pFlac->pDecodedSamples + decodedSamplesAllocationSize + (seekpointCount * sizeof(ma_dr_flac_seekpoint)));
+        MA_DR_FLAC_COPY_MEMORY(pInternalOggbs, pOggbs, sizeof(*pOggbs));
+        ma_dr_flac__free_from_callbacks(pOggbs, &allocationCallbacks);
+        pOggbs = NULL;
+        pFlac->bs.onRead = ma_dr_flac__on_read_ogg;
+        pFlac->bs.onSeek = ma_dr_flac__on_seek_ogg;
+        pFlac->bs.pUserData = (void*)pInternalOggbs;
+        pFlac->_oggbs = (void*)pInternalOggbs;
+    }
+#endif
+    pFlac->firstFLACFramePosInBytes = firstFramePos;
+#ifndef MA_DR_FLAC_NO_OGG
+    if (init.container == ma_dr_flac_container_ogg)
+    {
+        pFlac->pSeekpoints = NULL;
+        pFlac->seekpointCount = 0;
+    }
+    else
+#endif
+    {
+        if (seektablePos != 0) {
+            pFlac->seekpointCount = seekpointCount;
+            pFlac->pSeekpoints = (ma_dr_flac_seekpoint*)((ma_uint8*)pFlac->pDecodedSamples + decodedSamplesAllocationSize);
+            MA_DR_FLAC_ASSERT(pFlac->bs.onSeek != NULL);
+            MA_DR_FLAC_ASSERT(pFlac->bs.onRead != NULL);
+            if (pFlac->bs.onSeek(pFlac->bs.pUserData, (int)seektablePos, ma_dr_flac_seek_origin_start)) {
+                ma_uint32 iSeekpoint;
+                for (iSeekpoint = 0; iSeekpoint < seekpointCount; iSeekpoint += 1) {
+                    if (pFlac->bs.onRead(pFlac->bs.pUserData, pFlac->pSeekpoints + iSeekpoint, MA_DR_FLAC_SEEKPOINT_SIZE_IN_BYTES) == MA_DR_FLAC_SEEKPOINT_SIZE_IN_BYTES) {
+                        pFlac->pSeekpoints[iSeekpoint].firstPCMFrame = ma_dr_flac__be2host_64(pFlac->pSeekpoints[iSeekpoint].firstPCMFrame);
+                        pFlac->pSeekpoints[iSeekpoint].flacFrameOffset = ma_dr_flac__be2host_64(pFlac->pSeekpoints[iSeekpoint].flacFrameOffset);
+                        pFlac->pSeekpoints[iSeekpoint].pcmFrameCount = ma_dr_flac__be2host_16(pFlac->pSeekpoints[iSeekpoint].pcmFrameCount);
+                    } else {
+                        pFlac->pSeekpoints = NULL;
+                        pFlac->seekpointCount = 0;
+                        break;
+                    }
+                }
+                if (!pFlac->bs.onSeek(pFlac->bs.pUserData, (int)pFlac->firstFLACFramePosInBytes, ma_dr_flac_seek_origin_start)) {
+                    ma_dr_flac__free_from_callbacks(pFlac, &allocationCallbacks);
+                    return NULL;
+                }
+            } else {
+                pFlac->pSeekpoints = NULL;
+                pFlac->seekpointCount = 0;
+            }
+        }
+    }
+    if (!init.hasStreamInfoBlock) {
+        pFlac->currentFLACFrame.header = init.firstFrameHeader;
+        for (;;) {
+            ma_result result = ma_dr_flac__decode_flac_frame(pFlac);
+            if (result == MA_SUCCESS) {
+                break;
+            } else {
+                if (result == MA_CRC_MISMATCH) {
+                    if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) {
+                        ma_dr_flac__free_from_callbacks(pFlac, &allocationCallbacks);
+                        return NULL;
+                    }
+                    continue;
+                } else {
+                    ma_dr_flac__free_from_callbacks(pFlac, &allocationCallbacks);
+                    return NULL;
+                }
+            }
+        }
+    }
+    return pFlac;
+}
+#ifndef MA_DR_FLAC_NO_STDIO
+#include <stdio.h>
+#ifndef MA_DR_FLAC_NO_WCHAR
+#include <wchar.h>
+#endif
+static size_t ma_dr_flac__on_read_stdio(void* pUserData, void* bufferOut, size_t bytesToRead)
+{
+    return fread(bufferOut, 1, bytesToRead, (FILE*)pUserData);
+}
+static ma_bool32 ma_dr_flac__on_seek_stdio(void* pUserData, int offset,
ma_dr_flac_seek_origin origin) +{ + MA_DR_FLAC_ASSERT(offset >= 0); + return fseek((FILE*)pUserData, offset, (origin == ma_dr_flac_seek_origin_current) ? SEEK_CUR : SEEK_SET) == 0; +} +MA_API ma_dr_flac* ma_dr_flac_open_file(const char* pFileName, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + FILE* pFile; + if (ma_fopen(&pFile, pFileName, "rb") != MA_SUCCESS) { + return NULL; + } + pFlac = ma_dr_flac_open(ma_dr_flac__on_read_stdio, ma_dr_flac__on_seek_stdio, (void*)pFile, pAllocationCallbacks); + if (pFlac == NULL) { + fclose(pFile); + return NULL; + } + return pFlac; +} +#ifndef MA_DR_FLAC_NO_WCHAR +MA_API ma_dr_flac* ma_dr_flac_open_file_w(const wchar_t* pFileName, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + FILE* pFile; + if (ma_wfopen(&pFile, pFileName, L"rb", pAllocationCallbacks) != MA_SUCCESS) { + return NULL; + } + pFlac = ma_dr_flac_open(ma_dr_flac__on_read_stdio, ma_dr_flac__on_seek_stdio, (void*)pFile, pAllocationCallbacks); + if (pFlac == NULL) { + fclose(pFile); + return NULL; + } + return pFlac; +} +#endif +MA_API ma_dr_flac* ma_dr_flac_open_file_with_metadata(const char* pFileName, ma_dr_flac_meta_proc onMeta, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + FILE* pFile; + if (ma_fopen(&pFile, pFileName, "rb") != MA_SUCCESS) { + return NULL; + } + pFlac = ma_dr_flac_open_with_metadata_private(ma_dr_flac__on_read_stdio, ma_dr_flac__on_seek_stdio, onMeta, ma_dr_flac_container_unknown, (void*)pFile, pUserData, pAllocationCallbacks); + if (pFlac == NULL) { + fclose(pFile); + return pFlac; + } + return pFlac; +} +#ifndef MA_DR_FLAC_NO_WCHAR +MA_API ma_dr_flac* ma_dr_flac_open_file_with_metadata_w(const wchar_t* pFileName, ma_dr_flac_meta_proc onMeta, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + FILE* pFile; + if (ma_wfopen(&pFile, pFileName, L"rb", pAllocationCallbacks) != MA_SUCCESS) { + return NULL; + } + pFlac = ma_dr_flac_open_with_metadata_private(ma_dr_flac__on_read_stdio, ma_dr_flac__on_seek_stdio, onMeta, ma_dr_flac_container_unknown, (void*)pFile, pUserData, pAllocationCallbacks); + if (pFlac == NULL) { + fclose(pFile); + return pFlac; + } + return pFlac; +} +#endif +#endif +static size_t ma_dr_flac__on_read_memory(void* pUserData, void* bufferOut, size_t bytesToRead) +{ + ma_dr_flac__memory_stream* memoryStream = (ma_dr_flac__memory_stream*)pUserData; + size_t bytesRemaining; + MA_DR_FLAC_ASSERT(memoryStream != NULL); + MA_DR_FLAC_ASSERT(memoryStream->dataSize >= memoryStream->currentReadPos); + bytesRemaining = memoryStream->dataSize - memoryStream->currentReadPos; + if (bytesToRead > bytesRemaining) { + bytesToRead = bytesRemaining; + } + if (bytesToRead > 0) { + MA_DR_FLAC_COPY_MEMORY(bufferOut, memoryStream->data + memoryStream->currentReadPos, bytesToRead); + memoryStream->currentReadPos += bytesToRead; + } + return bytesToRead; +} +static ma_bool32 ma_dr_flac__on_seek_memory(void* pUserData, int offset, ma_dr_flac_seek_origin origin) +{ + ma_dr_flac__memory_stream* memoryStream = (ma_dr_flac__memory_stream*)pUserData; + MA_DR_FLAC_ASSERT(memoryStream != NULL); + MA_DR_FLAC_ASSERT(offset >= 0); + if (offset > (ma_int64)memoryStream->dataSize) { + return MA_FALSE; + } + if (origin == ma_dr_flac_seek_origin_current) { + if (memoryStream->currentReadPos + offset <= memoryStream->dataSize) { + memoryStream->currentReadPos += offset; + } else { + return MA_FALSE; + } + } else { + if ((ma_uint32)offset <= 
memoryStream->dataSize) { + memoryStream->currentReadPos = offset; + } else { + return MA_FALSE; + } + } + return MA_TRUE; +} +MA_API ma_dr_flac* ma_dr_flac_open_memory(const void* pData, size_t dataSize, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac__memory_stream memoryStream; + ma_dr_flac* pFlac; + memoryStream.data = (const ma_uint8*)pData; + memoryStream.dataSize = dataSize; + memoryStream.currentReadPos = 0; + pFlac = ma_dr_flac_open(ma_dr_flac__on_read_memory, ma_dr_flac__on_seek_memory, &memoryStream, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + pFlac->memoryStream = memoryStream; +#ifndef MA_DR_FLAC_NO_OGG + if (pFlac->container == ma_dr_flac_container_ogg) + { + ma_dr_flac_oggbs* oggbs = (ma_dr_flac_oggbs*)pFlac->_oggbs; + oggbs->pUserData = &pFlac->memoryStream; + } + else +#endif + { + pFlac->bs.pUserData = &pFlac->memoryStream; + } + return pFlac; +} +MA_API ma_dr_flac* ma_dr_flac_open_memory_with_metadata(const void* pData, size_t dataSize, ma_dr_flac_meta_proc onMeta, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac__memory_stream memoryStream; + ma_dr_flac* pFlac; + memoryStream.data = (const ma_uint8*)pData; + memoryStream.dataSize = dataSize; + memoryStream.currentReadPos = 0; + pFlac = ma_dr_flac_open_with_metadata_private(ma_dr_flac__on_read_memory, ma_dr_flac__on_seek_memory, onMeta, ma_dr_flac_container_unknown, &memoryStream, pUserData, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + pFlac->memoryStream = memoryStream; +#ifndef MA_DR_FLAC_NO_OGG + if (pFlac->container == ma_dr_flac_container_ogg) + { + ma_dr_flac_oggbs* oggbs = (ma_dr_flac_oggbs*)pFlac->_oggbs; + oggbs->pUserData = &pFlac->memoryStream; + } + else +#endif + { + pFlac->bs.pUserData = &pFlac->memoryStream; + } + return pFlac; +} +MA_API ma_dr_flac* ma_dr_flac_open(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_flac_open_with_metadata_private(onRead, onSeek, NULL, ma_dr_flac_container_unknown, pUserData, pUserData, pAllocationCallbacks); +} +MA_API ma_dr_flac* ma_dr_flac_open_relaxed(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_container container, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_flac_open_with_metadata_private(onRead, onSeek, NULL, container, pUserData, pUserData, pAllocationCallbacks); +} +MA_API ma_dr_flac* ma_dr_flac_open_with_metadata(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_flac_open_with_metadata_private(onRead, onSeek, onMeta, ma_dr_flac_container_unknown, pUserData, pUserData, pAllocationCallbacks); +} +MA_API ma_dr_flac* ma_dr_flac_open_with_metadata_relaxed(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, ma_dr_flac_container container, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_flac_open_with_metadata_private(onRead, onSeek, onMeta, container, pUserData, pUserData, pAllocationCallbacks); +} +MA_API void ma_dr_flac_close(ma_dr_flac* pFlac) +{ + if (pFlac == NULL) { + return; + } +#ifndef MA_DR_FLAC_NO_STDIO + if (pFlac->bs.onRead == ma_dr_flac__on_read_stdio) { + fclose((FILE*)pFlac->bs.pUserData); + } +#ifndef MA_DR_FLAC_NO_OGG + if (pFlac->container == ma_dr_flac_container_ogg) { + ma_dr_flac_oggbs* 
oggbs = (ma_dr_flac_oggbs*)pFlac->_oggbs; + MA_DR_FLAC_ASSERT(pFlac->bs.onRead == ma_dr_flac__on_read_ogg); + if (oggbs->onRead == ma_dr_flac__on_read_stdio) { + fclose((FILE*)oggbs->pUserData); + } + } +#endif +#endif + ma_dr_flac__free_from_callbacks(pFlac, &pFlac->allocationCallbacks); +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_left_side__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + for (i = 0; i < frameCount; ++i) { + ma_uint32 left = (ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + ma_uint32 side = (ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + ma_uint32 right = left - side; + pOutputSamples[i*2+0] = (ma_int32)left; + pOutputSamples[i*2+1] = (ma_int32)right; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_left_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 left0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 left1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 left2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 left3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 side0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 side2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << shift1; + ma_uint32 right0 = left0 - side0; + ma_uint32 right1 = left1 - side1; + ma_uint32 right2 = left2 - side2; + ma_uint32 right3 = left3 - side3; + pOutputSamples[i*8+0] = (ma_int32)left0; + pOutputSamples[i*8+1] = (ma_int32)right0; + pOutputSamples[i*8+2] = (ma_int32)left1; + pOutputSamples[i*8+3] = (ma_int32)right1; + pOutputSamples[i*8+4] = (ma_int32)left2; + pOutputSamples[i*8+5] = (ma_int32)right2; + pOutputSamples[i*8+6] = (ma_int32)left3; + pOutputSamples[i*8+7] = (ma_int32)right3; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + pOutputSamples[i*2+0] = (ma_int32)left; + pOutputSamples[i*2+1] = (ma_int32)right; + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_left_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + 
pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + for (i = 0; i < frameCount4; ++i) { + __m128i left = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0); + __m128i side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + __m128i right = _mm_sub_epi32(left, side); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 0), _mm_unpacklo_epi32(left, right)); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 4), _mm_unpackhi_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + pOutputSamples[i*2+0] = (ma_int32)left; + pOutputSamples[i*2+1] = (ma_int32)right; + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_left_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + int32x4_t shift0_4; + int32x4_t shift1_4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + shift0_4 = vdupq_n_s32(shift0); + shift1_4 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t left; + uint32x4_t side; + uint32x4_t right; + left = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), shift0_4); + side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift1_4); + right = vsubq_u32(left, side); + ma_dr_flac__vst2q_u32((ma_uint32*)pOutputSamples + i*8, vzipq_u32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + pOutputSamples[i*2+0] = (ma_int32)left; + pOutputSamples[i*2+1] = (ma_int32)right; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_left_side(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s32__decode_left_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s32__decode_left_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_s32__decode_left_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_s32__decode_left_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_right_side__reference(ma_dr_flac* pFlac, 
ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + for (i = 0; i < frameCount; ++i) { + ma_uint32 side = (ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + ma_uint32 right = (ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + ma_uint32 left = right + side; + pOutputSamples[i*2+0] = (ma_int32)left; + pOutputSamples[i*2+1] = (ma_int32)right; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_right_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 side0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 side1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 side2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 side3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 right0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 right1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 right2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 right3 = pInputSamples1U32[i*4+3] << shift1; + ma_uint32 left0 = right0 + side0; + ma_uint32 left1 = right1 + side1; + ma_uint32 left2 = right2 + side2; + ma_uint32 left3 = right3 + side3; + pOutputSamples[i*8+0] = (ma_int32)left0; + pOutputSamples[i*8+1] = (ma_int32)right0; + pOutputSamples[i*8+2] = (ma_int32)left1; + pOutputSamples[i*8+3] = (ma_int32)right1; + pOutputSamples[i*8+4] = (ma_int32)left2; + pOutputSamples[i*8+5] = (ma_int32)right2; + pOutputSamples[i*8+6] = (ma_int32)left3; + pOutputSamples[i*8+7] = (ma_int32)right3; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + pOutputSamples[i*2+0] = (ma_int32)left; + pOutputSamples[i*2+1] = (ma_int32)right; + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_right_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + for (i = 0; i < frameCount4; ++i) { + __m128i side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0); + __m128i right = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + __m128i left = _mm_add_epi32(right, side); + 
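+        /*
+        Right-side stereo stores the side channel (left minus right) in subframe 0 and the
+        right channel in subframe 1, so the left channel is reconstructed as right + side
+        (e.g. right = 100, side = 3 gives left = 103). The unpacklo/unpackhi below simply
+        interleave the four reconstructed left/right pairs into the output stream.
+        */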
_mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 0), _mm_unpacklo_epi32(left, right)); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 4), _mm_unpackhi_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + pOutputSamples[i*2+0] = (ma_int32)left; + pOutputSamples[i*2+1] = (ma_int32)right; + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_right_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + int32x4_t shift0_4; + int32x4_t shift1_4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + shift0_4 = vdupq_n_s32(shift0); + shift1_4 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t side; + uint32x4_t right; + uint32x4_t left; + side = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), shift0_4); + right = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift1_4); + left = vaddq_u32(right, side); + ma_dr_flac__vst2q_u32((ma_uint32*)pOutputSamples + i*8, vzipq_u32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + pOutputSamples[i*2+0] = (ma_int32)left; + pOutputSamples[i*2+1] = (ma_int32)right; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_right_side(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s32__decode_right_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s32__decode_right_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_s32__decode_right_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_s32__decode_right_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_mid_side__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + for (ma_uint64 i = 0; i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << 
pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)((ma_uint32)((ma_int32)(mid + side) >> 1) << unusedBitsPerSample); + pOutputSamples[i*2+1] = (ma_int32)((ma_uint32)((ma_int32)(mid - side) >> 1) << unusedBitsPerSample); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_mid_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_int32 shift = unusedBitsPerSample; + if (shift > 0) { + shift -= 1; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 temp0L; + ma_uint32 temp1L; + ma_uint32 temp2L; + ma_uint32 temp3L; + ma_uint32 temp0R; + ma_uint32 temp1R; + ma_uint32 temp2R; + ma_uint32 temp3R; + ma_uint32 mid0 = pInputSamples0U32[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid1 = pInputSamples0U32[i*4+1] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid2 = pInputSamples0U32[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid3 = pInputSamples0U32[i*4+3] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side0 = pInputSamples1U32[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side2 = pInputSamples1U32[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid0 = (mid0 << 1) | (side0 & 0x01); + mid1 = (mid1 << 1) | (side1 & 0x01); + mid2 = (mid2 << 1) | (side2 & 0x01); + mid3 = (mid3 << 1) | (side3 & 0x01); + temp0L = (mid0 + side0) << shift; + temp1L = (mid1 + side1) << shift; + temp2L = (mid2 + side2) << shift; + temp3L = (mid3 + side3) << shift; + temp0R = (mid0 - side0) << shift; + temp1R = (mid1 - side1) << shift; + temp2R = (mid2 - side2) << shift; + temp3R = (mid3 - side3) << shift; + pOutputSamples[i*8+0] = (ma_int32)temp0L; + pOutputSamples[i*8+1] = (ma_int32)temp0R; + pOutputSamples[i*8+2] = (ma_int32)temp1L; + pOutputSamples[i*8+3] = (ma_int32)temp1R; + pOutputSamples[i*8+4] = (ma_int32)temp2L; + pOutputSamples[i*8+5] = (ma_int32)temp2R; + pOutputSamples[i*8+6] = (ma_int32)temp3L; + pOutputSamples[i*8+7] = (ma_int32)temp3R; + } + } else { + for (i = 0; i < frameCount4; ++i) { + ma_uint32 temp0L; + ma_uint32 temp1L; + ma_uint32 temp2L; + ma_uint32 temp3L; + ma_uint32 temp0R; + ma_uint32 temp1R; + ma_uint32 temp2R; + ma_uint32 temp3R; + ma_uint32 mid0 = pInputSamples0U32[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid1 = pInputSamples0U32[i*4+1] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid2 = pInputSamples0U32[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid3 = pInputSamples0U32[i*4+3] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side0 = pInputSamples1U32[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side2 = 
pInputSamples1U32[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid0 = (mid0 << 1) | (side0 & 0x01); + mid1 = (mid1 << 1) | (side1 & 0x01); + mid2 = (mid2 << 1) | (side2 & 0x01); + mid3 = (mid3 << 1) | (side3 & 0x01); + temp0L = (ma_uint32)((ma_int32)(mid0 + side0) >> 1); + temp1L = (ma_uint32)((ma_int32)(mid1 + side1) >> 1); + temp2L = (ma_uint32)((ma_int32)(mid2 + side2) >> 1); + temp3L = (ma_uint32)((ma_int32)(mid3 + side3) >> 1); + temp0R = (ma_uint32)((ma_int32)(mid0 - side0) >> 1); + temp1R = (ma_uint32)((ma_int32)(mid1 - side1) >> 1); + temp2R = (ma_uint32)((ma_int32)(mid2 - side2) >> 1); + temp3R = (ma_uint32)((ma_int32)(mid3 - side3) >> 1); + pOutputSamples[i*8+0] = (ma_int32)temp0L; + pOutputSamples[i*8+1] = (ma_int32)temp0R; + pOutputSamples[i*8+2] = (ma_int32)temp1L; + pOutputSamples[i*8+3] = (ma_int32)temp1R; + pOutputSamples[i*8+4] = (ma_int32)temp2L; + pOutputSamples[i*8+5] = (ma_int32)temp2R; + pOutputSamples[i*8+6] = (ma_int32)temp3L; + pOutputSamples[i*8+7] = (ma_int32)temp3R; + } + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)((ma_uint32)((ma_int32)(mid + side) >> 1) << unusedBitsPerSample); + pOutputSamples[i*2+1] = (ma_int32)((ma_uint32)((ma_int32)(mid - side) >> 1) << unusedBitsPerSample); + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_mid_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_int32 shift = unusedBitsPerSample; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + if (shift == 0) { + for (i = 0; i < frameCount4; ++i) { + __m128i mid; + __m128i side; + __m128i left; + __m128i right; + mid = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01))); + left = _mm_srai_epi32(_mm_add_epi32(mid, side), 1); + right = _mm_srai_epi32(_mm_sub_epi32(mid, side), 1); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 0), _mm_unpacklo_epi32(left, right)); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 4), _mm_unpackhi_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)(mid + side) >> 1; + pOutputSamples[i*2+1] = (ma_int32)(mid - side) >> 1; + } + } else { + shift -= 1; + for (i = 0; i < frameCount4; ++i) { + __m128i mid; + __m128i side; + __m128i left; + __m128i right; + mid = 
_mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01))); + left = _mm_slli_epi32(_mm_add_epi32(mid, side), shift); + right = _mm_slli_epi32(_mm_sub_epi32(mid, side), shift); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 0), _mm_unpacklo_epi32(left, right)); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 4), _mm_unpackhi_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)((mid + side) << shift); + pOutputSamples[i*2+1] = (ma_int32)((mid - side) << shift); + } + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_mid_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_int32 shift = unusedBitsPerSample; + int32x4_t wbpsShift0_4; + int32x4_t wbpsShift1_4; + uint32x4_t one4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + wbpsShift0_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + wbpsShift1_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + one4 = vdupq_n_u32(1); + if (shift == 0) { + for (i = 0; i < frameCount4; ++i) { + uint32x4_t mid; + uint32x4_t side; + int32x4_t left; + int32x4_t right; + mid = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), wbpsShift0_4); + side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), wbpsShift1_4); + mid = vorrq_u32(vshlq_n_u32(mid, 1), vandq_u32(side, one4)); + left = vshrq_n_s32(vreinterpretq_s32_u32(vaddq_u32(mid, side)), 1); + right = vshrq_n_s32(vreinterpretq_s32_u32(vsubq_u32(mid, side)), 1); + ma_dr_flac__vst2q_s32(pOutputSamples + i*8, vzipq_s32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)(mid + side) >> 1; + pOutputSamples[i*2+1] = (ma_int32)(mid - side) >> 1; + } + } else { + int32x4_t shift4; + shift -= 1; + shift4 = vdupq_n_s32(shift); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t mid; + uint32x4_t side; + int32x4_t left; + int32x4_t right; + mid = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), wbpsShift0_4); + side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), wbpsShift1_4); + mid = vorrq_u32(vshlq_n_u32(mid, 1), vandq_u32(side, one4)); + left = vreinterpretq_s32_u32(vshlq_u32(vaddq_u32(mid, side), shift4)); + right = vreinterpretq_s32_u32(vshlq_u32(vsubq_u32(mid, side), shift4)); + ma_dr_flac__vst2q_s32(pOutputSamples + i*8, vzipq_s32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + 
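/* Scalar tail: decodes the last (frameCount % 4) frames not covered by the 4-wide SSE2 loop above. The side channel's low bit restores the LSB dropped when mid was stored as (L+R)>>1, so left = (mid+side)>>1 and right = (mid-side)>>1, left-aligned into 32 bits; because (mid+side) is always even, the >>1 is folded into the shift by (unusedBitsPerSample - 1). */ + 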
ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)((mid + side) << shift); + pOutputSamples[i*2+1] = (ma_int32)((mid - side) << shift); + } + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_mid_side(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s32__decode_mid_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s32__decode_mid_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_s32__decode_mid_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_s32__decode_mid_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + for (ma_uint64 i = 0; i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int32)((ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample)); + pOutputSamples[i*2+1] = (ma_int32)((ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample)); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 tempL0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 tempL1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 tempL2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 tempL3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 tempR0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 tempR1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 tempR2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 tempR3 = pInputSamples1U32[i*4+3] << shift1; + pOutputSamples[i*8+0] = (ma_int32)tempL0; + pOutputSamples[i*8+1] = (ma_int32)tempR0; + pOutputSamples[i*8+2] = (ma_int32)tempL1; + pOutputSamples[i*8+3] = (ma_int32)tempR1; + pOutputSamples[i*8+4] = (ma_int32)tempL2; + pOutputSamples[i*8+5] = (ma_int32)tempR2; + pOutputSamples[i*8+6] = 
(ma_int32)tempL3; + pOutputSamples[i*8+7] = (ma_int32)tempR3; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int32)(pInputSamples0U32[i] << shift0); + pOutputSamples[i*2+1] = (ma_int32)(pInputSamples1U32[i] << shift1); + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + for (i = 0; i < frameCount4; ++i) { + __m128i left = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0); + __m128i right = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 0), _mm_unpacklo_epi32(left, right)); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 4), _mm_unpackhi_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int32)(pInputSamples0U32[i] << shift0); + pOutputSamples[i*2+1] = (ma_int32)(pInputSamples1U32[i] << shift1); + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + int32x4_t shift4_0 = vdupq_n_s32(shift0); + int32x4_t shift4_1 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + int32x4_t left; + int32x4_t right; + left = vreinterpretq_s32_u32(vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), shift4_0)); + right = vreinterpretq_s32_u32(vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift4_1)); + ma_dr_flac__vst2q_s32(pOutputSamples + i*8, vzipq_s32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int32)(pInputSamples0U32[i] << shift0); + pOutputSamples[i*2+1] = (ma_int32)(pInputSamples1U32[i] << shift1); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + 
ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +MA_API ma_uint64 ma_dr_flac_read_pcm_frames_s32(ma_dr_flac* pFlac, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + ma_uint64 framesRead; + ma_uint32 unusedBitsPerSample; + if (pFlac == NULL || framesToRead == 0) { + return 0; + } + if (pBufferOut == NULL) { + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, framesToRead); + } + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 32); + unusedBitsPerSample = 32 - pFlac->bitsPerSample; + framesRead = 0; + while (framesToRead > 0) { + if (pFlac->currentFLACFrame.pcmFramesRemaining == 0) { + if (!ma_dr_flac__read_and_decode_next_flac_frame(pFlac)) { + break; + } + } else { + unsigned int channelCount = ma_dr_flac__get_channel_count_from_channel_assignment(pFlac->currentFLACFrame.header.channelAssignment); + ma_uint64 iFirstPCMFrame = pFlac->currentFLACFrame.header.blockSizeInPCMFrames - pFlac->currentFLACFrame.pcmFramesRemaining; + ma_uint64 frameCountThisIteration = framesToRead; + if (frameCountThisIteration > pFlac->currentFLACFrame.pcmFramesRemaining) { + frameCountThisIteration = pFlac->currentFLACFrame.pcmFramesRemaining; + } + if (channelCount == 2) { + const ma_int32* pDecodedSamples0 = pFlac->currentFLACFrame.subframes[0].pSamplesS32 + iFirstPCMFrame; + const ma_int32* pDecodedSamples1 = pFlac->currentFLACFrame.subframes[1].pSamplesS32 + iFirstPCMFrame; + switch (pFlac->currentFLACFrame.header.channelAssignment) + { + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE: + { + ma_dr_flac_read_pcm_frames_s32__decode_left_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE: + { + ma_dr_flac_read_pcm_frames_s32__decode_right_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_MID_SIDE: + { + ma_dr_flac_read_pcm_frames_s32__decode_mid_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_INDEPENDENT: + default: + { + ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + } + } else { + ma_uint64 i; + for (i = 0; i < frameCountThisIteration; ++i) { + unsigned int j; + for (j = 0; j < channelCount; ++j) { + pBufferOut[(i*channelCount)+j] = (ma_int32)((ma_uint32)(pFlac->currentFLACFrame.subframes[j].pSamplesS32[iFirstPCMFrame + i]) << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[j].wastedBitsPerSample)); + } + } + } + framesRead += frameCountThisIteration; + pBufferOut += frameCountThisIteration * channelCount; + framesToRead -= frameCountThisIteration; + pFlac->currentPCMFrame += frameCountThisIteration; + pFlac->currentFLACFrame.pcmFramesRemaining -= (ma_uint32)frameCountThisIteration; + } + } + return framesRead; +} +#if 0 +static MA_INLINE void 
ma_dr_flac_read_pcm_frames_s16__decode_left_side__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + for (i = 0; i < frameCount; ++i) { + ma_uint32 left = (ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + ma_uint32 side = (ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + ma_uint32 right = left - side; + left >>= 16; + right >>= 16; + pOutputSamples[i*2+0] = (ma_int16)left; + pOutputSamples[i*2+1] = (ma_int16)right; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_left_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 left0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 left1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 left2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 left3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 side0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 side2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << shift1; + ma_uint32 right0 = left0 - side0; + ma_uint32 right1 = left1 - side1; + ma_uint32 right2 = left2 - side2; + ma_uint32 right3 = left3 - side3; + left0 >>= 16; + left1 >>= 16; + left2 >>= 16; + left3 >>= 16; + right0 >>= 16; + right1 >>= 16; + right2 >>= 16; + right3 >>= 16; + pOutputSamples[i*8+0] = (ma_int16)left0; + pOutputSamples[i*8+1] = (ma_int16)right0; + pOutputSamples[i*8+2] = (ma_int16)left1; + pOutputSamples[i*8+3] = (ma_int16)right1; + pOutputSamples[i*8+4] = (ma_int16)left2; + pOutputSamples[i*8+5] = (ma_int16)right2; + pOutputSamples[i*8+6] = (ma_int16)left3; + pOutputSamples[i*8+7] = (ma_int16)right3; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + left >>= 16; + right >>= 16; + pOutputSamples[i*2+0] = (ma_int16)left; + pOutputSamples[i*2+1] = (ma_int16)right; + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_left_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + for (i = 0; i < frameCount4; 
++i) { + __m128i left = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0); + __m128i side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + __m128i right = _mm_sub_epi32(left, side); + left = _mm_srai_epi32(left, 16); + right = _mm_srai_epi32(right, 16); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8), ma_dr_flac__mm_packs_interleaved_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + left >>= 16; + right >>= 16; + pOutputSamples[i*2+0] = (ma_int16)left; + pOutputSamples[i*2+1] = (ma_int16)right; + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_left_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + int32x4_t shift0_4; + int32x4_t shift1_4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + shift0_4 = vdupq_n_s32(shift0); + shift1_4 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t left; + uint32x4_t side; + uint32x4_t right; + left = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), shift0_4); + side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift1_4); + right = vsubq_u32(left, side); + left = vshrq_n_u32(left, 16); + right = vshrq_n_u32(right, 16); + ma_dr_flac__vst2q_u16((ma_uint16*)pOutputSamples + i*8, vzip_u16(vmovn_u32(left), vmovn_u32(right))); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + left >>= 16; + right >>= 16; + pOutputSamples[i*2+0] = (ma_int16)left; + pOutputSamples[i*2+1] = (ma_int16)right; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_left_side(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s16__decode_left_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s16__decode_left_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_s16__decode_left_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_s16__decode_left_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_right_side__reference(ma_dr_flac* 
pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + for (i = 0; i < frameCount; ++i) { + ma_uint32 side = (ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + ma_uint32 right = (ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + ma_uint32 left = right + side; + left >>= 16; + right >>= 16; + pOutputSamples[i*2+0] = (ma_int16)left; + pOutputSamples[i*2+1] = (ma_int16)right; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_right_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 side0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 side1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 side2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 side3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 right0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 right1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 right2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 right3 = pInputSamples1U32[i*4+3] << shift1; + ma_uint32 left0 = right0 + side0; + ma_uint32 left1 = right1 + side1; + ma_uint32 left2 = right2 + side2; + ma_uint32 left3 = right3 + side3; + left0 >>= 16; + left1 >>= 16; + left2 >>= 16; + left3 >>= 16; + right0 >>= 16; + right1 >>= 16; + right2 >>= 16; + right3 >>= 16; + pOutputSamples[i*8+0] = (ma_int16)left0; + pOutputSamples[i*8+1] = (ma_int16)right0; + pOutputSamples[i*8+2] = (ma_int16)left1; + pOutputSamples[i*8+3] = (ma_int16)right1; + pOutputSamples[i*8+4] = (ma_int16)left2; + pOutputSamples[i*8+5] = (ma_int16)right2; + pOutputSamples[i*8+6] = (ma_int16)left3; + pOutputSamples[i*8+7] = (ma_int16)right3; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + left >>= 16; + right >>= 16; + pOutputSamples[i*2+0] = (ma_int16)left; + pOutputSamples[i*2+1] = (ma_int16)right; + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_right_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + for (i = 0; i < frameCount4; ++i) { + __m128i side = _mm_slli_epi32(_mm_loadu_si128((const 
__m128i*)pInputSamples0 + i), shift0); + __m128i right = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + __m128i left = _mm_add_epi32(right, side); + left = _mm_srai_epi32(left, 16); + right = _mm_srai_epi32(right, 16); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8), ma_dr_flac__mm_packs_interleaved_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + left >>= 16; + right >>= 16; + pOutputSamples[i*2+0] = (ma_int16)left; + pOutputSamples[i*2+1] = (ma_int16)right; + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_right_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + int32x4_t shift0_4; + int32x4_t shift1_4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + shift0_4 = vdupq_n_s32(shift0); + shift1_4 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t side; + uint32x4_t right; + uint32x4_t left; + side = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), shift0_4); + right = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift1_4); + left = vaddq_u32(right, side); + left = vshrq_n_u32(left, 16); + right = vshrq_n_u32(right, 16); + ma_dr_flac__vst2q_u16((ma_uint16*)pOutputSamples + i*8, vzip_u16(vmovn_u32(left), vmovn_u32(right))); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + left >>= 16; + right >>= 16; + pOutputSamples[i*2+0] = (ma_int16)left; + pOutputSamples[i*2+1] = (ma_int16)right; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_right_side(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s16__decode_right_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s16__decode_right_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_s16__decode_right_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_s16__decode_right_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_mid_side__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 
unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + for (ma_uint64 i = 0; i < frameCount; ++i) { + ma_uint32 mid = (ma_uint32)pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = (ma_uint32)pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int16)(((ma_uint32)((ma_int32)(mid + side) >> 1) << unusedBitsPerSample) >> 16); + pOutputSamples[i*2+1] = (ma_int16)(((ma_uint32)((ma_int32)(mid - side) >> 1) << unusedBitsPerSample) >> 16); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_mid_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift = unusedBitsPerSample; + if (shift > 0) { + shift -= 1; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 temp0L; + ma_uint32 temp1L; + ma_uint32 temp2L; + ma_uint32 temp3L; + ma_uint32 temp0R; + ma_uint32 temp1R; + ma_uint32 temp2R; + ma_uint32 temp3R; + ma_uint32 mid0 = pInputSamples0U32[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid1 = pInputSamples0U32[i*4+1] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid2 = pInputSamples0U32[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid3 = pInputSamples0U32[i*4+3] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side0 = pInputSamples1U32[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side2 = pInputSamples1U32[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid0 = (mid0 << 1) | (side0 & 0x01); + mid1 = (mid1 << 1) | (side1 & 0x01); + mid2 = (mid2 << 1) | (side2 & 0x01); + mid3 = (mid3 << 1) | (side3 & 0x01); + temp0L = (mid0 + side0) << shift; + temp1L = (mid1 + side1) << shift; + temp2L = (mid2 + side2) << shift; + temp3L = (mid3 + side3) << shift; + temp0R = (mid0 - side0) << shift; + temp1R = (mid1 - side1) << shift; + temp2R = (mid2 - side2) << shift; + temp3R = (mid3 - side3) << shift; + temp0L >>= 16; + temp1L >>= 16; + temp2L >>= 16; + temp3L >>= 16; + temp0R >>= 16; + temp1R >>= 16; + temp2R >>= 16; + temp3R >>= 16; + pOutputSamples[i*8+0] = (ma_int16)temp0L; + pOutputSamples[i*8+1] = (ma_int16)temp0R; + pOutputSamples[i*8+2] = (ma_int16)temp1L; + pOutputSamples[i*8+3] = (ma_int16)temp1R; + pOutputSamples[i*8+4] = (ma_int16)temp2L; + pOutputSamples[i*8+5] = (ma_int16)temp2R; + pOutputSamples[i*8+6] = (ma_int16)temp3L; + pOutputSamples[i*8+7] = (ma_int16)temp3R; + } + } else { + for (i = 0; i < frameCount4; ++i) { + ma_uint32 temp0L; + ma_uint32 temp1L; + ma_uint32 temp2L; + ma_uint32 temp3L; + ma_uint32 temp0R; + ma_uint32 temp1R; + ma_uint32 temp2R; + ma_uint32 temp3R; + ma_uint32 mid0 = pInputSamples0U32[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid1 = pInputSamples0U32[i*4+1] << 
pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid2 = pInputSamples0U32[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid3 = pInputSamples0U32[i*4+3] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side0 = pInputSamples1U32[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side2 = pInputSamples1U32[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid0 = (mid0 << 1) | (side0 & 0x01); + mid1 = (mid1 << 1) | (side1 & 0x01); + mid2 = (mid2 << 1) | (side2 & 0x01); + mid3 = (mid3 << 1) | (side3 & 0x01); + temp0L = ((ma_int32)(mid0 + side0) >> 1); + temp1L = ((ma_int32)(mid1 + side1) >> 1); + temp2L = ((ma_int32)(mid2 + side2) >> 1); + temp3L = ((ma_int32)(mid3 + side3) >> 1); + temp0R = ((ma_int32)(mid0 - side0) >> 1); + temp1R = ((ma_int32)(mid1 - side1) >> 1); + temp2R = ((ma_int32)(mid2 - side2) >> 1); + temp3R = ((ma_int32)(mid3 - side3) >> 1); + temp0L >>= 16; + temp1L >>= 16; + temp2L >>= 16; + temp3L >>= 16; + temp0R >>= 16; + temp1R >>= 16; + temp2R >>= 16; + temp3R >>= 16; + pOutputSamples[i*8+0] = (ma_int16)temp0L; + pOutputSamples[i*8+1] = (ma_int16)temp0R; + pOutputSamples[i*8+2] = (ma_int16)temp1L; + pOutputSamples[i*8+3] = (ma_int16)temp1R; + pOutputSamples[i*8+4] = (ma_int16)temp2L; + pOutputSamples[i*8+5] = (ma_int16)temp2R; + pOutputSamples[i*8+6] = (ma_int16)temp3L; + pOutputSamples[i*8+7] = (ma_int16)temp3R; + } + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int16)(((ma_uint32)((ma_int32)(mid + side) >> 1) << unusedBitsPerSample) >> 16); + pOutputSamples[i*2+1] = (ma_int16)(((ma_uint32)((ma_int32)(mid - side) >> 1) << unusedBitsPerSample) >> 16); + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_mid_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift = unusedBitsPerSample; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + if (shift == 0) { + for (i = 0; i < frameCount4; ++i) { + __m128i mid; + __m128i side; + __m128i left; + __m128i right; + mid = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01))); + left = _mm_srai_epi32(_mm_add_epi32(mid, side), 1); + right = _mm_srai_epi32(_mm_sub_epi32(mid, side), 1); + left = _mm_srai_epi32(left, 16); + right = _mm_srai_epi32(right, 16); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8), ma_dr_flac__mm_packs_interleaved_epi32(left, 
right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int16)(((ma_int32)(mid + side) >> 1) >> 16); + pOutputSamples[i*2+1] = (ma_int16)(((ma_int32)(mid - side) >> 1) >> 16); + } + } else { + shift -= 1; + for (i = 0; i < frameCount4; ++i) { + __m128i mid; + __m128i side; + __m128i left; + __m128i right; + mid = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01))); + left = _mm_slli_epi32(_mm_add_epi32(mid, side), shift); + right = _mm_slli_epi32(_mm_sub_epi32(mid, side), shift); + left = _mm_srai_epi32(left, 16); + right = _mm_srai_epi32(right, 16); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8), ma_dr_flac__mm_packs_interleaved_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int16)(((mid + side) << shift) >> 16); + pOutputSamples[i*2+1] = (ma_int16)(((mid - side) << shift) >> 16); + } + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_mid_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift = unusedBitsPerSample; + int32x4_t wbpsShift0_4; + int32x4_t wbpsShift1_4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + wbpsShift0_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + wbpsShift1_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + if (shift == 0) { + for (i = 0; i < frameCount4; ++i) { + uint32x4_t mid; + uint32x4_t side; + int32x4_t left; + int32x4_t right; + mid = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), wbpsShift0_4); + side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), wbpsShift1_4); + mid = vorrq_u32(vshlq_n_u32(mid, 1), vandq_u32(side, vdupq_n_u32(1))); + left = vshrq_n_s32(vreinterpretq_s32_u32(vaddq_u32(mid, side)), 1); + right = vshrq_n_s32(vreinterpretq_s32_u32(vsubq_u32(mid, side)), 1); + left = vshrq_n_s32(left, 16); + right = vshrq_n_s32(right, 16); + ma_dr_flac__vst2q_s16(pOutputSamples + i*8, vzip_s16(vmovn_s32(left), vmovn_s32(right))); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int16)(((ma_int32)(mid + side) >> 1) >> 16); + pOutputSamples[i*2+1] = 
(ma_int16)(((ma_int32)(mid - side) >> 1) >> 16); + } + } else { + int32x4_t shift4; + shift -= 1; + shift4 = vdupq_n_s32(shift); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t mid; + uint32x4_t side; + int32x4_t left; + int32x4_t right; + mid = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), wbpsShift0_4); + side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), wbpsShift1_4); + mid = vorrq_u32(vshlq_n_u32(mid, 1), vandq_u32(side, vdupq_n_u32(1))); + left = vreinterpretq_s32_u32(vshlq_u32(vaddq_u32(mid, side), shift4)); + right = vreinterpretq_s32_u32(vshlq_u32(vsubq_u32(mid, side), shift4)); + left = vshrq_n_s32(left, 16); + right = vshrq_n_s32(right, 16); + ma_dr_flac__vst2q_s16(pOutputSamples + i*8, vzip_s16(vmovn_s32(left), vmovn_s32(right))); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int16)(((mid + side) << shift) >> 16); + pOutputSamples[i*2+1] = (ma_int16)(((mid - side) << shift) >> 16); + } + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_mid_side(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s16__decode_mid_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s16__decode_mid_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_s16__decode_mid_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_s16__decode_mid_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + for (ma_uint64 i = 0; i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int16)((ma_int32)((ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample)) >> 16); + pOutputSamples[i*2+1] = (ma_int16)((ma_int32)((ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample)) >> 16); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = 
unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 tempL0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 tempL1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 tempL2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 tempL3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 tempR0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 tempR1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 tempR2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 tempR3 = pInputSamples1U32[i*4+3] << shift1; + tempL0 >>= 16; + tempL1 >>= 16; + tempL2 >>= 16; + tempL3 >>= 16; + tempR0 >>= 16; + tempR1 >>= 16; + tempR2 >>= 16; + tempR3 >>= 16; + pOutputSamples[i*8+0] = (ma_int16)tempL0; + pOutputSamples[i*8+1] = (ma_int16)tempR0; + pOutputSamples[i*8+2] = (ma_int16)tempL1; + pOutputSamples[i*8+3] = (ma_int16)tempR1; + pOutputSamples[i*8+4] = (ma_int16)tempL2; + pOutputSamples[i*8+5] = (ma_int16)tempR2; + pOutputSamples[i*8+6] = (ma_int16)tempL3; + pOutputSamples[i*8+7] = (ma_int16)tempR3; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int16)((pInputSamples0U32[i] << shift0) >> 16); + pOutputSamples[i*2+1] = (ma_int16)((pInputSamples1U32[i] << shift1) >> 16); + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + for (i = 0; i < frameCount4; ++i) { + __m128i left = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0); + __m128i right = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + left = _mm_srai_epi32(left, 16); + right = _mm_srai_epi32(right, 16); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8), ma_dr_flac__mm_packs_interleaved_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int16)((pInputSamples0U32[i] << shift0) >> 16); + pOutputSamples[i*2+1] = (ma_int16)((pInputSamples1U32[i] << shift1) >> 16); + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + int32x4_t shift0_4 = vdupq_n_s32(shift0); + int32x4_t shift1_4 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + int32x4_t left; + int32x4_t right; + left = vreinterpretq_s32_u32(vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), 
shift0_4)); + right = vreinterpretq_s32_u32(vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift1_4)); + left = vshrq_n_s32(left, 16); + right = vshrq_n_s32(right, 16); + ma_dr_flac__vst2q_s16(pOutputSamples + i*8, vzip_s16(vmovn_s32(left), vmovn_s32(right))); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int16)((pInputSamples0U32[i] << shift0) >> 16); + pOutputSamples[i*2+1] = (ma_int16)((pInputSamples1U32[i] << shift1) >> 16); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +MA_API ma_uint64 ma_dr_flac_read_pcm_frames_s16(ma_dr_flac* pFlac, ma_uint64 framesToRead, ma_int16* pBufferOut) +{ + ma_uint64 framesRead; + ma_uint32 unusedBitsPerSample; + if (pFlac == NULL || framesToRead == 0) { + return 0; + } + if (pBufferOut == NULL) { + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, framesToRead); + } + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 32); + unusedBitsPerSample = 32 - pFlac->bitsPerSample; + framesRead = 0; + while (framesToRead > 0) { + if (pFlac->currentFLACFrame.pcmFramesRemaining == 0) { + if (!ma_dr_flac__read_and_decode_next_flac_frame(pFlac)) { + break; + } + } else { + unsigned int channelCount = ma_dr_flac__get_channel_count_from_channel_assignment(pFlac->currentFLACFrame.header.channelAssignment); + ma_uint64 iFirstPCMFrame = pFlac->currentFLACFrame.header.blockSizeInPCMFrames - pFlac->currentFLACFrame.pcmFramesRemaining; + ma_uint64 frameCountThisIteration = framesToRead; + if (frameCountThisIteration > pFlac->currentFLACFrame.pcmFramesRemaining) { + frameCountThisIteration = pFlac->currentFLACFrame.pcmFramesRemaining; + } + if (channelCount == 2) { + const ma_int32* pDecodedSamples0 = pFlac->currentFLACFrame.subframes[0].pSamplesS32 + iFirstPCMFrame; + const ma_int32* pDecodedSamples1 = pFlac->currentFLACFrame.subframes[1].pSamplesS32 + iFirstPCMFrame; + switch (pFlac->currentFLACFrame.header.channelAssignment) + { + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE: + { + ma_dr_flac_read_pcm_frames_s16__decode_left_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE: + { + ma_dr_flac_read_pcm_frames_s16__decode_right_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_MID_SIDE: + { + 
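/* Mid/side frame: subframe 0 carries the mid channel ((L+R)>>1) and subframe 1 carries the side channel (L-R). The decode helper below selects the SSE2 or NEON path when supported and bitsPerSample <= 24, and otherwise falls back to the scalar implementation. */ + 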
ma_dr_flac_read_pcm_frames_s16__decode_mid_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_INDEPENDENT: + default: + { + ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + } + } else { + ma_uint64 i; + for (i = 0; i < frameCountThisIteration; ++i) { + unsigned int j; + for (j = 0; j < channelCount; ++j) { + ma_int32 sampleS32 = (ma_int32)((ma_uint32)(pFlac->currentFLACFrame.subframes[j].pSamplesS32[iFirstPCMFrame + i]) << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[j].wastedBitsPerSample)); + pBufferOut[(i*channelCount)+j] = (ma_int16)(sampleS32 >> 16); + } + } + } + framesRead += frameCountThisIteration; + pBufferOut += frameCountThisIteration * channelCount; + framesToRead -= frameCountThisIteration; + pFlac->currentPCMFrame += frameCountThisIteration; + pFlac->currentFLACFrame.pcmFramesRemaining -= (ma_uint32)frameCountThisIteration; + } + } + return framesRead; +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_left_side__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + for (i = 0; i < frameCount; ++i) { + ma_uint32 left = (ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + ma_uint32 side = (ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + ma_uint32 right = left - side; + pOutputSamples[i*2+0] = (float)((ma_int32)left / 2147483648.0); + pOutputSamples[i*2+1] = (float)((ma_int32)right / 2147483648.0); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_left_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + float factor = 1 / 2147483648.0; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 left0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 left1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 left2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 left3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 side0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 side2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << shift1; + ma_uint32 right0 = left0 - side0; + ma_uint32 right1 = left1 - side1; + ma_uint32 right2 = left2 - side2; + ma_uint32 right3 = left3 - side3; + pOutputSamples[i*8+0] = (ma_int32)left0 * factor; + pOutputSamples[i*8+1] = (ma_int32)right0 * factor; + pOutputSamples[i*8+2] = (ma_int32)left1 * factor; + pOutputSamples[i*8+3] = (ma_int32)right1 * factor; + pOutputSamples[i*8+4] = (ma_int32)left2 * factor; + pOutputSamples[i*8+5] = (ma_int32)right2 * factor; + pOutputSamples[i*8+6] = (ma_int32)left3 * factor; 
+ pOutputSamples[i*8+7] = (ma_int32)right3 * factor; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + pOutputSamples[i*2+0] = (ma_int32)left * factor; + pOutputSamples[i*2+1] = (ma_int32)right * factor; + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_left_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8; + ma_uint32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8; + __m128 factor; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + factor = _mm_set1_ps(1.0f / 8388608.0f); + for (i = 0; i < frameCount4; ++i) { + __m128i left = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0); + __m128i side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + __m128i right = _mm_sub_epi32(left, side); + __m128 leftf = _mm_mul_ps(_mm_cvtepi32_ps(left), factor); + __m128 rightf = _mm_mul_ps(_mm_cvtepi32_ps(right), factor); + _mm_storeu_ps(pOutputSamples + i*8 + 0, _mm_unpacklo_ps(leftf, rightf)); + _mm_storeu_ps(pOutputSamples + i*8 + 4, _mm_unpackhi_ps(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + pOutputSamples[i*2+0] = (ma_int32)left / 8388608.0f; + pOutputSamples[i*2+1] = (ma_int32)right / 8388608.0f; + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_left_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8; + ma_uint32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8; + float32x4_t factor4; + int32x4_t shift0_4; + int32x4_t shift1_4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + factor4 = vdupq_n_f32(1.0f / 8388608.0f); + shift0_4 = vdupq_n_s32(shift0); + shift1_4 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t left; + uint32x4_t side; + uint32x4_t right; + float32x4_t leftf; + float32x4_t rightf; + left = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), shift0_4); + side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift1_4); + right = vsubq_u32(left, side); + leftf = vmulq_f32(vcvtq_f32_s32(vreinterpretq_s32_u32(left)), factor4); + rightf = vmulq_f32(vcvtq_f32_s32(vreinterpretq_s32_u32(right)), factor4); + ma_dr_flac__vst2q_f32(pOutputSamples + i*8, vzipq_f32(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = 
pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + pOutputSamples[i*2+0] = (ma_int32)left / 8388608.0f; + pOutputSamples[i*2+1] = (ma_int32)right / 8388608.0f; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_left_side(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_f32__decode_left_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_f32__decode_left_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_f32__decode_left_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_f32__decode_left_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_right_side__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + for (i = 0; i < frameCount; ++i) { + ma_uint32 side = (ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + ma_uint32 right = (ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + ma_uint32 left = right + side; + pOutputSamples[i*2+0] = (float)((ma_int32)left / 2147483648.0); + pOutputSamples[i*2+1] = (float)((ma_int32)right / 2147483648.0); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_right_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + float factor = 1 / 2147483648.0; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 side0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 side1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 side2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 side3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 right0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 right1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 right2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 right3 = pInputSamples1U32[i*4+3] << shift1; + ma_uint32 left0 = right0 + side0; + ma_uint32 left1 = right1 + side1; + ma_uint32 left2 = right2 + side2; + ma_uint32 left3 = right3 + side3; + pOutputSamples[i*8+0] = (ma_int32)left0 * factor; + pOutputSamples[i*8+1] = (ma_int32)right0 * factor; + pOutputSamples[i*8+2] = 
(ma_int32)left1 * factor; + pOutputSamples[i*8+3] = (ma_int32)right1 * factor; + pOutputSamples[i*8+4] = (ma_int32)left2 * factor; + pOutputSamples[i*8+5] = (ma_int32)right2 * factor; + pOutputSamples[i*8+6] = (ma_int32)left3 * factor; + pOutputSamples[i*8+7] = (ma_int32)right3 * factor; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + pOutputSamples[i*2+0] = (ma_int32)left * factor; + pOutputSamples[i*2+1] = (ma_int32)right * factor; + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_right_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8; + ma_uint32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8; + __m128 factor; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + factor = _mm_set1_ps(1.0f / 8388608.0f); + for (i = 0; i < frameCount4; ++i) { + __m128i side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0); + __m128i right = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + __m128i left = _mm_add_epi32(right, side); + __m128 leftf = _mm_mul_ps(_mm_cvtepi32_ps(left), factor); + __m128 rightf = _mm_mul_ps(_mm_cvtepi32_ps(right), factor); + _mm_storeu_ps(pOutputSamples + i*8 + 0, _mm_unpacklo_ps(leftf, rightf)); + _mm_storeu_ps(pOutputSamples + i*8 + 4, _mm_unpackhi_ps(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + pOutputSamples[i*2+0] = (ma_int32)left / 8388608.0f; + pOutputSamples[i*2+1] = (ma_int32)right / 8388608.0f; + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_right_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8; + ma_uint32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8; + float32x4_t factor4; + int32x4_t shift0_4; + int32x4_t shift1_4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + factor4 = vdupq_n_f32(1.0f / 8388608.0f); + shift0_4 = vdupq_n_s32(shift0); + shift1_4 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t side; + uint32x4_t right; + uint32x4_t left; + float32x4_t leftf; + float32x4_t rightf; + side = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), shift0_4); + right = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift1_4); + left = vaddq_u32(right, side); + leftf = 
vmulq_f32(vcvtq_f32_s32(vreinterpretq_s32_u32(left)), factor4); + rightf = vmulq_f32(vcvtq_f32_s32(vreinterpretq_s32_u32(right)), factor4); + ma_dr_flac__vst2q_f32(pOutputSamples + i*8, vzipq_f32(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + pOutputSamples[i*2+0] = (ma_int32)left / 8388608.0f; + pOutputSamples[i*2+1] = (ma_int32)right / 8388608.0f; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_right_side(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_f32__decode_right_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_f32__decode_right_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_f32__decode_right_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_f32__decode_right_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_mid_side__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + for (ma_uint64 i = 0; i < frameCount; ++i) { + ma_uint32 mid = (ma_uint32)pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = (ma_uint32)pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (float)((((ma_int32)(mid + side) >> 1) << (unusedBitsPerSample)) / 2147483648.0); + pOutputSamples[i*2+1] = (float)((((ma_int32)(mid - side) >> 1) << (unusedBitsPerSample)) / 2147483648.0); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_mid_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift = unusedBitsPerSample; + float factor = 1 / 2147483648.0; + if (shift > 0) { + shift -= 1; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 temp0L; + ma_uint32 temp1L; + ma_uint32 temp2L; + ma_uint32 temp3L; + ma_uint32 temp0R; + ma_uint32 temp1R; + ma_uint32 temp2R; + ma_uint32 temp3R; + ma_uint32 mid0 = pInputSamples0U32[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid1 = pInputSamples0U32[i*4+1] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid2 = pInputSamples0U32[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid3 = pInputSamples0U32[i*4+3] << 
pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side0 = pInputSamples1U32[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side2 = pInputSamples1U32[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid0 = (mid0 << 1) | (side0 & 0x01); + mid1 = (mid1 << 1) | (side1 & 0x01); + mid2 = (mid2 << 1) | (side2 & 0x01); + mid3 = (mid3 << 1) | (side3 & 0x01); + temp0L = (mid0 + side0) << shift; + temp1L = (mid1 + side1) << shift; + temp2L = (mid2 + side2) << shift; + temp3L = (mid3 + side3) << shift; + temp0R = (mid0 - side0) << shift; + temp1R = (mid1 - side1) << shift; + temp2R = (mid2 - side2) << shift; + temp3R = (mid3 - side3) << shift; + pOutputSamples[i*8+0] = (ma_int32)temp0L * factor; + pOutputSamples[i*8+1] = (ma_int32)temp0R * factor; + pOutputSamples[i*8+2] = (ma_int32)temp1L * factor; + pOutputSamples[i*8+3] = (ma_int32)temp1R * factor; + pOutputSamples[i*8+4] = (ma_int32)temp2L * factor; + pOutputSamples[i*8+5] = (ma_int32)temp2R * factor; + pOutputSamples[i*8+6] = (ma_int32)temp3L * factor; + pOutputSamples[i*8+7] = (ma_int32)temp3R * factor; + } + } else { + for (i = 0; i < frameCount4; ++i) { + ma_uint32 temp0L; + ma_uint32 temp1L; + ma_uint32 temp2L; + ma_uint32 temp3L; + ma_uint32 temp0R; + ma_uint32 temp1R; + ma_uint32 temp2R; + ma_uint32 temp3R; + ma_uint32 mid0 = pInputSamples0U32[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid1 = pInputSamples0U32[i*4+1] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid2 = pInputSamples0U32[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid3 = pInputSamples0U32[i*4+3] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side0 = pInputSamples1U32[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side2 = pInputSamples1U32[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid0 = (mid0 << 1) | (side0 & 0x01); + mid1 = (mid1 << 1) | (side1 & 0x01); + mid2 = (mid2 << 1) | (side2 & 0x01); + mid3 = (mid3 << 1) | (side3 & 0x01); + temp0L = (ma_uint32)((ma_int32)(mid0 + side0) >> 1); + temp1L = (ma_uint32)((ma_int32)(mid1 + side1) >> 1); + temp2L = (ma_uint32)((ma_int32)(mid2 + side2) >> 1); + temp3L = (ma_uint32)((ma_int32)(mid3 + side3) >> 1); + temp0R = (ma_uint32)((ma_int32)(mid0 - side0) >> 1); + temp1R = (ma_uint32)((ma_int32)(mid1 - side1) >> 1); + temp2R = (ma_uint32)((ma_int32)(mid2 - side2) >> 1); + temp3R = (ma_uint32)((ma_int32)(mid3 - side3) >> 1); + pOutputSamples[i*8+0] = (ma_int32)temp0L * factor; + pOutputSamples[i*8+1] = (ma_int32)temp0R * factor; + pOutputSamples[i*8+2] = (ma_int32)temp1L * factor; + pOutputSamples[i*8+3] = (ma_int32)temp1R * factor; + pOutputSamples[i*8+4] = (ma_int32)temp2L * factor; + pOutputSamples[i*8+5] = (ma_int32)temp2R * factor; + pOutputSamples[i*8+6] = (ma_int32)temp3L * factor; + pOutputSamples[i*8+7] = (ma_int32)temp3R * factor; + } + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << 
pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)((ma_uint32)((ma_int32)(mid + side) >> 1) << unusedBitsPerSample) * factor; + pOutputSamples[i*2+1] = (ma_int32)((ma_uint32)((ma_int32)(mid - side) >> 1) << unusedBitsPerSample) * factor; + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_mid_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift = unusedBitsPerSample - 8; + float factor; + __m128 factor128; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + factor = 1.0f / 8388608.0f; + factor128 = _mm_set1_ps(factor); + if (shift == 0) { + for (i = 0; i < frameCount4; ++i) { + __m128i mid; + __m128i side; + __m128i tempL; + __m128i tempR; + __m128 leftf; + __m128 rightf; + mid = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01))); + tempL = _mm_srai_epi32(_mm_add_epi32(mid, side), 1); + tempR = _mm_srai_epi32(_mm_sub_epi32(mid, side), 1); + leftf = _mm_mul_ps(_mm_cvtepi32_ps(tempL), factor128); + rightf = _mm_mul_ps(_mm_cvtepi32_ps(tempR), factor128); + _mm_storeu_ps(pOutputSamples + i*8 + 0, _mm_unpacklo_ps(leftf, rightf)); + _mm_storeu_ps(pOutputSamples + i*8 + 4, _mm_unpackhi_ps(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = ((ma_int32)(mid + side) >> 1) * factor; + pOutputSamples[i*2+1] = ((ma_int32)(mid - side) >> 1) * factor; + } + } else { + shift -= 1; + for (i = 0; i < frameCount4; ++i) { + __m128i mid; + __m128i side; + __m128i tempL; + __m128i tempR; + __m128 leftf; + __m128 rightf; + mid = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01))); + tempL = _mm_slli_epi32(_mm_add_epi32(mid, side), shift); + tempR = _mm_slli_epi32(_mm_sub_epi32(mid, side), shift); + leftf = _mm_mul_ps(_mm_cvtepi32_ps(tempL), factor128); + rightf = _mm_mul_ps(_mm_cvtepi32_ps(tempR), factor128); + _mm_storeu_ps(pOutputSamples + i*8 + 0, _mm_unpacklo_ps(leftf, rightf)); + _mm_storeu_ps(pOutputSamples + i*8 + 4, _mm_unpackhi_ps(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << 
pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)((mid + side) << shift) * factor; + pOutputSamples[i*2+1] = (ma_int32)((mid - side) << shift) * factor; + } + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_mid_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift = unusedBitsPerSample - 8; + float factor; + float32x4_t factor4; + int32x4_t shift4; + int32x4_t wbps0_4; + int32x4_t wbps1_4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + factor = 1.0f / 8388608.0f; + factor4 = vdupq_n_f32(factor); + wbps0_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + wbps1_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + if (shift == 0) { + for (i = 0; i < frameCount4; ++i) { + int32x4_t lefti; + int32x4_t righti; + float32x4_t leftf; + float32x4_t rightf; + uint32x4_t mid = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), wbps0_4); + uint32x4_t side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), wbps1_4); + mid = vorrq_u32(vshlq_n_u32(mid, 1), vandq_u32(side, vdupq_n_u32(1))); + lefti = vshrq_n_s32(vreinterpretq_s32_u32(vaddq_u32(mid, side)), 1); + righti = vshrq_n_s32(vreinterpretq_s32_u32(vsubq_u32(mid, side)), 1); + leftf = vmulq_f32(vcvtq_f32_s32(lefti), factor4); + rightf = vmulq_f32(vcvtq_f32_s32(righti), factor4); + ma_dr_flac__vst2q_f32(pOutputSamples + i*8, vzipq_f32(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = ((ma_int32)(mid + side) >> 1) * factor; + pOutputSamples[i*2+1] = ((ma_int32)(mid - side) >> 1) * factor; + } + } else { + shift -= 1; + shift4 = vdupq_n_s32(shift); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t mid; + uint32x4_t side; + int32x4_t lefti; + int32x4_t righti; + float32x4_t leftf; + float32x4_t rightf; + mid = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), wbps0_4); + side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), wbps1_4); + mid = vorrq_u32(vshlq_n_u32(mid, 1), vandq_u32(side, vdupq_n_u32(1))); + lefti = vreinterpretq_s32_u32(vshlq_u32(vaddq_u32(mid, side), shift4)); + righti = vreinterpretq_s32_u32(vshlq_u32(vsubq_u32(mid, side), shift4)); + leftf = vmulq_f32(vcvtq_f32_s32(lefti), factor4); + rightf = vmulq_f32(vcvtq_f32_s32(righti), factor4); + ma_dr_flac__vst2q_f32(pOutputSamples + i*8, vzipq_f32(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)((mid + side) << shift) * factor; + pOutputSamples[i*2+1] = (ma_int32)((mid - side) << shift) * factor; + } + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_mid_side(ma_dr_flac* 
pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_f32__decode_mid_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_f32__decode_mid_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_f32__decode_mid_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_f32__decode_mid_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + for (ma_uint64 i = 0; i < frameCount; ++i) { + pOutputSamples[i*2+0] = (float)((ma_int32)((ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample)) / 2147483648.0); + pOutputSamples[i*2+1] = (float)((ma_int32)((ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample)) / 2147483648.0); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + float factor = 1 / 2147483648.0; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 tempL0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 tempL1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 tempL2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 tempL3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 tempR0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 tempR1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 tempR2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 tempR3 = pInputSamples1U32[i*4+3] << shift1; + pOutputSamples[i*8+0] = (ma_int32)tempL0 * factor; + pOutputSamples[i*8+1] = (ma_int32)tempR0 * factor; + pOutputSamples[i*8+2] = (ma_int32)tempL1 * factor; + pOutputSamples[i*8+3] = (ma_int32)tempR1 * factor; + pOutputSamples[i*8+4] = (ma_int32)tempL2 * factor; + pOutputSamples[i*8+5] = (ma_int32)tempR2 * factor; + pOutputSamples[i*8+6] = (ma_int32)tempL3 * factor; + pOutputSamples[i*8+7] = (ma_int32)tempR3 * factor; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int32)(pInputSamples0U32[i] << shift0) * factor; + pOutputSamples[i*2+1] = (ma_int32)(pInputSamples1U32[i] << shift1) * factor; + } +} +#if 
defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8; + ma_uint32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8; + float factor = 1.0f / 8388608.0f; + __m128 factor128 = _mm_set1_ps(factor); + for (i = 0; i < frameCount4; ++i) { + __m128i lefti; + __m128i righti; + __m128 leftf; + __m128 rightf; + lefti = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0); + righti = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + leftf = _mm_mul_ps(_mm_cvtepi32_ps(lefti), factor128); + rightf = _mm_mul_ps(_mm_cvtepi32_ps(righti), factor128); + _mm_storeu_ps(pOutputSamples + i*8 + 0, _mm_unpacklo_ps(leftf, rightf)); + _mm_storeu_ps(pOutputSamples + i*8 + 4, _mm_unpackhi_ps(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int32)(pInputSamples0U32[i] << shift0) * factor; + pOutputSamples[i*2+1] = (ma_int32)(pInputSamples1U32[i] << shift1) * factor; + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8; + ma_uint32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8; + float factor = 1.0f / 8388608.0f; + float32x4_t factor4 = vdupq_n_f32(factor); + int32x4_t shift0_4 = vdupq_n_s32(shift0); + int32x4_t shift1_4 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + int32x4_t lefti; + int32x4_t righti; + float32x4_t leftf; + float32x4_t rightf; + lefti = vreinterpretq_s32_u32(vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), shift0_4)); + righti = vreinterpretq_s32_u32(vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift1_4)); + leftf = vmulq_f32(vcvtq_f32_s32(lefti), factor4); + rightf = vmulq_f32(vcvtq_f32_s32(righti), factor4); + ma_dr_flac__vst2q_f32(pOutputSamples + i*8, vzipq_f32(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int32)(pInputSamples0U32[i] << shift0) * factor; + pOutputSamples[i*2+1] = (ma_int32)(pInputSamples1U32[i] << shift1) * factor; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + 
ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +MA_API ma_uint64 ma_dr_flac_read_pcm_frames_f32(ma_dr_flac* pFlac, ma_uint64 framesToRead, float* pBufferOut) +{ + ma_uint64 framesRead; + ma_uint32 unusedBitsPerSample; + if (pFlac == NULL || framesToRead == 0) { + return 0; + } + if (pBufferOut == NULL) { + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, framesToRead); + } + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 32); + unusedBitsPerSample = 32 - pFlac->bitsPerSample; + framesRead = 0; + while (framesToRead > 0) { + if (pFlac->currentFLACFrame.pcmFramesRemaining == 0) { + if (!ma_dr_flac__read_and_decode_next_flac_frame(pFlac)) { + break; + } + } else { + unsigned int channelCount = ma_dr_flac__get_channel_count_from_channel_assignment(pFlac->currentFLACFrame.header.channelAssignment); + ma_uint64 iFirstPCMFrame = pFlac->currentFLACFrame.header.blockSizeInPCMFrames - pFlac->currentFLACFrame.pcmFramesRemaining; + ma_uint64 frameCountThisIteration = framesToRead; + if (frameCountThisIteration > pFlac->currentFLACFrame.pcmFramesRemaining) { + frameCountThisIteration = pFlac->currentFLACFrame.pcmFramesRemaining; + } + if (channelCount == 2) { + const ma_int32* pDecodedSamples0 = pFlac->currentFLACFrame.subframes[0].pSamplesS32 + iFirstPCMFrame; + const ma_int32* pDecodedSamples1 = pFlac->currentFLACFrame.subframes[1].pSamplesS32 + iFirstPCMFrame; + switch (pFlac->currentFLACFrame.header.channelAssignment) + { + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE: + { + ma_dr_flac_read_pcm_frames_f32__decode_left_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE: + { + ma_dr_flac_read_pcm_frames_f32__decode_right_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_MID_SIDE: + { + ma_dr_flac_read_pcm_frames_f32__decode_mid_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_INDEPENDENT: + default: + { + ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + } + } else { + ma_uint64 i; + for (i = 0; i < frameCountThisIteration; ++i) { + unsigned int j; + for (j = 0; j < channelCount; ++j) { + ma_int32 sampleS32 = (ma_int32)((ma_uint32)(pFlac->currentFLACFrame.subframes[j].pSamplesS32[iFirstPCMFrame + i]) << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[j].wastedBitsPerSample)); + pBufferOut[(i*channelCount)+j] = (float)(sampleS32 / 2147483648.0); + } + } + } + framesRead += frameCountThisIteration; + pBufferOut += 
frameCountThisIteration * channelCount; + framesToRead -= frameCountThisIteration; + pFlac->currentPCMFrame += frameCountThisIteration; + pFlac->currentFLACFrame.pcmFramesRemaining -= (unsigned int)frameCountThisIteration; + } + } + return framesRead; +} +MA_API ma_bool32 ma_dr_flac_seek_to_pcm_frame(ma_dr_flac* pFlac, ma_uint64 pcmFrameIndex) +{ + if (pFlac == NULL) { + return MA_FALSE; + } + if (pFlac->currentPCMFrame == pcmFrameIndex) { + return MA_TRUE; + } + if (pFlac->firstFLACFramePosInBytes == 0) { + return MA_FALSE; + } + if (pcmFrameIndex == 0) { + pFlac->currentPCMFrame = 0; + return ma_dr_flac__seek_to_first_frame(pFlac); + } else { + ma_bool32 wasSuccessful = MA_FALSE; + ma_uint64 originalPCMFrame = pFlac->currentPCMFrame; + if (pcmFrameIndex > pFlac->totalPCMFrameCount) { + pcmFrameIndex = pFlac->totalPCMFrameCount; + } + if (pcmFrameIndex > pFlac->currentPCMFrame) { + ma_uint32 offset = (ma_uint32)(pcmFrameIndex - pFlac->currentPCMFrame); + if (pFlac->currentFLACFrame.pcmFramesRemaining > offset) { + pFlac->currentFLACFrame.pcmFramesRemaining -= offset; + pFlac->currentPCMFrame = pcmFrameIndex; + return MA_TRUE; + } + } else { + ma_uint32 offsetAbs = (ma_uint32)(pFlac->currentPCMFrame - pcmFrameIndex); + ma_uint32 currentFLACFramePCMFrameCount = pFlac->currentFLACFrame.header.blockSizeInPCMFrames; + ma_uint32 currentFLACFramePCMFramesConsumed = currentFLACFramePCMFrameCount - pFlac->currentFLACFrame.pcmFramesRemaining; + if (currentFLACFramePCMFramesConsumed > offsetAbs) { + pFlac->currentFLACFrame.pcmFramesRemaining += offsetAbs; + pFlac->currentPCMFrame = pcmFrameIndex; + return MA_TRUE; + } + } +#ifndef MA_DR_FLAC_NO_OGG + if (pFlac->container == ma_dr_flac_container_ogg) + { + wasSuccessful = ma_dr_flac_ogg__seek_to_pcm_frame(pFlac, pcmFrameIndex); + } + else +#endif + { + if (!pFlac->_noSeekTableSeek) { + wasSuccessful = ma_dr_flac__seek_to_pcm_frame__seek_table(pFlac, pcmFrameIndex); + } +#if !defined(MA_DR_FLAC_NO_CRC) + if (!wasSuccessful && !pFlac->_noBinarySearchSeek && pFlac->totalPCMFrameCount > 0) { + wasSuccessful = ma_dr_flac__seek_to_pcm_frame__binary_search(pFlac, pcmFrameIndex); + } +#endif + if (!wasSuccessful && !pFlac->_noBruteForceSeek) { + wasSuccessful = ma_dr_flac__seek_to_pcm_frame__brute_force(pFlac, pcmFrameIndex); + } + } + if (wasSuccessful) { + pFlac->currentPCMFrame = pcmFrameIndex; + } else { + if (ma_dr_flac_seek_to_pcm_frame(pFlac, originalPCMFrame) == MA_FALSE) { + ma_dr_flac_seek_to_pcm_frame(pFlac, 0); + } + } + return wasSuccessful; + } +} +#define MA_DR_FLAC_DEFINE_FULL_READ_AND_CLOSE(extension, type) \ +static type* ma_dr_flac__full_read_and_close_ ## extension (ma_dr_flac* pFlac, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalPCMFrameCountOut)\ +{ \ + type* pSampleData = NULL; \ + ma_uint64 totalPCMFrameCount; \ + \ + MA_DR_FLAC_ASSERT(pFlac != NULL); \ + \ + totalPCMFrameCount = pFlac->totalPCMFrameCount; \ + \ + if (totalPCMFrameCount == 0) { \ + type buffer[4096]; \ + ma_uint64 pcmFramesRead; \ + size_t sampleDataBufferSize = sizeof(buffer); \ + \ + pSampleData = (type*)ma_dr_flac__malloc_from_callbacks(sampleDataBufferSize, &pFlac->allocationCallbacks); \ + if (pSampleData == NULL) { \ + goto on_error; \ + } \ + \ + while ((pcmFramesRead = (ma_uint64)ma_dr_flac_read_pcm_frames_##extension(pFlac, sizeof(buffer)/sizeof(buffer[0])/pFlac->channels, buffer)) > 0) { \ + if (((totalPCMFrameCount + pcmFramesRead) * pFlac->channels * sizeof(type)) > sampleDataBufferSize) { \ + type* pNewSampleData; \ + size_t 
newSampleDataBufferSize; \ + \ + newSampleDataBufferSize = sampleDataBufferSize * 2; \ + pNewSampleData = (type*)ma_dr_flac__realloc_from_callbacks(pSampleData, newSampleDataBufferSize, sampleDataBufferSize, &pFlac->allocationCallbacks); \ + if (pNewSampleData == NULL) { \ + ma_dr_flac__free_from_callbacks(pSampleData, &pFlac->allocationCallbacks); \ + goto on_error; \ + } \ + \ + sampleDataBufferSize = newSampleDataBufferSize; \ + pSampleData = pNewSampleData; \ + } \ + \ + MA_DR_FLAC_COPY_MEMORY(pSampleData + (totalPCMFrameCount*pFlac->channels), buffer, (size_t)(pcmFramesRead*pFlac->channels*sizeof(type))); \ + totalPCMFrameCount += pcmFramesRead; \ + } \ + \ + \ + MA_DR_FLAC_ZERO_MEMORY(pSampleData + (totalPCMFrameCount*pFlac->channels), (size_t)(sampleDataBufferSize - totalPCMFrameCount*pFlac->channels*sizeof(type))); \ + } else { \ + ma_uint64 dataSize = totalPCMFrameCount*pFlac->channels*sizeof(type); \ + if (dataSize > (ma_uint64)MA_SIZE_MAX) { \ + goto on_error; \ + } \ + \ + pSampleData = (type*)ma_dr_flac__malloc_from_callbacks((size_t)dataSize, &pFlac->allocationCallbacks); \ + if (pSampleData == NULL) { \ + goto on_error; \ + } \ + \ + totalPCMFrameCount = ma_dr_flac_read_pcm_frames_##extension(pFlac, pFlac->totalPCMFrameCount, pSampleData); \ + } \ + \ + if (sampleRateOut) *sampleRateOut = pFlac->sampleRate; \ + if (channelsOut) *channelsOut = pFlac->channels; \ + if (totalPCMFrameCountOut) *totalPCMFrameCountOut = totalPCMFrameCount; \ + \ + ma_dr_flac_close(pFlac); \ + return pSampleData; \ + \ +on_error: \ + ma_dr_flac_close(pFlac); \ + return NULL; \ +} +MA_DR_FLAC_DEFINE_FULL_READ_AND_CLOSE(s32, ma_int32) +MA_DR_FLAC_DEFINE_FULL_READ_AND_CLOSE(s16, ma_int16) +MA_DR_FLAC_DEFINE_FULL_READ_AND_CLOSE(f32, float) +MA_API ma_int32* ma_dr_flac_open_and_read_pcm_frames_s32(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalPCMFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalPCMFrameCountOut) { + *totalPCMFrameCountOut = 0; + } + pFlac = ma_dr_flac_open(onRead, onSeek, pUserData, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_s32(pFlac, channelsOut, sampleRateOut, totalPCMFrameCountOut); +} +MA_API ma_int16* ma_dr_flac_open_and_read_pcm_frames_s16(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalPCMFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalPCMFrameCountOut) { + *totalPCMFrameCountOut = 0; + } + pFlac = ma_dr_flac_open(onRead, onSeek, pUserData, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_s16(pFlac, channelsOut, sampleRateOut, totalPCMFrameCountOut); +} +MA_API float* ma_dr_flac_open_and_read_pcm_frames_f32(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalPCMFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalPCMFrameCountOut) { + 
*totalPCMFrameCountOut = 0; + } + pFlac = ma_dr_flac_open(onRead, onSeek, pUserData, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_f32(pFlac, channelsOut, sampleRateOut, totalPCMFrameCountOut); +} +#ifndef MA_DR_FLAC_NO_STDIO +MA_API ma_int32* ma_dr_flac_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (sampleRate) { + *sampleRate = 0; + } + if (channels) { + *channels = 0; + } + if (totalPCMFrameCount) { + *totalPCMFrameCount = 0; + } + pFlac = ma_dr_flac_open_file(filename, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_s32(pFlac, channels, sampleRate, totalPCMFrameCount); +} +MA_API ma_int16* ma_dr_flac_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (sampleRate) { + *sampleRate = 0; + } + if (channels) { + *channels = 0; + } + if (totalPCMFrameCount) { + *totalPCMFrameCount = 0; + } + pFlac = ma_dr_flac_open_file(filename, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_s16(pFlac, channels, sampleRate, totalPCMFrameCount); +} +MA_API float* ma_dr_flac_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (sampleRate) { + *sampleRate = 0; + } + if (channels) { + *channels = 0; + } + if (totalPCMFrameCount) { + *totalPCMFrameCount = 0; + } + pFlac = ma_dr_flac_open_file(filename, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_f32(pFlac, channels, sampleRate, totalPCMFrameCount); +} +#endif +MA_API ma_int32* ma_dr_flac_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (sampleRate) { + *sampleRate = 0; + } + if (channels) { + *channels = 0; + } + if (totalPCMFrameCount) { + *totalPCMFrameCount = 0; + } + pFlac = ma_dr_flac_open_memory(data, dataSize, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_s32(pFlac, channels, sampleRate, totalPCMFrameCount); +} +MA_API ma_int16* ma_dr_flac_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (sampleRate) { + *sampleRate = 0; + } + if (channels) { + *channels = 0; + } + if (totalPCMFrameCount) { + *totalPCMFrameCount = 0; + } + pFlac = ma_dr_flac_open_memory(data, dataSize, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_s16(pFlac, channels, sampleRate, totalPCMFrameCount); +} +MA_API float* ma_dr_flac_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (sampleRate) { + 
*sampleRate = 0; + } + if (channels) { + *channels = 0; + } + if (totalPCMFrameCount) { + *totalPCMFrameCount = 0; + } + pFlac = ma_dr_flac_open_memory(data, dataSize, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_f32(pFlac, channels, sampleRate, totalPCMFrameCount); +} +MA_API void ma_dr_flac_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + ma_dr_flac__free_from_callbacks(p, pAllocationCallbacks); + } else { + ma_dr_flac__free_default(p, NULL); + } +} +MA_API void ma_dr_flac_init_vorbis_comment_iterator(ma_dr_flac_vorbis_comment_iterator* pIter, ma_uint32 commentCount, const void* pComments) +{ + if (pIter == NULL) { + return; + } + pIter->countRemaining = commentCount; + pIter->pRunningData = (const char*)pComments; +} +MA_API const char* ma_dr_flac_next_vorbis_comment(ma_dr_flac_vorbis_comment_iterator* pIter, ma_uint32* pCommentLengthOut) +{ + ma_int32 length; + const char* pComment; + if (pCommentLengthOut) { + *pCommentLengthOut = 0; + } + if (pIter == NULL || pIter->countRemaining == 0 || pIter->pRunningData == NULL) { + return NULL; + } + length = ma_dr_flac__le2host_32_ptr_unaligned(pIter->pRunningData); + pIter->pRunningData += 4; + pComment = pIter->pRunningData; + pIter->pRunningData += length; + pIter->countRemaining -= 1; + if (pCommentLengthOut) { + *pCommentLengthOut = length; + } + return pComment; +} +MA_API void ma_dr_flac_init_cuesheet_track_iterator(ma_dr_flac_cuesheet_track_iterator* pIter, ma_uint32 trackCount, const void* pTrackData) +{ + if (pIter == NULL) { + return; + } + pIter->countRemaining = trackCount; + pIter->pRunningData = (const char*)pTrackData; +} +MA_API ma_bool32 ma_dr_flac_next_cuesheet_track(ma_dr_flac_cuesheet_track_iterator* pIter, ma_dr_flac_cuesheet_track* pCuesheetTrack) +{ + ma_dr_flac_cuesheet_track cuesheetTrack; + const char* pRunningData; + ma_uint64 offsetHi; + ma_uint64 offsetLo; + if (pIter == NULL || pIter->countRemaining == 0 || pIter->pRunningData == NULL) { + return MA_FALSE; + } + pRunningData = pIter->pRunningData; + offsetHi = ma_dr_flac__be2host_32(*(const ma_uint32*)pRunningData); pRunningData += 4; + offsetLo = ma_dr_flac__be2host_32(*(const ma_uint32*)pRunningData); pRunningData += 4; + cuesheetTrack.offset = offsetLo | (offsetHi << 32); + cuesheetTrack.trackNumber = pRunningData[0]; pRunningData += 1; + MA_DR_FLAC_COPY_MEMORY(cuesheetTrack.ISRC, pRunningData, sizeof(cuesheetTrack.ISRC)); pRunningData += 12; + cuesheetTrack.isAudio = (pRunningData[0] & 0x80) != 0; + cuesheetTrack.preEmphasis = (pRunningData[0] & 0x40) != 0; pRunningData += 14; + cuesheetTrack.indexCount = pRunningData[0]; pRunningData += 1; + cuesheetTrack.pIndexPoints = (const ma_dr_flac_cuesheet_track_index*)pRunningData; pRunningData += cuesheetTrack.indexCount * sizeof(ma_dr_flac_cuesheet_track_index); + pIter->pRunningData = pRunningData; + pIter->countRemaining -= 1; + if (pCuesheetTrack) { + *pCuesheetTrack = cuesheetTrack; + } + return MA_TRUE; +} +#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) +#pragma GCC diagnostic pop +#endif +#endif +/* dr_flac_c end */ +#endif /* MA_DR_FLAC_IMPLEMENTATION */ +#endif /* MA_NO_FLAC */ + +#if !defined(MA_NO_MP3) && !defined(MA_NO_DECODING) +#if !defined(MA_DR_MP3_IMPLEMENTATION) && !defined(MA_DR_MP3_IMPLEMENTATION) /* For backwards compatibility. Will be removed in version 0.11 for cleanliness. 
*/ +/* dr_mp3_c begin */ +#ifndef ma_dr_mp3_c +#define ma_dr_mp3_c +#include <stdlib.h> +#include <string.h> +#include <limits.h> +MA_API void ma_dr_mp3_version(ma_uint32* pMajor, ma_uint32* pMinor, ma_uint32* pRevision) +{ + if (pMajor) { + *pMajor = MA_DR_MP3_VERSION_MAJOR; + } + if (pMinor) { + *pMinor = MA_DR_MP3_VERSION_MINOR; + } + if (pRevision) { + *pRevision = MA_DR_MP3_VERSION_REVISION; + } +} +MA_API const char* ma_dr_mp3_version_string(void) +{ + return MA_DR_MP3_VERSION_STRING; +} +#if defined(__TINYC__) +#define MA_DR_MP3_NO_SIMD +#endif +#define MA_DR_MP3_OFFSET_PTR(p, offset) ((void*)((ma_uint8*)(p) + (offset))) +#define MA_DR_MP3_MAX_FREE_FORMAT_FRAME_SIZE 2304 +#ifndef MA_DR_MP3_MAX_FRAME_SYNC_MATCHES +#define MA_DR_MP3_MAX_FRAME_SYNC_MATCHES 10 +#endif +#define MA_DR_MP3_MAX_L3_FRAME_PAYLOAD_BYTES MA_DR_MP3_MAX_FREE_FORMAT_FRAME_SIZE +#define MA_DR_MP3_MAX_BITRESERVOIR_BYTES 511 +#define MA_DR_MP3_SHORT_BLOCK_TYPE 2 +#define MA_DR_MP3_STOP_BLOCK_TYPE 3 +#define MA_DR_MP3_MODE_MONO 3 +#define MA_DR_MP3_MODE_JOINT_STEREO 1 +#define MA_DR_MP3_HDR_SIZE 4 +#define MA_DR_MP3_HDR_IS_MONO(h) (((h[3]) & 0xC0) == 0xC0) +#define MA_DR_MP3_HDR_IS_MS_STEREO(h) (((h[3]) & 0xE0) == 0x60) +#define MA_DR_MP3_HDR_IS_FREE_FORMAT(h) (((h[2]) & 0xF0) == 0) +#define MA_DR_MP3_HDR_IS_CRC(h) (!((h[1]) & 1)) +#define MA_DR_MP3_HDR_TEST_PADDING(h) ((h[2]) & 0x2) +#define MA_DR_MP3_HDR_TEST_MPEG1(h) ((h[1]) & 0x8) +#define MA_DR_MP3_HDR_TEST_NOT_MPEG25(h) ((h[1]) & 0x10) +#define MA_DR_MP3_HDR_TEST_I_STEREO(h) ((h[3]) & 0x10) +#define MA_DR_MP3_HDR_TEST_MS_STEREO(h) ((h[3]) & 0x20) +#define MA_DR_MP3_HDR_GET_STEREO_MODE(h) (((h[3]) >> 6) & 3) +#define MA_DR_MP3_HDR_GET_STEREO_MODE_EXT(h) (((h[3]) >> 4) & 3) +#define MA_DR_MP3_HDR_GET_LAYER(h) (((h[1]) >> 1) & 3) +#define MA_DR_MP3_HDR_GET_BITRATE(h) ((h[2]) >> 4) +#define MA_DR_MP3_HDR_GET_SAMPLE_RATE(h) (((h[2]) >> 2) & 3) +#define MA_DR_MP3_HDR_GET_MY_SAMPLE_RATE(h) (MA_DR_MP3_HDR_GET_SAMPLE_RATE(h) + (((h[1] >> 3) & 1) + ((h[1] >> 4) & 1))*3) +#define MA_DR_MP3_HDR_IS_FRAME_576(h) ((h[1] & 14) == 2) +#define MA_DR_MP3_HDR_IS_LAYER_1(h) ((h[1] & 6) == 6) +#define MA_DR_MP3_BITS_DEQUANTIZER_OUT -1 +#define MA_DR_MP3_MAX_SCF (255 + MA_DR_MP3_BITS_DEQUANTIZER_OUT*4 - 210) +#define MA_DR_MP3_MAX_SCFI ((MA_DR_MP3_MAX_SCF + 3) & ~3) +#define MA_DR_MP3_MIN(a, b) ((a) > (b) ? (b) : (a)) +#define MA_DR_MP3_MAX(a, b) ((a) < (b) ?
(b) : (a)) +#if !defined(MA_DR_MP3_NO_SIMD) +#if !defined(MA_DR_MP3_ONLY_SIMD) && (defined(_M_X64) || defined(__x86_64__) || defined(__aarch64__) || defined(_M_ARM64)) +#define MA_DR_MP3_ONLY_SIMD +#endif +#if ((defined(_MSC_VER) && _MSC_VER >= 1400) && defined(_M_X64)) || ((defined(__i386) || defined(_M_IX86) || defined(__i386__) || defined(__x86_64__)) && ((defined(_M_IX86_FP) && _M_IX86_FP == 2) || defined(__SSE2__))) +#if defined(_MSC_VER) +#include <intrin.h> +#endif +#include <immintrin.h> +#define MA_DR_MP3_HAVE_SSE 1 +#define MA_DR_MP3_HAVE_SIMD 1 +#define MA_DR_MP3_VSTORE _mm_storeu_ps +#define MA_DR_MP3_VLD _mm_loadu_ps +#define MA_DR_MP3_VSET _mm_set1_ps +#define MA_DR_MP3_VADD _mm_add_ps +#define MA_DR_MP3_VSUB _mm_sub_ps +#define MA_DR_MP3_VMUL _mm_mul_ps +#define MA_DR_MP3_VMAC(a, x, y) _mm_add_ps(a, _mm_mul_ps(x, y)) +#define MA_DR_MP3_VMSB(a, x, y) _mm_sub_ps(a, _mm_mul_ps(x, y)) +#define MA_DR_MP3_VMUL_S(x, s) _mm_mul_ps(x, _mm_set1_ps(s)) +#define MA_DR_MP3_VREV(x) _mm_shuffle_ps(x, x, _MM_SHUFFLE(0, 1, 2, 3)) +typedef __m128 ma_dr_mp3_f4; +#if defined(_MSC_VER) || defined(MA_DR_MP3_ONLY_SIMD) +#define ma_dr_mp3_cpuid __cpuid +#else +static __inline__ __attribute__((always_inline)) void ma_dr_mp3_cpuid(int CPUInfo[], const int InfoType) +{ +#if defined(__PIC__) + __asm__ __volatile__( +#if defined(__x86_64__) + "push %%rbx\n" + "cpuid\n" + "xchgl %%ebx, %1\n" + "pop %%rbx\n" +#else + "xchgl %%ebx, %1\n" + "cpuid\n" + "xchgl %%ebx, %1\n" +#endif + : "=a" (CPUInfo[0]), "=r" (CPUInfo[1]), "=c" (CPUInfo[2]), "=d" (CPUInfo[3]) + : "a" (InfoType)); +#else + __asm__ __volatile__( + "cpuid" + : "=a" (CPUInfo[0]), "=b" (CPUInfo[1]), "=c" (CPUInfo[2]), "=d" (CPUInfo[3]) + : "a" (InfoType)); +#endif +} +#endif +static int ma_dr_mp3_have_simd(void) +{ +#ifdef MA_DR_MP3_ONLY_SIMD + return 1; +#else + static int g_have_simd; + int CPUInfo[4]; +#ifdef MINIMP3_TEST + static int g_counter; + if (g_counter++ > 100) + return 0; +#endif + if (g_have_simd) + goto end; + ma_dr_mp3_cpuid(CPUInfo, 0); + if (CPUInfo[0] > 0) + { + ma_dr_mp3_cpuid(CPUInfo, 1); + g_have_simd = (CPUInfo[3] & (1 << 26)) + 1; + return g_have_simd - 1; + } +end: + return g_have_simd - 1; +#endif +} +#elif defined(__ARM_NEON) || defined(__aarch64__) || defined(_M_ARM64) +#include <arm_neon.h> +#define MA_DR_MP3_HAVE_SSE 0 +#define MA_DR_MP3_HAVE_SIMD 1 +#define MA_DR_MP3_VSTORE vst1q_f32 +#define MA_DR_MP3_VLD vld1q_f32 +#define MA_DR_MP3_VSET vmovq_n_f32 +#define MA_DR_MP3_VADD vaddq_f32 +#define MA_DR_MP3_VSUB vsubq_f32 +#define MA_DR_MP3_VMUL vmulq_f32 +#define MA_DR_MP3_VMAC(a, x, y) vmlaq_f32(a, x, y) +#define MA_DR_MP3_VMSB(a, x, y) vmlsq_f32(a, x, y) +#define MA_DR_MP3_VMUL_S(x, s) vmulq_f32(x, vmovq_n_f32(s)) +#define MA_DR_MP3_VREV(x) vcombine_f32(vget_high_f32(vrev64q_f32(x)), vget_low_f32(vrev64q_f32(x))) +typedef float32x4_t ma_dr_mp3_f4; +static int ma_dr_mp3_have_simd(void) +{ + return 1; +} +#else +#define MA_DR_MP3_HAVE_SSE 0 +#define MA_DR_MP3_HAVE_SIMD 0 +#ifdef MA_DR_MP3_ONLY_SIMD +#error MA_DR_MP3_ONLY_SIMD used, but SSE/NEON not enabled +#endif +#endif +#else +#define MA_DR_MP3_HAVE_SIMD 0 +#endif +#if defined(__ARM_ARCH) && (__ARM_ARCH >= 6) && !defined(__aarch64__) && !defined(_M_ARM64) +#define MA_DR_MP3_HAVE_ARMV6 1 +static __inline__ __attribute__((always_inline)) ma_int32 ma_dr_mp3_clip_int16_arm(ma_int32 a) +{ + ma_int32 x = 0; + __asm__ ("ssat %0, #16, %1" : "=r"(x) : "r"(a)); + return x; +} +#else +#define MA_DR_MP3_HAVE_ARMV6 0 +#endif +#ifndef MA_DR_MP3_ASSERT +#include <assert.h> +#define MA_DR_MP3_ASSERT(expression) assert(expression)
+#endif +#ifndef MA_DR_MP3_COPY_MEMORY +#define MA_DR_MP3_COPY_MEMORY(dst, src, sz) memcpy((dst), (src), (sz)) +#endif +#ifndef MA_DR_MP3_MOVE_MEMORY +#define MA_DR_MP3_MOVE_MEMORY(dst, src, sz) memmove((dst), (src), (sz)) +#endif +#ifndef MA_DR_MP3_ZERO_MEMORY +#define MA_DR_MP3_ZERO_MEMORY(p, sz) memset((p), 0, (sz)) +#endif +#define MA_DR_MP3_ZERO_OBJECT(p) MA_DR_MP3_ZERO_MEMORY((p), sizeof(*(p))) +#ifndef MA_DR_MP3_MALLOC +#define MA_DR_MP3_MALLOC(sz) malloc((sz)) +#endif +#ifndef MA_DR_MP3_REALLOC +#define MA_DR_MP3_REALLOC(p, sz) realloc((p), (sz)) +#endif +#ifndef MA_DR_MP3_FREE +#define MA_DR_MP3_FREE(p) free((p)) +#endif +typedef struct +{ + const ma_uint8 *buf; + int pos, limit; +} ma_dr_mp3_bs; +typedef struct +{ + float scf[3*64]; + ma_uint8 total_bands, stereo_bands, bitalloc[64], scfcod[64]; +} ma_dr_mp3_L12_scale_info; +typedef struct +{ + ma_uint8 tab_offset, code_tab_width, band_count; +} ma_dr_mp3_L12_subband_alloc; +typedef struct +{ + const ma_uint8 *sfbtab; + ma_uint16 part_23_length, big_values, scalefac_compress; + ma_uint8 global_gain, block_type, mixed_block_flag, n_long_sfb, n_short_sfb; + ma_uint8 table_select[3], region_count[3], subblock_gain[3]; + ma_uint8 preflag, scalefac_scale, count1_table, scfsi; +} ma_dr_mp3_L3_gr_info; +typedef struct +{ + ma_dr_mp3_bs bs; + ma_uint8 maindata[MA_DR_MP3_MAX_BITRESERVOIR_BYTES + MA_DR_MP3_MAX_L3_FRAME_PAYLOAD_BYTES]; + ma_dr_mp3_L3_gr_info gr_info[4]; + float grbuf[2][576], scf[40], syn[18 + 15][2*32]; + ma_uint8 ist_pos[2][39]; +} ma_dr_mp3dec_scratch; +static void ma_dr_mp3_bs_init(ma_dr_mp3_bs *bs, const ma_uint8 *data, int bytes) +{ + bs->buf = data; + bs->pos = 0; + bs->limit = bytes*8; +} +static ma_uint32 ma_dr_mp3_bs_get_bits(ma_dr_mp3_bs *bs, int n) +{ + ma_uint32 next, cache = 0, s = bs->pos & 7; + int shl = n + s; + const ma_uint8 *p = bs->buf + (bs->pos >> 3); + if ((bs->pos += n) > bs->limit) + return 0; + next = *p++ & (255 >> s); + while ((shl -= 8) > 0) + { + cache |= next << shl; + next = *p++; + } + return cache | (next >> -shl); +} +static int ma_dr_mp3_hdr_valid(const ma_uint8 *h) +{ + return h[0] == 0xff && + ((h[1] & 0xF0) == 0xf0 || (h[1] & 0xFE) == 0xe2) && + (MA_DR_MP3_HDR_GET_LAYER(h) != 0) && + (MA_DR_MP3_HDR_GET_BITRATE(h) != 15) && + (MA_DR_MP3_HDR_GET_SAMPLE_RATE(h) != 3); +} +static int ma_dr_mp3_hdr_compare(const ma_uint8 *h1, const ma_uint8 *h2) +{ + return ma_dr_mp3_hdr_valid(h2) && + ((h1[1] ^ h2[1]) & 0xFE) == 0 && + ((h1[2] ^ h2[2]) & 0x0C) == 0 && + !(MA_DR_MP3_HDR_IS_FREE_FORMAT(h1) ^ MA_DR_MP3_HDR_IS_FREE_FORMAT(h2)); +} +static unsigned ma_dr_mp3_hdr_bitrate_kbps(const ma_uint8 *h) +{ + static const ma_uint8 halfrate[2][3][15] = { + { { 0,4,8,12,16,20,24,28,32,40,48,56,64,72,80 }, { 0,4,8,12,16,20,24,28,32,40,48,56,64,72,80 }, { 0,16,24,28,32,40,48,56,64,72,80,88,96,112,128 } }, + { { 0,16,20,24,28,32,40,48,56,64,80,96,112,128,160 }, { 0,16,24,28,32,40,48,56,64,80,96,112,128,160,192 }, { 0,16,32,48,64,80,96,112,128,144,160,176,192,208,224 } }, + }; + return 2*halfrate[!!MA_DR_MP3_HDR_TEST_MPEG1(h)][MA_DR_MP3_HDR_GET_LAYER(h) - 1][MA_DR_MP3_HDR_GET_BITRATE(h)]; +} +static unsigned ma_dr_mp3_hdr_sample_rate_hz(const ma_uint8 *h) +{ + static const unsigned g_hz[3] = { 44100, 48000, 32000 }; + return g_hz[MA_DR_MP3_HDR_GET_SAMPLE_RATE(h)] >> (int)!MA_DR_MP3_HDR_TEST_MPEG1(h) >> (int)!MA_DR_MP3_HDR_TEST_NOT_MPEG25(h); +} +static unsigned ma_dr_mp3_hdr_frame_samples(const ma_uint8 *h) +{ + return MA_DR_MP3_HDR_IS_LAYER_1(h) ? 
384 : (1152 >> (int)MA_DR_MP3_HDR_IS_FRAME_576(h)); +} +static int ma_dr_mp3_hdr_frame_bytes(const ma_uint8 *h, int free_format_size) +{ + int frame_bytes = ma_dr_mp3_hdr_frame_samples(h)*ma_dr_mp3_hdr_bitrate_kbps(h)*125/ma_dr_mp3_hdr_sample_rate_hz(h); + if (MA_DR_MP3_HDR_IS_LAYER_1(h)) + { + frame_bytes &= ~3; + } + return frame_bytes ? frame_bytes : free_format_size; +} +static int ma_dr_mp3_hdr_padding(const ma_uint8 *h) +{ + return MA_DR_MP3_HDR_TEST_PADDING(h) ? (MA_DR_MP3_HDR_IS_LAYER_1(h) ? 4 : 1) : 0; +} +#ifndef MA_DR_MP3_ONLY_MP3 +static const ma_dr_mp3_L12_subband_alloc *ma_dr_mp3_L12_subband_alloc_table(const ma_uint8 *hdr, ma_dr_mp3_L12_scale_info *sci) +{ + const ma_dr_mp3_L12_subband_alloc *alloc; + int mode = MA_DR_MP3_HDR_GET_STEREO_MODE(hdr); + int nbands, stereo_bands = (mode == MA_DR_MP3_MODE_MONO) ? 0 : (mode == MA_DR_MP3_MODE_JOINT_STEREO) ? (MA_DR_MP3_HDR_GET_STEREO_MODE_EXT(hdr) << 2) + 4 : 32; + if (MA_DR_MP3_HDR_IS_LAYER_1(hdr)) + { + static const ma_dr_mp3_L12_subband_alloc g_alloc_L1[] = { { 76, 4, 32 } }; + alloc = g_alloc_L1; + nbands = 32; + } else if (!MA_DR_MP3_HDR_TEST_MPEG1(hdr)) + { + static const ma_dr_mp3_L12_subband_alloc g_alloc_L2M2[] = { { 60, 4, 4 }, { 44, 3, 7 }, { 44, 2, 19 } }; + alloc = g_alloc_L2M2; + nbands = 30; + } else + { + static const ma_dr_mp3_L12_subband_alloc g_alloc_L2M1[] = { { 0, 4, 3 }, { 16, 4, 8 }, { 32, 3, 12 }, { 40, 2, 7 } }; + int sample_rate_idx = MA_DR_MP3_HDR_GET_SAMPLE_RATE(hdr); + unsigned kbps = ma_dr_mp3_hdr_bitrate_kbps(hdr) >> (int)(mode != MA_DR_MP3_MODE_MONO); + if (!kbps) + { + kbps = 192; + } + alloc = g_alloc_L2M1; + nbands = 27; + if (kbps < 56) + { + static const ma_dr_mp3_L12_subband_alloc g_alloc_L2M1_lowrate[] = { { 44, 4, 2 }, { 44, 3, 10 } }; + alloc = g_alloc_L2M1_lowrate; + nbands = sample_rate_idx == 2 ? 12 : 8; + } else if (kbps >= 96 && sample_rate_idx != 1) + { + nbands = 30; + } + } + sci->total_bands = (ma_uint8)nbands; + sci->stereo_bands = (ma_uint8)MA_DR_MP3_MIN(stereo_bands, nbands); + return alloc; +} +static void ma_dr_mp3_L12_read_scalefactors(ma_dr_mp3_bs *bs, ma_uint8 *pba, ma_uint8 *scfcod, int bands, float *scf) +{ + static const float g_deq_L12[18*3] = { +#define MA_DR_MP3_DQ(x) 9.53674316e-07f/x, 7.56931807e-07f/x, 6.00777173e-07f/x + MA_DR_MP3_DQ(3),MA_DR_MP3_DQ(7),MA_DR_MP3_DQ(15),MA_DR_MP3_DQ(31),MA_DR_MP3_DQ(63),MA_DR_MP3_DQ(127),MA_DR_MP3_DQ(255),MA_DR_MP3_DQ(511),MA_DR_MP3_DQ(1023),MA_DR_MP3_DQ(2047),MA_DR_MP3_DQ(4095),MA_DR_MP3_DQ(8191),MA_DR_MP3_DQ(16383),MA_DR_MP3_DQ(32767),MA_DR_MP3_DQ(65535),MA_DR_MP3_DQ(3),MA_DR_MP3_DQ(5),MA_DR_MP3_DQ(9) + }; + int i, m; + for (i = 0; i < bands; i++) + { + float s = 0; + int ba = *pba++; + int mask = ba ? 
4 + ((19 >> scfcod[i]) & 3) : 0; + for (m = 4; m; m >>= 1) + { + if (mask & m) + { + int b = ma_dr_mp3_bs_get_bits(bs, 6); + s = g_deq_L12[ba*3 - 6 + b % 3]*(int)(1 << 21 >> b/3); + } + *scf++ = s; + } + } +} +static void ma_dr_mp3_L12_read_scale_info(const ma_uint8 *hdr, ma_dr_mp3_bs *bs, ma_dr_mp3_L12_scale_info *sci) +{ + static const ma_uint8 g_bitalloc_code_tab[] = { + 0,17, 3, 4, 5,6,7, 8,9,10,11,12,13,14,15,16, + 0,17,18, 3,19,4,5, 6,7, 8, 9,10,11,12,13,16, + 0,17,18, 3,19,4,5,16, + 0,17,18,16, + 0,17,18,19, 4,5,6, 7,8, 9,10,11,12,13,14,15, + 0,17,18, 3,19,4,5, 6,7, 8, 9,10,11,12,13,14, + 0, 2, 3, 4, 5,6,7, 8,9,10,11,12,13,14,15,16 + }; + const ma_dr_mp3_L12_subband_alloc *subband_alloc = ma_dr_mp3_L12_subband_alloc_table(hdr, sci); + int i, k = 0, ba_bits = 0; + const ma_uint8 *ba_code_tab = g_bitalloc_code_tab; + for (i = 0; i < sci->total_bands; i++) + { + ma_uint8 ba; + if (i == k) + { + k += subband_alloc->band_count; + ba_bits = subband_alloc->code_tab_width; + ba_code_tab = g_bitalloc_code_tab + subband_alloc->tab_offset; + subband_alloc++; + } + ba = ba_code_tab[ma_dr_mp3_bs_get_bits(bs, ba_bits)]; + sci->bitalloc[2*i] = ba; + if (i < sci->stereo_bands) + { + ba = ba_code_tab[ma_dr_mp3_bs_get_bits(bs, ba_bits)]; + } + sci->bitalloc[2*i + 1] = sci->stereo_bands ? ba : 0; + } + for (i = 0; i < 2*sci->total_bands; i++) + { + sci->scfcod[i] = (ma_uint8)(sci->bitalloc[i] ? MA_DR_MP3_HDR_IS_LAYER_1(hdr) ? 2 : ma_dr_mp3_bs_get_bits(bs, 2) : 6); + } + ma_dr_mp3_L12_read_scalefactors(bs, sci->bitalloc, sci->scfcod, sci->total_bands*2, sci->scf); + for (i = sci->stereo_bands; i < sci->total_bands; i++) + { + sci->bitalloc[2*i + 1] = 0; + } +} +static int ma_dr_mp3_L12_dequantize_granule(float *grbuf, ma_dr_mp3_bs *bs, ma_dr_mp3_L12_scale_info *sci, int group_size) +{ + int i, j, k, choff = 576; + for (j = 0; j < 4; j++) + { + float *dst = grbuf + group_size*j; + for (i = 0; i < 2*sci->total_bands; i++) + { + int ba = sci->bitalloc[i]; + if (ba != 0) + { + if (ba < 17) + { + int half = (1 << (ba - 1)) - 1; + for (k = 0; k < group_size; k++) + { + dst[k] = (float)((int)ma_dr_mp3_bs_get_bits(bs, ba) - half); + } + } else + { + unsigned mod = (2 << (ba - 17)) + 1; + unsigned code = ma_dr_mp3_bs_get_bits(bs, mod + 2 - (mod >> 3)); + for (k = 0; k < group_size; k++, code /= mod) + { + dst[k] = (float)((int)(code % mod - mod/2)); + } + } + } + dst += choff; + choff = 18 - choff; + } + } + return group_size*4; +} +static void ma_dr_mp3_L12_apply_scf_384(ma_dr_mp3_L12_scale_info *sci, const float *scf, float *dst) +{ + int i, k; + MA_DR_MP3_COPY_MEMORY(dst + 576 + sci->stereo_bands*18, dst + sci->stereo_bands*18, (sci->total_bands - sci->stereo_bands)*18*sizeof(float)); + for (i = 0; i < sci->total_bands; i++, dst += 18, scf += 6) + { + for (k = 0; k < 12; k++) + { + dst[k + 0] *= scf[0]; + dst[k + 576] *= scf[3]; + } + } +} +#endif +static int ma_dr_mp3_L3_read_side_info(ma_dr_mp3_bs *bs, ma_dr_mp3_L3_gr_info *gr, const ma_uint8 *hdr) +{ + static const ma_uint8 g_scf_long[8][23] = { + { 6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54,0 }, + { 12,12,12,12,12,12,16,20,24,28,32,40,48,56,64,76,90,2,2,2,2,2,0 }, + { 6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54,0 }, + { 6,6,6,6,6,6,8,10,12,14,16,18,22,26,32,38,46,54,62,70,76,36,0 }, + { 6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54,0 }, + { 4,4,4,4,4,4,6,6,8,8,10,12,16,20,24,28,34,42,50,54,76,158,0 }, + { 4,4,4,4,4,4,6,6,6,8,10,12,16,18,22,28,34,40,46,54,54,192,0 }, + { 
4,4,4,4,4,4,6,6,8,10,12,16,20,24,30,38,46,56,68,84,102,26,0 } + }; + static const ma_uint8 g_scf_short[8][40] = { + { 4,4,4,4,4,4,4,4,4,6,6,6,8,8,8,10,10,10,12,12,12,14,14,14,18,18,18,24,24,24,30,30,30,40,40,40,18,18,18,0 }, + { 8,8,8,8,8,8,8,8,8,12,12,12,16,16,16,20,20,20,24,24,24,28,28,28,36,36,36,2,2,2,2,2,2,2,2,2,26,26,26,0 }, + { 4,4,4,4,4,4,4,4,4,6,6,6,6,6,6,8,8,8,10,10,10,14,14,14,18,18,18,26,26,26,32,32,32,42,42,42,18,18,18,0 }, + { 4,4,4,4,4,4,4,4,4,6,6,6,8,8,8,10,10,10,12,12,12,14,14,14,18,18,18,24,24,24,32,32,32,44,44,44,12,12,12,0 }, + { 4,4,4,4,4,4,4,4,4,6,6,6,8,8,8,10,10,10,12,12,12,14,14,14,18,18,18,24,24,24,30,30,30,40,40,40,18,18,18,0 }, + { 4,4,4,4,4,4,4,4,4,4,4,4,6,6,6,8,8,8,10,10,10,12,12,12,14,14,14,18,18,18,22,22,22,30,30,30,56,56,56,0 }, + { 4,4,4,4,4,4,4,4,4,4,4,4,6,6,6,6,6,6,10,10,10,12,12,12,14,14,14,16,16,16,20,20,20,26,26,26,66,66,66,0 }, + { 4,4,4,4,4,4,4,4,4,4,4,4,6,6,6,8,8,8,12,12,12,16,16,16,20,20,20,26,26,26,34,34,34,42,42,42,12,12,12,0 } + }; + static const ma_uint8 g_scf_mixed[8][40] = { + { 6,6,6,6,6,6,6,6,6,8,8,8,10,10,10,12,12,12,14,14,14,18,18,18,24,24,24,30,30,30,40,40,40,18,18,18,0 }, + { 12,12,12,4,4,4,8,8,8,12,12,12,16,16,16,20,20,20,24,24,24,28,28,28,36,36,36,2,2,2,2,2,2,2,2,2,26,26,26,0 }, + { 6,6,6,6,6,6,6,6,6,6,6,6,8,8,8,10,10,10,14,14,14,18,18,18,26,26,26,32,32,32,42,42,42,18,18,18,0 }, + { 6,6,6,6,6,6,6,6,6,8,8,8,10,10,10,12,12,12,14,14,14,18,18,18,24,24,24,32,32,32,44,44,44,12,12,12,0 }, + { 6,6,6,6,6,6,6,6,6,8,8,8,10,10,10,12,12,12,14,14,14,18,18,18,24,24,24,30,30,30,40,40,40,18,18,18,0 }, + { 4,4,4,4,4,4,6,6,4,4,4,6,6,6,8,8,8,10,10,10,12,12,12,14,14,14,18,18,18,22,22,22,30,30,30,56,56,56,0 }, + { 4,4,4,4,4,4,6,6,4,4,4,6,6,6,6,6,6,10,10,10,12,12,12,14,14,14,16,16,16,20,20,20,26,26,26,66,66,66,0 }, + { 4,4,4,4,4,4,6,6,4,4,4,6,6,6,8,8,8,12,12,12,16,16,16,20,20,20,26,26,26,34,34,34,42,42,42,12,12,12,0 } + }; + unsigned tables, scfsi = 0; + int main_data_begin, part_23_sum = 0; + int gr_count = MA_DR_MP3_HDR_IS_MONO(hdr) ? 1 : 2; + int sr_idx = MA_DR_MP3_HDR_GET_MY_SAMPLE_RATE(hdr); sr_idx -= (sr_idx != 0); + if (MA_DR_MP3_HDR_TEST_MPEG1(hdr)) + { + gr_count *= 2; + main_data_begin = ma_dr_mp3_bs_get_bits(bs, 9); + scfsi = ma_dr_mp3_bs_get_bits(bs, 7 + gr_count); + } else + { + main_data_begin = ma_dr_mp3_bs_get_bits(bs, 8 + gr_count) >> gr_count; + } + do + { + if (MA_DR_MP3_HDR_IS_MONO(hdr)) + { + scfsi <<= 4; + } + gr->part_23_length = (ma_uint16)ma_dr_mp3_bs_get_bits(bs, 12); + part_23_sum += gr->part_23_length; + gr->big_values = (ma_uint16)ma_dr_mp3_bs_get_bits(bs, 9); + if (gr->big_values > 288) + { + return -1; + } + gr->global_gain = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 8); + gr->scalefac_compress = (ma_uint16)ma_dr_mp3_bs_get_bits(bs, MA_DR_MP3_HDR_TEST_MPEG1(hdr) ? 4 : 9); + gr->sfbtab = g_scf_long[sr_idx]; + gr->n_long_sfb = 22; + gr->n_short_sfb = 0; + if (ma_dr_mp3_bs_get_bits(bs, 1)) + { + gr->block_type = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 2); + if (!gr->block_type) + { + return -1; + } + gr->mixed_block_flag = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 1); + gr->region_count[0] = 7; + gr->region_count[1] = 255; + if (gr->block_type == MA_DR_MP3_SHORT_BLOCK_TYPE) + { + scfsi &= 0x0F0F; + if (!gr->mixed_block_flag) + { + gr->region_count[0] = 8; + gr->sfbtab = g_scf_short[sr_idx]; + gr->n_long_sfb = 0; + gr->n_short_sfb = 39; + } else + { + gr->sfbtab = g_scf_mixed[sr_idx]; + gr->n_long_sfb = MA_DR_MP3_HDR_TEST_MPEG1(hdr) ? 
8 : 6; + gr->n_short_sfb = 30; + } + } + tables = ma_dr_mp3_bs_get_bits(bs, 10); + tables <<= 5; + gr->subblock_gain[0] = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 3); + gr->subblock_gain[1] = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 3); + gr->subblock_gain[2] = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 3); + } else + { + gr->block_type = 0; + gr->mixed_block_flag = 0; + tables = ma_dr_mp3_bs_get_bits(bs, 15); + gr->region_count[0] = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 4); + gr->region_count[1] = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 3); + gr->region_count[2] = 255; + } + gr->table_select[0] = (ma_uint8)(tables >> 10); + gr->table_select[1] = (ma_uint8)((tables >> 5) & 31); + gr->table_select[2] = (ma_uint8)((tables) & 31); + gr->preflag = (ma_uint8)(MA_DR_MP3_HDR_TEST_MPEG1(hdr) ? ma_dr_mp3_bs_get_bits(bs, 1) : (gr->scalefac_compress >= 500)); + gr->scalefac_scale = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 1); + gr->count1_table = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 1); + gr->scfsi = (ma_uint8)((scfsi >> 12) & 15); + scfsi <<= 4; + gr++; + } while(--gr_count); + if (part_23_sum + bs->pos > bs->limit + main_data_begin*8) + { + return -1; + } + return main_data_begin; +} +static void ma_dr_mp3_L3_read_scalefactors(ma_uint8 *scf, ma_uint8 *ist_pos, const ma_uint8 *scf_size, const ma_uint8 *scf_count, ma_dr_mp3_bs *bitbuf, int scfsi) +{ + int i, k; + for (i = 0; i < 4 && scf_count[i]; i++, scfsi *= 2) + { + int cnt = scf_count[i]; + if (scfsi & 8) + { + MA_DR_MP3_COPY_MEMORY(scf, ist_pos, cnt); + } else + { + int bits = scf_size[i]; + if (!bits) + { + MA_DR_MP3_ZERO_MEMORY(scf, cnt); + MA_DR_MP3_ZERO_MEMORY(ist_pos, cnt); + } else + { + int max_scf = (scfsi < 0) ? (1 << bits) - 1 : -1; + for (k = 0; k < cnt; k++) + { + int s = ma_dr_mp3_bs_get_bits(bitbuf, bits); + ist_pos[k] = (ma_uint8)(s == max_scf ? 
-1 : s); + scf[k] = (ma_uint8)s; + } + } + } + ist_pos += cnt; + scf += cnt; + } + scf[0] = scf[1] = scf[2] = 0; +} +static float ma_dr_mp3_L3_ldexp_q2(float y, int exp_q2) +{ + static const float g_expfrac[4] = { 9.31322575e-10f,7.83145814e-10f,6.58544508e-10f,5.53767716e-10f }; + int e; + do + { + e = MA_DR_MP3_MIN(30*4, exp_q2); + y *= g_expfrac[e & 3]*(1 << 30 >> (e >> 2)); + } while ((exp_q2 -= e) > 0); + return y; +} +static void ma_dr_mp3_L3_decode_scalefactors(const ma_uint8 *hdr, ma_uint8 *ist_pos, ma_dr_mp3_bs *bs, const ma_dr_mp3_L3_gr_info *gr, float *scf, int ch) +{ + static const ma_uint8 g_scf_partitions[3][28] = { + { 6,5,5, 5,6,5,5,5,6,5, 7,3,11,10,0,0, 7, 7, 7,0, 6, 6,6,3, 8, 8,5,0 }, + { 8,9,6,12,6,9,9,9,6,9,12,6,15,18,0,0, 6,15,12,0, 6,12,9,6, 6,18,9,0 }, + { 9,9,6,12,9,9,9,9,9,9,12,6,18,18,0,0,12,12,12,0,12, 9,9,6,15,12,9,0 } + }; + const ma_uint8 *scf_partition = g_scf_partitions[!!gr->n_short_sfb + !gr->n_long_sfb]; + ma_uint8 scf_size[4], iscf[40]; + int i, scf_shift = gr->scalefac_scale + 1, gain_exp, scfsi = gr->scfsi; + float gain; + if (MA_DR_MP3_HDR_TEST_MPEG1(hdr)) + { + static const ma_uint8 g_scfc_decode[16] = { 0,1,2,3, 12,5,6,7, 9,10,11,13, 14,15,18,19 }; + int part = g_scfc_decode[gr->scalefac_compress]; + scf_size[1] = scf_size[0] = (ma_uint8)(part >> 2); + scf_size[3] = scf_size[2] = (ma_uint8)(part & 3); + } else + { + static const ma_uint8 g_mod[6*4] = { 5,5,4,4,5,5,4,1,4,3,1,1,5,6,6,1,4,4,4,1,4,3,1,1 }; + int k, modprod, sfc, ist = MA_DR_MP3_HDR_TEST_I_STEREO(hdr) && ch; + sfc = gr->scalefac_compress >> ist; + for (k = ist*3*4; sfc >= 0; sfc -= modprod, k += 4) + { + for (modprod = 1, i = 3; i >= 0; i--) + { + scf_size[i] = (ma_uint8)(sfc / modprod % g_mod[k + i]); + modprod *= g_mod[k + i]; + } + } + scf_partition += k; + scfsi = -16; + } + ma_dr_mp3_L3_read_scalefactors(iscf, ist_pos, scf_size, scf_partition, bs, scfsi); + if (gr->n_short_sfb) + { + int sh = 3 - scf_shift; + for (i = 0; i < gr->n_short_sfb; i += 3) + { + iscf[gr->n_long_sfb + i + 0] = (ma_uint8)(iscf[gr->n_long_sfb + i + 0] + (gr->subblock_gain[0] << sh)); + iscf[gr->n_long_sfb + i + 1] = (ma_uint8)(iscf[gr->n_long_sfb + i + 1] + (gr->subblock_gain[1] << sh)); + iscf[gr->n_long_sfb + i + 2] = (ma_uint8)(iscf[gr->n_long_sfb + i + 2] + (gr->subblock_gain[2] << sh)); + } + } else if (gr->preflag) + { + static const ma_uint8 g_preamp[10] = { 1,1,1,1,2,2,3,3,3,2 }; + for (i = 0; i < 10; i++) + { + iscf[11 + i] = (ma_uint8)(iscf[11 + i] + g_preamp[i]); + } + } + gain_exp = gr->global_gain + MA_DR_MP3_BITS_DEQUANTIZER_OUT*4 - 210 - (MA_DR_MP3_HDR_IS_MS_STEREO(hdr) ? 
2 : 0); + gain = ma_dr_mp3_L3_ldexp_q2(1 << (MA_DR_MP3_MAX_SCFI/4), MA_DR_MP3_MAX_SCFI - gain_exp); + for (i = 0; i < (int)(gr->n_long_sfb + gr->n_short_sfb); i++) + { + scf[i] = ma_dr_mp3_L3_ldexp_q2(gain, iscf[i] << scf_shift); + } +} +static const float g_ma_dr_mp3_pow43[129 + 16] = { + 0,-1,-2.519842f,-4.326749f,-6.349604f,-8.549880f,-10.902724f,-13.390518f,-16.000000f,-18.720754f,-21.544347f,-24.463781f,-27.473142f,-30.567351f,-33.741992f,-36.993181f, + 0,1,2.519842f,4.326749f,6.349604f,8.549880f,10.902724f,13.390518f,16.000000f,18.720754f,21.544347f,24.463781f,27.473142f,30.567351f,33.741992f,36.993181f,40.317474f,43.711787f,47.173345f,50.699631f,54.288352f,57.937408f,61.644865f,65.408941f,69.227979f,73.100443f,77.024898f,81.000000f,85.024491f,89.097188f,93.216975f,97.382800f,101.593667f,105.848633f,110.146801f,114.487321f,118.869381f,123.292209f,127.755065f,132.257246f,136.798076f,141.376907f,145.993119f,150.646117f,155.335327f,160.060199f,164.820202f,169.614826f,174.443577f,179.305980f,184.201575f,189.129918f,194.090580f,199.083145f,204.107210f,209.162385f,214.248292f,219.364564f,224.510845f,229.686789f,234.892058f,240.126328f,245.389280f,250.680604f,256.000000f,261.347174f,266.721841f,272.123723f,277.552547f,283.008049f,288.489971f,293.998060f,299.532071f,305.091761f,310.676898f,316.287249f,321.922592f,327.582707f,333.267377f,338.976394f,344.709550f,350.466646f,356.247482f,362.051866f,367.879608f,373.730522f,379.604427f,385.501143f,391.420496f,397.362314f,403.326427f,409.312672f,415.320884f,421.350905f,427.402579f,433.475750f,439.570269f,445.685987f,451.822757f,457.980436f,464.158883f,470.357960f,476.577530f,482.817459f,489.077615f,495.357868f,501.658090f,507.978156f,514.317941f,520.677324f,527.056184f,533.454404f,539.871867f,546.308458f,552.764065f,559.238575f,565.731879f,572.243870f,578.774440f,585.323483f,591.890898f,598.476581f,605.080431f,611.702349f,618.342238f,625.000000f,631.675540f,638.368763f,645.079578f +}; +static float ma_dr_mp3_L3_pow_43(int x) +{ + float frac; + int sign, mult = 256; + if (x < 129) + { + return g_ma_dr_mp3_pow43[16 + x]; + } + if (x < 1024) + { + mult = 16; + x <<= 3; + } + sign = 2*x & 64; + frac = (float)((x & 63) - sign) / ((x & ~63) + sign); + return g_ma_dr_mp3_pow43[16 + ((x + sign) >> 6)]*(1.f + frac*((4.f/3) + frac*(2.f/9)))*mult; +} +static void ma_dr_mp3_L3_huffman(float *dst, ma_dr_mp3_bs *bs, const ma_dr_mp3_L3_gr_info *gr_info, const float *scf, int layer3gr_limit) +{ + static const ma_int16 tabs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 785,785,785,785,784,784,784,784,513,513,513,513,513,513,513,513,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256, + -255,1313,1298,1282,785,785,785,785,784,784,784,784,769,769,769,769,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,290,288, + -255,1313,1298,1282,769,769,769,769,529,529,529,529,529,529,529,529,528,528,528,528,528,528,528,528,512,512,512,512,512,512,512,512,290,288, + -253,-318,-351,-367,785,785,785,785,784,784,784,784,769,769,769,769,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,819,818,547,547,275,275,275,275,561,560,515,546,289,274,288,258, + -254,-287,1329,1299,1314,1312,1057,1057,1042,1042,1026,1026,784,784,784,784,529,529,529,529,529,529,529,529,769,769,769,769,768,768,768,768,563,560,306,306,291,259, + 
-252,-413,-477,-542,1298,-575,1041,1041,784,784,784,784,769,769,769,769,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,-383,-399,1107,1092,1106,1061,849,849,789,789,1104,1091,773,773,1076,1075,341,340,325,309,834,804,577,577,532,532,516,516,832,818,803,816,561,561,531,531,515,546,289,289,288,258, + -252,-429,-493,-559,1057,1057,1042,1042,529,529,529,529,529,529,529,529,784,784,784,784,769,769,769,769,512,512,512,512,512,512,512,512,-382,1077,-415,1106,1061,1104,849,849,789,789,1091,1076,1029,1075,834,834,597,581,340,340,339,324,804,833,532,532,832,772,818,803,817,787,816,771,290,290,290,290,288,258, + -253,-349,-414,-447,-463,1329,1299,-479,1314,1312,1057,1057,1042,1042,1026,1026,785,785,785,785,784,784,784,784,769,769,769,769,768,768,768,768,-319,851,821,-335,836,850,805,849,341,340,325,336,533,533,579,579,564,564,773,832,578,548,563,516,321,276,306,291,304,259, + -251,-572,-733,-830,-863,-879,1041,1041,784,784,784,784,769,769,769,769,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,-511,-527,-543,1396,1351,1381,1366,1395,1335,1380,-559,1334,1138,1138,1063,1063,1350,1392,1031,1031,1062,1062,1364,1363,1120,1120,1333,1348,881,881,881,881,375,374,359,373,343,358,341,325,791,791,1123,1122,-703,1105,1045,-719,865,865,790,790,774,774,1104,1029,338,293,323,308,-799,-815,833,788,772,818,803,816,322,292,307,320,561,531,515,546,289,274,288,258, + -251,-525,-605,-685,-765,-831,-846,1298,1057,1057,1312,1282,785,785,785,785,784,784,784,784,769,769,769,769,512,512,512,512,512,512,512,512,1399,1398,1383,1367,1382,1396,1351,-511,1381,1366,1139,1139,1079,1079,1124,1124,1364,1349,1363,1333,882,882,882,882,807,807,807,807,1094,1094,1136,1136,373,341,535,535,881,775,867,822,774,-591,324,338,-671,849,550,550,866,864,609,609,293,336,534,534,789,835,773,-751,834,804,308,307,833,788,832,772,562,562,547,547,305,275,560,515,290,290, + -252,-397,-477,-557,-622,-653,-719,-735,-750,1329,1299,1314,1057,1057,1042,1042,1312,1282,1024,1024,785,785,785,785,784,784,784,784,769,769,769,769,-383,1127,1141,1111,1126,1140,1095,1110,869,869,883,883,1079,1109,882,882,375,374,807,868,838,881,791,-463,867,822,368,263,852,837,836,-543,610,610,550,550,352,336,534,534,865,774,851,821,850,805,593,533,579,564,773,832,578,578,548,548,577,577,307,276,306,291,516,560,259,259, + 
-250,-2107,-2507,-2764,-2909,-2974,-3007,-3023,1041,1041,1040,1040,769,769,769,769,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,-767,-1052,-1213,-1277,-1358,-1405,-1469,-1535,-1550,-1582,-1614,-1647,-1662,-1694,-1726,-1759,-1774,-1807,-1822,-1854,-1886,1565,-1919,-1935,-1951,-1967,1731,1730,1580,1717,-1983,1729,1564,-1999,1548,-2015,-2031,1715,1595,-2047,1714,-2063,1610,-2079,1609,-2095,1323,1323,1457,1457,1307,1307,1712,1547,1641,1700,1699,1594,1685,1625,1442,1442,1322,1322,-780,-973,-910,1279,1278,1277,1262,1276,1261,1275,1215,1260,1229,-959,974,974,989,989,-943,735,478,478,495,463,506,414,-1039,1003,958,1017,927,942,987,957,431,476,1272,1167,1228,-1183,1256,-1199,895,895,941,941,1242,1227,1212,1135,1014,1014,490,489,503,487,910,1013,985,925,863,894,970,955,1012,847,-1343,831,755,755,984,909,428,366,754,559,-1391,752,486,457,924,997,698,698,983,893,740,740,908,877,739,739,667,667,953,938,497,287,271,271,683,606,590,712,726,574,302,302,738,736,481,286,526,725,605,711,636,724,696,651,589,681,666,710,364,467,573,695,466,466,301,465,379,379,709,604,665,679,316,316,634,633,436,436,464,269,424,394,452,332,438,363,347,408,393,448,331,422,362,407,392,421,346,406,391,376,375,359,1441,1306,-2367,1290,-2383,1337,-2399,-2415,1426,1321,-2431,1411,1336,-2447,-2463,-2479,1169,1169,1049,1049,1424,1289,1412,1352,1319,-2495,1154,1154,1064,1064,1153,1153,416,390,360,404,403,389,344,374,373,343,358,372,327,357,342,311,356,326,1395,1394,1137,1137,1047,1047,1365,1392,1287,1379,1334,1364,1349,1378,1318,1363,792,792,792,792,1152,1152,1032,1032,1121,1121,1046,1046,1120,1120,1030,1030,-2895,1106,1061,1104,849,849,789,789,1091,1076,1029,1090,1060,1075,833,833,309,324,532,532,832,772,818,803,561,561,531,560,515,546,289,274,288,258, + -250,-1179,-1579,-1836,-1996,-2124,-2253,-2333,-2413,-2477,-2542,-2574,-2607,-2622,-2655,1314,1313,1298,1312,1282,785,785,785,785,1040,1040,1025,1025,768,768,768,768,-766,-798,-830,-862,-895,-911,-927,-943,-959,-975,-991,-1007,-1023,-1039,-1055,-1070,1724,1647,-1103,-1119,1631,1767,1662,1738,1708,1723,-1135,1780,1615,1779,1599,1677,1646,1778,1583,-1151,1777,1567,1737,1692,1765,1722,1707,1630,1751,1661,1764,1614,1736,1676,1763,1750,1645,1598,1721,1691,1762,1706,1582,1761,1566,-1167,1749,1629,767,766,751,765,494,494,735,764,719,749,734,763,447,447,748,718,477,506,431,491,446,476,461,505,415,430,475,445,504,399,460,489,414,503,383,474,429,459,502,502,746,752,488,398,501,473,413,472,486,271,480,270,-1439,-1455,1357,-1471,-1487,-1503,1341,1325,-1519,1489,1463,1403,1309,-1535,1372,1448,1418,1476,1356,1462,1387,-1551,1475,1340,1447,1402,1386,-1567,1068,1068,1474,1461,455,380,468,440,395,425,410,454,364,467,466,464,453,269,409,448,268,432,1371,1473,1432,1417,1308,1460,1355,1446,1459,1431,1083,1083,1401,1416,1458,1445,1067,1067,1370,1457,1051,1051,1291,1430,1385,1444,1354,1415,1400,1443,1082,1082,1173,1113,1186,1066,1185,1050,-1967,1158,1128,1172,1097,1171,1081,-1983,1157,1112,416,266,375,400,1170,1142,1127,1065,793,793,1169,1033,1156,1096,1141,1111,1155,1080,1126,1140,898,898,808,808,897,897,792,792,1095,1152,1032,1125,1110,1139,1079,1124,882,807,838,881,853,791,-2319,867,368,263,822,852,837,866,806,865,-2399,851,352,262,534,534,821,836,594,594,549,549,593,593,533,533,848,773,579,579,564,578,548,563,276,276,577,576,306,291,516,560,305,305,275,259, + 
-251,-892,-2058,-2620,-2828,-2957,-3023,-3039,1041,1041,1040,1040,769,769,769,769,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,-511,-527,-543,-559,1530,-575,-591,1528,1527,1407,1526,1391,1023,1023,1023,1023,1525,1375,1268,1268,1103,1103,1087,1087,1039,1039,1523,-604,815,815,815,815,510,495,509,479,508,463,507,447,431,505,415,399,-734,-782,1262,-815,1259,1244,-831,1258,1228,-847,-863,1196,-879,1253,987,987,748,-767,493,493,462,477,414,414,686,669,478,446,461,445,474,429,487,458,412,471,1266,1264,1009,1009,799,799,-1019,-1276,-1452,-1581,-1677,-1757,-1821,-1886,-1933,-1997,1257,1257,1483,1468,1512,1422,1497,1406,1467,1496,1421,1510,1134,1134,1225,1225,1466,1451,1374,1405,1252,1252,1358,1480,1164,1164,1251,1251,1238,1238,1389,1465,-1407,1054,1101,-1423,1207,-1439,830,830,1248,1038,1237,1117,1223,1148,1236,1208,411,426,395,410,379,269,1193,1222,1132,1235,1221,1116,976,976,1192,1162,1177,1220,1131,1191,963,963,-1647,961,780,-1663,558,558,994,993,437,408,393,407,829,978,813,797,947,-1743,721,721,377,392,844,950,828,890,706,706,812,859,796,960,948,843,934,874,571,571,-1919,690,555,689,421,346,539,539,944,779,918,873,932,842,903,888,570,570,931,917,674,674,-2575,1562,-2591,1609,-2607,1654,1322,1322,1441,1441,1696,1546,1683,1593,1669,1624,1426,1426,1321,1321,1639,1680,1425,1425,1305,1305,1545,1668,1608,1623,1667,1592,1638,1666,1320,1320,1652,1607,1409,1409,1304,1304,1288,1288,1664,1637,1395,1395,1335,1335,1622,1636,1394,1394,1319,1319,1606,1621,1392,1392,1137,1137,1137,1137,345,390,360,375,404,373,1047,-2751,-2767,-2783,1062,1121,1046,-2799,1077,-2815,1106,1061,789,789,1105,1104,263,355,310,340,325,354,352,262,339,324,1091,1076,1029,1090,1060,1075,833,833,788,788,1088,1028,818,818,803,803,561,561,531,531,816,771,546,546,289,274,288,258, + -253,-317,-381,-446,-478,-509,1279,1279,-811,-1179,-1451,-1756,-1900,-2028,-2189,-2253,-2333,-2414,-2445,-2511,-2526,1313,1298,-2559,1041,1041,1040,1040,1025,1025,1024,1024,1022,1007,1021,991,1020,975,1019,959,687,687,1018,1017,671,671,655,655,1016,1015,639,639,758,758,623,623,757,607,756,591,755,575,754,559,543,543,1009,783,-575,-621,-685,-749,496,-590,750,749,734,748,974,989,1003,958,988,973,1002,942,987,957,972,1001,926,986,941,971,956,1000,910,985,925,999,894,970,-1071,-1087,-1102,1390,-1135,1436,1509,1451,1374,-1151,1405,1358,1480,1420,-1167,1507,1494,1389,1342,1465,1435,1450,1326,1505,1310,1493,1373,1479,1404,1492,1464,1419,428,443,472,397,736,526,464,464,486,457,442,471,484,482,1357,1449,1434,1478,1388,1491,1341,1490,1325,1489,1463,1403,1309,1477,1372,1448,1418,1433,1476,1356,1462,1387,-1439,1475,1340,1447,1402,1474,1324,1461,1371,1473,269,448,1432,1417,1308,1460,-1711,1459,-1727,1441,1099,1099,1446,1386,1431,1401,-1743,1289,1083,1083,1160,1160,1458,1445,1067,1067,1370,1457,1307,1430,1129,1129,1098,1098,268,432,267,416,266,400,-1887,1144,1187,1082,1173,1113,1186,1066,1050,1158,1128,1143,1172,1097,1171,1081,420,391,1157,1112,1170,1142,1127,1065,1169,1049,1156,1096,1141,1111,1155,1080,1126,1154,1064,1153,1140,1095,1048,-2159,1125,1110,1137,-2175,823,823,1139,1138,807,807,384,264,368,263,868,838,853,791,867,822,852,837,866,806,865,790,-2319,851,821,836,352,262,850,805,849,-2399,533,533,835,820,336,261,578,548,563,577,532,532,832,772,562,562,547,547,305,275,560,515,290,290,288,258 }; + static const ma_uint8 tab32[] = { 130,162,193,209,44,28,76,140,9,9,9,9,9,9,9,9,190,254,222,238,126,94,157,157,109,61,173,205}; + static const ma_uint8 tab33[] = { 252,236,220,204,188,172,156,140,124,108,92,76,60,44,28,12 }; + static const ma_int16 
tabindex[2*16] = { 0,32,64,98,0,132,180,218,292,364,426,538,648,746,0,1126,1460,1460,1460,1460,1460,1460,1460,1460,1842,1842,1842,1842,1842,1842,1842,1842 }; + static const ma_uint8 g_linbits[] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,4,6,8,10,13,4,5,6,7,8,9,11,13 }; +#define MA_DR_MP3_PEEK_BITS(n) (bs_cache >> (32 - (n))) +#define MA_DR_MP3_FLUSH_BITS(n) { bs_cache <<= (n); bs_sh += (n); } +#define MA_DR_MP3_CHECK_BITS while (bs_sh >= 0) { bs_cache |= (ma_uint32)*bs_next_ptr++ << bs_sh; bs_sh -= 8; } +#define MA_DR_MP3_BSPOS ((bs_next_ptr - bs->buf)*8 - 24 + bs_sh) + float one = 0.0f; + int ireg = 0, big_val_cnt = gr_info->big_values; + const ma_uint8 *sfb = gr_info->sfbtab; + const ma_uint8 *bs_next_ptr = bs->buf + bs->pos/8; + ma_uint32 bs_cache = (((bs_next_ptr[0]*256u + bs_next_ptr[1])*256u + bs_next_ptr[2])*256u + bs_next_ptr[3]) << (bs->pos & 7); + int pairs_to_decode, np, bs_sh = (bs->pos & 7) - 8; + bs_next_ptr += 4; + while (big_val_cnt > 0) + { + int tab_num = gr_info->table_select[ireg]; + int sfb_cnt = gr_info->region_count[ireg++]; + const ma_int16 *codebook = tabs + tabindex[tab_num]; + int linbits = g_linbits[tab_num]; + if (linbits) + { + do + { + np = *sfb++ / 2; + pairs_to_decode = MA_DR_MP3_MIN(big_val_cnt, np); + one = *scf++; + do + { + int j, w = 5; + int leaf = codebook[MA_DR_MP3_PEEK_BITS(w)]; + while (leaf < 0) + { + MA_DR_MP3_FLUSH_BITS(w); + w = leaf & 7; + leaf = codebook[MA_DR_MP3_PEEK_BITS(w) - (leaf >> 3)]; + } + MA_DR_MP3_FLUSH_BITS(leaf >> 8); + for (j = 0; j < 2; j++, dst++, leaf >>= 4) + { + int lsb = leaf & 0x0F; + if (lsb == 15) + { + lsb += MA_DR_MP3_PEEK_BITS(linbits); + MA_DR_MP3_FLUSH_BITS(linbits); + MA_DR_MP3_CHECK_BITS; + *dst = one*ma_dr_mp3_L3_pow_43(lsb)*((ma_int32)bs_cache < 0 ? -1: 1); + } else + { + *dst = g_ma_dr_mp3_pow43[16 + lsb - 16*(bs_cache >> 31)]*one; + } + MA_DR_MP3_FLUSH_BITS(lsb ? 1 : 0); + } + MA_DR_MP3_CHECK_BITS; + } while (--pairs_to_decode); + } while ((big_val_cnt -= np) > 0 && --sfb_cnt >= 0); + } else + { + do + { + np = *sfb++ / 2; + pairs_to_decode = MA_DR_MP3_MIN(big_val_cnt, np); + one = *scf++; + do + { + int j, w = 5; + int leaf = codebook[MA_DR_MP3_PEEK_BITS(w)]; + while (leaf < 0) + { + MA_DR_MP3_FLUSH_BITS(w); + w = leaf & 7; + leaf = codebook[MA_DR_MP3_PEEK_BITS(w) - (leaf >> 3)]; + } + MA_DR_MP3_FLUSH_BITS(leaf >> 8); + for (j = 0; j < 2; j++, dst++, leaf >>= 4) + { + int lsb = leaf & 0x0F; + *dst = g_ma_dr_mp3_pow43[16 + lsb - 16*(bs_cache >> 31)]*one; + MA_DR_MP3_FLUSH_BITS(lsb ? 1 : 0); + } + MA_DR_MP3_CHECK_BITS; + } while (--pairs_to_decode); + } while ((big_val_cnt -= np) > 0 && --sfb_cnt >= 0); + } + } + for (np = 1 - big_val_cnt;; dst += 4) + { + const ma_uint8 *codebook_count1 = (gr_info->count1_table) ? tab33 : tab32; + int leaf = codebook_count1[MA_DR_MP3_PEEK_BITS(4)]; + if (!(leaf & 8)) + { + leaf = codebook_count1[(leaf >> 3) + (bs_cache << 4 >> (32 - (leaf & 3)))]; + } + MA_DR_MP3_FLUSH_BITS(leaf & 7); + if (MA_DR_MP3_BSPOS > layer3gr_limit) + { + break; + } +#define MA_DR_MP3_RELOAD_SCALEFACTOR if (!--np) { np = *sfb++/2; if (!np) break; one = *scf++; } +#define MA_DR_MP3_DEQ_COUNT1(s) if (leaf & (128 >> s)) { dst[s] = ((ma_int32)bs_cache < 0) ? 
-one : one; MA_DR_MP3_FLUSH_BITS(1) } + MA_DR_MP3_RELOAD_SCALEFACTOR; + MA_DR_MP3_DEQ_COUNT1(0); + MA_DR_MP3_DEQ_COUNT1(1); + MA_DR_MP3_RELOAD_SCALEFACTOR; + MA_DR_MP3_DEQ_COUNT1(2); + MA_DR_MP3_DEQ_COUNT1(3); + MA_DR_MP3_CHECK_BITS; + } + bs->pos = layer3gr_limit; +} +static void ma_dr_mp3_L3_midside_stereo(float *left, int n) +{ + int i = 0; + float *right = left + 576; +#if MA_DR_MP3_HAVE_SIMD + if (ma_dr_mp3_have_simd()) + { + for (; i < n - 3; i += 4) + { + ma_dr_mp3_f4 vl = MA_DR_MP3_VLD(left + i); + ma_dr_mp3_f4 vr = MA_DR_MP3_VLD(right + i); + MA_DR_MP3_VSTORE(left + i, MA_DR_MP3_VADD(vl, vr)); + MA_DR_MP3_VSTORE(right + i, MA_DR_MP3_VSUB(vl, vr)); + } +#ifdef __GNUC__ + if (__builtin_constant_p(n % 4 == 0) && n % 4 == 0) + return; +#endif + } +#endif + for (; i < n; i++) + { + float a = left[i]; + float b = right[i]; + left[i] = a + b; + right[i] = a - b; + } +} +static void ma_dr_mp3_L3_intensity_stereo_band(float *left, int n, float kl, float kr) +{ + int i; + for (i = 0; i < n; i++) + { + left[i + 576] = left[i]*kr; + left[i] = left[i]*kl; + } +} +static void ma_dr_mp3_L3_stereo_top_band(const float *right, const ma_uint8 *sfb, int nbands, int max_band[3]) +{ + int i, k; + max_band[0] = max_band[1] = max_band[2] = -1; + for (i = 0; i < nbands; i++) + { + for (k = 0; k < sfb[i]; k += 2) + { + if (right[k] != 0 || right[k + 1] != 0) + { + max_band[i % 3] = i; + break; + } + } + right += sfb[i]; + } +} +static void ma_dr_mp3_L3_stereo_process(float *left, const ma_uint8 *ist_pos, const ma_uint8 *sfb, const ma_uint8 *hdr, int max_band[3], int mpeg2_sh) +{ + static const float g_pan[7*2] = { 0,1,0.21132487f,0.78867513f,0.36602540f,0.63397460f,0.5f,0.5f,0.63397460f,0.36602540f,0.78867513f,0.21132487f,1,0 }; + unsigned i, max_pos = MA_DR_MP3_HDR_TEST_MPEG1(hdr) ? 7 : 64; + for (i = 0; sfb[i]; i++) + { + unsigned ipos = ist_pos[i]; + if ((int)i > max_band[i % 3] && ipos < max_pos) + { + float kl, kr, s = MA_DR_MP3_HDR_TEST_MS_STEREO(hdr) ? 1.41421356f : 1; + if (MA_DR_MP3_HDR_TEST_MPEG1(hdr)) + { + kl = g_pan[2*ipos]; + kr = g_pan[2*ipos + 1]; + } else + { + kl = 1; + kr = ma_dr_mp3_L3_ldexp_q2(1, (ipos + 1) >> 1 << mpeg2_sh); + if (ipos & 1) + { + kl = kr; + kr = 1; + } + } + ma_dr_mp3_L3_intensity_stereo_band(left, sfb[i], kl*s, kr*s); + } else if (MA_DR_MP3_HDR_TEST_MS_STEREO(hdr)) + { + ma_dr_mp3_L3_midside_stereo(left, sfb[i]); + } + left += sfb[i]; + } +} +static void ma_dr_mp3_L3_intensity_stereo(float *left, ma_uint8 *ist_pos, const ma_dr_mp3_L3_gr_info *gr, const ma_uint8 *hdr) +{ + int max_band[3], n_sfb = gr->n_long_sfb + gr->n_short_sfb; + int i, max_blocks = gr->n_short_sfb ? 3 : 1; + ma_dr_mp3_L3_stereo_top_band(left + 576, gr->sfbtab, n_sfb, max_band); + if (gr->n_long_sfb) + { + max_band[0] = max_band[1] = max_band[2] = MA_DR_MP3_MAX(MA_DR_MP3_MAX(max_band[0], max_band[1]), max_band[2]); + } + for (i = 0; i < max_blocks; i++) + { + int default_pos = MA_DR_MP3_HDR_TEST_MPEG1(hdr) ? 3 : 0; + int itop = n_sfb - max_blocks + i; + int prev = itop - max_blocks; + ist_pos[itop] = (ma_uint8)(max_band[i] >= prev ? 
default_pos : ist_pos[prev]); + } + ma_dr_mp3_L3_stereo_process(left, ist_pos, gr->sfbtab, hdr, max_band, gr[1].scalefac_compress & 1); +} +static void ma_dr_mp3_L3_reorder(float *grbuf, float *scratch, const ma_uint8 *sfb) +{ + int i, len; + float *src = grbuf, *dst = scratch; + for (;0 != (len = *sfb); sfb += 3, src += 2*len) + { + for (i = 0; i < len; i++, src++) + { + *dst++ = src[0*len]; + *dst++ = src[1*len]; + *dst++ = src[2*len]; + } + } + MA_DR_MP3_COPY_MEMORY(grbuf, scratch, (dst - scratch)*sizeof(float)); +} +static void ma_dr_mp3_L3_antialias(float *grbuf, int nbands) +{ + static const float g_aa[2][8] = { + {0.85749293f,0.88174200f,0.94962865f,0.98331459f,0.99551782f,0.99916056f,0.99989920f,0.99999316f}, + {0.51449576f,0.47173197f,0.31337745f,0.18191320f,0.09457419f,0.04096558f,0.01419856f,0.00369997f} + }; + for (; nbands > 0; nbands--, grbuf += 18) + { + int i = 0; +#if MA_DR_MP3_HAVE_SIMD + if (ma_dr_mp3_have_simd()) for (; i < 8; i += 4) + { + ma_dr_mp3_f4 vu = MA_DR_MP3_VLD(grbuf + 18 + i); + ma_dr_mp3_f4 vd = MA_DR_MP3_VLD(grbuf + 14 - i); + ma_dr_mp3_f4 vc0 = MA_DR_MP3_VLD(g_aa[0] + i); + ma_dr_mp3_f4 vc1 = MA_DR_MP3_VLD(g_aa[1] + i); + vd = MA_DR_MP3_VREV(vd); + MA_DR_MP3_VSTORE(grbuf + 18 + i, MA_DR_MP3_VSUB(MA_DR_MP3_VMUL(vu, vc0), MA_DR_MP3_VMUL(vd, vc1))); + vd = MA_DR_MP3_VADD(MA_DR_MP3_VMUL(vu, vc1), MA_DR_MP3_VMUL(vd, vc0)); + MA_DR_MP3_VSTORE(grbuf + 14 - i, MA_DR_MP3_VREV(vd)); + } +#endif +#ifndef MA_DR_MP3_ONLY_SIMD + for(; i < 8; i++) + { + float u = grbuf[18 + i]; + float d = grbuf[17 - i]; + grbuf[18 + i] = u*g_aa[0][i] - d*g_aa[1][i]; + grbuf[17 - i] = u*g_aa[1][i] + d*g_aa[0][i]; + } +#endif + } +} +static void ma_dr_mp3_L3_dct3_9(float *y) +{ + float s0, s1, s2, s3, s4, s5, s6, s7, s8, t0, t2, t4; + s0 = y[0]; s2 = y[2]; s4 = y[4]; s6 = y[6]; s8 = y[8]; + t0 = s0 + s6*0.5f; + s0 -= s6; + t4 = (s4 + s2)*0.93969262f; + t2 = (s8 + s2)*0.76604444f; + s6 = (s4 - s8)*0.17364818f; + s4 += s8 - s2; + s2 = s0 - s4*0.5f; + y[4] = s4 + s0; + s8 = t0 - t2 + s6; + s0 = t0 - t4 + t2; + s4 = t0 + t4 - s6; + s1 = y[1]; s3 = y[3]; s5 = y[5]; s7 = y[7]; + s3 *= 0.86602540f; + t0 = (s5 + s1)*0.98480775f; + t4 = (s5 - s7)*0.34202014f; + t2 = (s1 + s7)*0.64278761f; + s1 = (s1 - s5 - s7)*0.86602540f; + s5 = t0 - s3 - t2; + s7 = t4 - s3 - t0; + s3 = t4 + s3 - t2; + y[0] = s4 - s7; + y[1] = s2 + s1; + y[2] = s0 - s3; + y[3] = s8 + s5; + y[5] = s8 - s5; + y[6] = s0 + s3; + y[7] = s2 - s1; + y[8] = s4 + s7; +} +static void ma_dr_mp3_L3_imdct36(float *grbuf, float *overlap, const float *window, int nbands) +{ + int i, j; + static const float g_twid9[18] = { + 0.73727734f,0.79335334f,0.84339145f,0.88701083f,0.92387953f,0.95371695f,0.97629601f,0.99144486f,0.99904822f,0.67559021f,0.60876143f,0.53729961f,0.46174861f,0.38268343f,0.30070580f,0.21643961f,0.13052619f,0.04361938f + }; + for (j = 0; j < nbands; j++, grbuf += 18, overlap += 9) + { + float co[9], si[9]; + co[0] = -grbuf[0]; + si[0] = grbuf[17]; + for (i = 0; i < 4; i++) + { + si[8 - 2*i] = grbuf[4*i + 1] - grbuf[4*i + 2]; + co[1 + 2*i] = grbuf[4*i + 1] + grbuf[4*i + 2]; + si[7 - 2*i] = grbuf[4*i + 4] - grbuf[4*i + 3]; + co[2 + 2*i] = -(grbuf[4*i + 3] + grbuf[4*i + 4]); + } + ma_dr_mp3_L3_dct3_9(co); + ma_dr_mp3_L3_dct3_9(si); + si[1] = -si[1]; + si[3] = -si[3]; + si[5] = -si[5]; + si[7] = -si[7]; + i = 0; +#if MA_DR_MP3_HAVE_SIMD + if (ma_dr_mp3_have_simd()) for (; i < 8; i += 4) + { + ma_dr_mp3_f4 vovl = MA_DR_MP3_VLD(overlap + i); + ma_dr_mp3_f4 vc = MA_DR_MP3_VLD(co + i); + ma_dr_mp3_f4 vs = MA_DR_MP3_VLD(si + i); + 
ma_dr_mp3_f4 vr0 = MA_DR_MP3_VLD(g_twid9 + i); + ma_dr_mp3_f4 vr1 = MA_DR_MP3_VLD(g_twid9 + 9 + i); + ma_dr_mp3_f4 vw0 = MA_DR_MP3_VLD(window + i); + ma_dr_mp3_f4 vw1 = MA_DR_MP3_VLD(window + 9 + i); + ma_dr_mp3_f4 vsum = MA_DR_MP3_VADD(MA_DR_MP3_VMUL(vc, vr1), MA_DR_MP3_VMUL(vs, vr0)); + MA_DR_MP3_VSTORE(overlap + i, MA_DR_MP3_VSUB(MA_DR_MP3_VMUL(vc, vr0), MA_DR_MP3_VMUL(vs, vr1))); + MA_DR_MP3_VSTORE(grbuf + i, MA_DR_MP3_VSUB(MA_DR_MP3_VMUL(vovl, vw0), MA_DR_MP3_VMUL(vsum, vw1))); + vsum = MA_DR_MP3_VADD(MA_DR_MP3_VMUL(vovl, vw1), MA_DR_MP3_VMUL(vsum, vw0)); + MA_DR_MP3_VSTORE(grbuf + 14 - i, MA_DR_MP3_VREV(vsum)); + } +#endif + for (; i < 9; i++) + { + float ovl = overlap[i]; + float sum = co[i]*g_twid9[9 + i] + si[i]*g_twid9[0 + i]; + overlap[i] = co[i]*g_twid9[0 + i] - si[i]*g_twid9[9 + i]; + grbuf[i] = ovl*window[0 + i] - sum*window[9 + i]; + grbuf[17 - i] = ovl*window[9 + i] + sum*window[0 + i]; + } + } +} +static void ma_dr_mp3_L3_idct3(float x0, float x1, float x2, float *dst) +{ + float m1 = x1*0.86602540f; + float a1 = x0 - x2*0.5f; + dst[1] = x0 + x2; + dst[0] = a1 + m1; + dst[2] = a1 - m1; +} +static void ma_dr_mp3_L3_imdct12(float *x, float *dst, float *overlap) +{ + static const float g_twid3[6] = { 0.79335334f,0.92387953f,0.99144486f, 0.60876143f,0.38268343f,0.13052619f }; + float co[3], si[3]; + int i; + ma_dr_mp3_L3_idct3(-x[0], x[6] + x[3], x[12] + x[9], co); + ma_dr_mp3_L3_idct3(x[15], x[12] - x[9], x[6] - x[3], si); + si[1] = -si[1]; + for (i = 0; i < 3; i++) + { + float ovl = overlap[i]; + float sum = co[i]*g_twid3[3 + i] + si[i]*g_twid3[0 + i]; + overlap[i] = co[i]*g_twid3[0 + i] - si[i]*g_twid3[3 + i]; + dst[i] = ovl*g_twid3[2 - i] - sum*g_twid3[5 - i]; + dst[5 - i] = ovl*g_twid3[5 - i] + sum*g_twid3[2 - i]; + } +} +static void ma_dr_mp3_L3_imdct_short(float *grbuf, float *overlap, int nbands) +{ + for (;nbands > 0; nbands--, overlap += 9, grbuf += 18) + { + float tmp[18]; + MA_DR_MP3_COPY_MEMORY(tmp, grbuf, sizeof(tmp)); + MA_DR_MP3_COPY_MEMORY(grbuf, overlap, 6*sizeof(float)); + ma_dr_mp3_L3_imdct12(tmp, grbuf + 6, overlap + 6); + ma_dr_mp3_L3_imdct12(tmp + 1, grbuf + 12, overlap + 6); + ma_dr_mp3_L3_imdct12(tmp + 2, overlap, overlap + 6); + } +} +static void ma_dr_mp3_L3_change_sign(float *grbuf) +{ + int b, i; + for (b = 0, grbuf += 18; b < 32; b += 2, grbuf += 36) + for (i = 1; i < 18; i += 2) + grbuf[i] = -grbuf[i]; +} +static void ma_dr_mp3_L3_imdct_gr(float *grbuf, float *overlap, unsigned block_type, unsigned n_long_bands) +{ + static const float g_mdct_window[2][18] = { + { 0.99904822f,0.99144486f,0.97629601f,0.95371695f,0.92387953f,0.88701083f,0.84339145f,0.79335334f,0.73727734f,0.04361938f,0.13052619f,0.21643961f,0.30070580f,0.38268343f,0.46174861f,0.53729961f,0.60876143f,0.67559021f }, + { 1,1,1,1,1,1,0.99144486f,0.92387953f,0.79335334f,0,0,0,0,0,0,0.13052619f,0.38268343f,0.60876143f } + }; + if (n_long_bands) + { + ma_dr_mp3_L3_imdct36(grbuf, overlap, g_mdct_window[0], n_long_bands); + grbuf += 18*n_long_bands; + overlap += 9*n_long_bands; + } + if (block_type == MA_DR_MP3_SHORT_BLOCK_TYPE) + ma_dr_mp3_L3_imdct_short(grbuf, overlap, 32 - n_long_bands); + else + ma_dr_mp3_L3_imdct36(grbuf, overlap, g_mdct_window[block_type == MA_DR_MP3_STOP_BLOCK_TYPE], 32 - n_long_bands); +} +static void ma_dr_mp3_L3_save_reservoir(ma_dr_mp3dec *h, ma_dr_mp3dec_scratch *s) +{ + int pos = (s->bs.pos + 7)/8u; + int remains = s->bs.limit/8u - pos; + if (remains > MA_DR_MP3_MAX_BITRESERVOIR_BYTES) + { + pos += remains - MA_DR_MP3_MAX_BITRESERVOIR_BYTES; + remains = 
MA_DR_MP3_MAX_BITRESERVOIR_BYTES; + } + if (remains > 0) + { + MA_DR_MP3_MOVE_MEMORY(h->reserv_buf, s->maindata + pos, remains); + } + h->reserv = remains; +} +static int ma_dr_mp3_L3_restore_reservoir(ma_dr_mp3dec *h, ma_dr_mp3_bs *bs, ma_dr_mp3dec_scratch *s, int main_data_begin) +{ + int frame_bytes = (bs->limit - bs->pos)/8; + int bytes_have = MA_DR_MP3_MIN(h->reserv, main_data_begin); + MA_DR_MP3_COPY_MEMORY(s->maindata, h->reserv_buf + MA_DR_MP3_MAX(0, h->reserv - main_data_begin), MA_DR_MP3_MIN(h->reserv, main_data_begin)); + MA_DR_MP3_COPY_MEMORY(s->maindata + bytes_have, bs->buf + bs->pos/8, frame_bytes); + ma_dr_mp3_bs_init(&s->bs, s->maindata, bytes_have + frame_bytes); + return h->reserv >= main_data_begin; +} +static void ma_dr_mp3_L3_decode(ma_dr_mp3dec *h, ma_dr_mp3dec_scratch *s, ma_dr_mp3_L3_gr_info *gr_info, int nch) +{ + int ch; + for (ch = 0; ch < nch; ch++) + { + int layer3gr_limit = s->bs.pos + gr_info[ch].part_23_length; + ma_dr_mp3_L3_decode_scalefactors(h->header, s->ist_pos[ch], &s->bs, gr_info + ch, s->scf, ch); + ma_dr_mp3_L3_huffman(s->grbuf[ch], &s->bs, gr_info + ch, s->scf, layer3gr_limit); + } + if (MA_DR_MP3_HDR_TEST_I_STEREO(h->header)) + { + ma_dr_mp3_L3_intensity_stereo(s->grbuf[0], s->ist_pos[1], gr_info, h->header); + } else if (MA_DR_MP3_HDR_IS_MS_STEREO(h->header)) + { + ma_dr_mp3_L3_midside_stereo(s->grbuf[0], 576); + } + for (ch = 0; ch < nch; ch++, gr_info++) + { + int aa_bands = 31; + int n_long_bands = (gr_info->mixed_block_flag ? 2 : 0) << (int)(MA_DR_MP3_HDR_GET_MY_SAMPLE_RATE(h->header) == 2); + if (gr_info->n_short_sfb) + { + aa_bands = n_long_bands - 1; + ma_dr_mp3_L3_reorder(s->grbuf[ch] + n_long_bands*18, s->syn[0], gr_info->sfbtab + gr_info->n_long_sfb); + } + ma_dr_mp3_L3_antialias(s->grbuf[ch], aa_bands); + ma_dr_mp3_L3_imdct_gr(s->grbuf[ch], h->mdct_overlap[ch], gr_info->block_type, n_long_bands); + ma_dr_mp3_L3_change_sign(s->grbuf[ch]); + } +} +static void ma_dr_mp3d_DCT_II(float *grbuf, int n) +{ + static const float g_sec[24] = { + 10.19000816f,0.50060302f,0.50241929f,3.40760851f,0.50547093f,0.52249861f,2.05778098f,0.51544732f,0.56694406f,1.48416460f,0.53104258f,0.64682180f,1.16943991f,0.55310392f,0.78815460f,0.97256821f,0.58293498f,1.06067765f,0.83934963f,0.62250412f,1.72244716f,0.74453628f,0.67480832f,5.10114861f + }; + int i, k = 0; +#if MA_DR_MP3_HAVE_SIMD + if (ma_dr_mp3_have_simd()) for (; k < n; k += 4) + { + ma_dr_mp3_f4 t[4][8], *x; + float *y = grbuf + k; + for (x = t[0], i = 0; i < 8; i++, x++) + { + ma_dr_mp3_f4 x0 = MA_DR_MP3_VLD(&y[i*18]); + ma_dr_mp3_f4 x1 = MA_DR_MP3_VLD(&y[(15 - i)*18]); + ma_dr_mp3_f4 x2 = MA_DR_MP3_VLD(&y[(16 + i)*18]); + ma_dr_mp3_f4 x3 = MA_DR_MP3_VLD(&y[(31 - i)*18]); + ma_dr_mp3_f4 t0 = MA_DR_MP3_VADD(x0, x3); + ma_dr_mp3_f4 t1 = MA_DR_MP3_VADD(x1, x2); + ma_dr_mp3_f4 t2 = MA_DR_MP3_VMUL_S(MA_DR_MP3_VSUB(x1, x2), g_sec[3*i + 0]); + ma_dr_mp3_f4 t3 = MA_DR_MP3_VMUL_S(MA_DR_MP3_VSUB(x0, x3), g_sec[3*i + 1]); + x[0] = MA_DR_MP3_VADD(t0, t1); + x[8] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VSUB(t0, t1), g_sec[3*i + 2]); + x[16] = MA_DR_MP3_VADD(t3, t2); + x[24] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VSUB(t3, t2), g_sec[3*i + 2]); + } + for (x = t[0], i = 0; i < 4; i++, x += 8) + { + ma_dr_mp3_f4 x0 = x[0], x1 = x[1], x2 = x[2], x3 = x[3], x4 = x[4], x5 = x[5], x6 = x[6], x7 = x[7], xt; + xt = MA_DR_MP3_VSUB(x0, x7); x0 = MA_DR_MP3_VADD(x0, x7); + x7 = MA_DR_MP3_VSUB(x1, x6); x1 = MA_DR_MP3_VADD(x1, x6); + x6 = MA_DR_MP3_VSUB(x2, x5); x2 = MA_DR_MP3_VADD(x2, x5); + x5 = MA_DR_MP3_VSUB(x3, x4); x3 = MA_DR_MP3_VADD(x3, 
x4); + x4 = MA_DR_MP3_VSUB(x0, x3); x0 = MA_DR_MP3_VADD(x0, x3); + x3 = MA_DR_MP3_VSUB(x1, x2); x1 = MA_DR_MP3_VADD(x1, x2); + x[0] = MA_DR_MP3_VADD(x0, x1); + x[4] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VSUB(x0, x1), 0.70710677f); + x5 = MA_DR_MP3_VADD(x5, x6); + x6 = MA_DR_MP3_VMUL_S(MA_DR_MP3_VADD(x6, x7), 0.70710677f); + x7 = MA_DR_MP3_VADD(x7, xt); + x3 = MA_DR_MP3_VMUL_S(MA_DR_MP3_VADD(x3, x4), 0.70710677f); + x5 = MA_DR_MP3_VSUB(x5, MA_DR_MP3_VMUL_S(x7, 0.198912367f)); + x7 = MA_DR_MP3_VADD(x7, MA_DR_MP3_VMUL_S(x5, 0.382683432f)); + x5 = MA_DR_MP3_VSUB(x5, MA_DR_MP3_VMUL_S(x7, 0.198912367f)); + x0 = MA_DR_MP3_VSUB(xt, x6); xt = MA_DR_MP3_VADD(xt, x6); + x[1] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VADD(xt, x7), 0.50979561f); + x[2] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VADD(x4, x3), 0.54119611f); + x[3] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VSUB(x0, x5), 0.60134488f); + x[5] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VADD(x0, x5), 0.89997619f); + x[6] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VSUB(x4, x3), 1.30656302f); + x[7] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VSUB(xt, x7), 2.56291556f); + } + if (k > n - 3) + { +#if MA_DR_MP3_HAVE_SSE +#define MA_DR_MP3_VSAVE2(i, v) _mm_storel_pi((__m64 *)(void*)&y[i*18], v) +#else +#define MA_DR_MP3_VSAVE2(i, v) vst1_f32((float32_t *)&y[(i)*18], vget_low_f32(v)) +#endif + for (i = 0; i < 7; i++, y += 4*18) + { + ma_dr_mp3_f4 s = MA_DR_MP3_VADD(t[3][i], t[3][i + 1]); + MA_DR_MP3_VSAVE2(0, t[0][i]); + MA_DR_MP3_VSAVE2(1, MA_DR_MP3_VADD(t[2][i], s)); + MA_DR_MP3_VSAVE2(2, MA_DR_MP3_VADD(t[1][i], t[1][i + 1])); + MA_DR_MP3_VSAVE2(3, MA_DR_MP3_VADD(t[2][1 + i], s)); + } + MA_DR_MP3_VSAVE2(0, t[0][7]); + MA_DR_MP3_VSAVE2(1, MA_DR_MP3_VADD(t[2][7], t[3][7])); + MA_DR_MP3_VSAVE2(2, t[1][7]); + MA_DR_MP3_VSAVE2(3, t[3][7]); + } else + { +#define MA_DR_MP3_VSAVE4(i, v) MA_DR_MP3_VSTORE(&y[(i)*18], v) + for (i = 0; i < 7; i++, y += 4*18) + { + ma_dr_mp3_f4 s = MA_DR_MP3_VADD(t[3][i], t[3][i + 1]); + MA_DR_MP3_VSAVE4(0, t[0][i]); + MA_DR_MP3_VSAVE4(1, MA_DR_MP3_VADD(t[2][i], s)); + MA_DR_MP3_VSAVE4(2, MA_DR_MP3_VADD(t[1][i], t[1][i + 1])); + MA_DR_MP3_VSAVE4(3, MA_DR_MP3_VADD(t[2][1 + i], s)); + } + MA_DR_MP3_VSAVE4(0, t[0][7]); + MA_DR_MP3_VSAVE4(1, MA_DR_MP3_VADD(t[2][7], t[3][7])); + MA_DR_MP3_VSAVE4(2, t[1][7]); + MA_DR_MP3_VSAVE4(3, t[3][7]); + } + } else +#endif +#ifdef MA_DR_MP3_ONLY_SIMD + {} +#else + for (; k < n; k++) + { + float t[4][8], *x, *y = grbuf + k; + for (x = t[0], i = 0; i < 8; i++, x++) + { + float x0 = y[i*18]; + float x1 = y[(15 - i)*18]; + float x2 = y[(16 + i)*18]; + float x3 = y[(31 - i)*18]; + float t0 = x0 + x3; + float t1 = x1 + x2; + float t2 = (x1 - x2)*g_sec[3*i + 0]; + float t3 = (x0 - x3)*g_sec[3*i + 1]; + x[0] = t0 + t1; + x[8] = (t0 - t1)*g_sec[3*i + 2]; + x[16] = t3 + t2; + x[24] = (t3 - t2)*g_sec[3*i + 2]; + } + for (x = t[0], i = 0; i < 4; i++, x += 8) + { + float x0 = x[0], x1 = x[1], x2 = x[2], x3 = x[3], x4 = x[4], x5 = x[5], x6 = x[6], x7 = x[7], xt; + xt = x0 - x7; x0 += x7; + x7 = x1 - x6; x1 += x6; + x6 = x2 - x5; x2 += x5; + x5 = x3 - x4; x3 += x4; + x4 = x0 - x3; x0 += x3; + x3 = x1 - x2; x1 += x2; + x[0] = x0 + x1; + x[4] = (x0 - x1)*0.70710677f; + x5 = x5 + x6; + x6 = (x6 + x7)*0.70710677f; + x7 = x7 + xt; + x3 = (x3 + x4)*0.70710677f; + x5 -= x7*0.198912367f; + x7 += x5*0.382683432f; + x5 -= x7*0.198912367f; + x0 = xt - x6; xt += x6; + x[1] = (xt + x7)*0.50979561f; + x[2] = (x4 + x3)*0.54119611f; + x[3] = (x0 - x5)*0.60134488f; + x[5] = (x0 + x5)*0.89997619f; + x[6] = (x4 - x3)*1.30656302f; + x[7] = (xt - x7)*2.56291556f; + } + for (i = 0; i < 7; i++, y += 4*18) + { + y[0*18] = 
t[0][i]; + y[1*18] = t[2][i] + t[3][i] + t[3][i + 1]; + y[2*18] = t[1][i] + t[1][i + 1]; + y[3*18] = t[2][i + 1] + t[3][i] + t[3][i + 1]; + } + y[0*18] = t[0][7]; + y[1*18] = t[2][7] + t[3][7]; + y[2*18] = t[1][7]; + y[3*18] = t[3][7]; + } +#endif +} +#ifndef MA_DR_MP3_FLOAT_OUTPUT +typedef ma_int16 ma_dr_mp3d_sample_t; +static ma_int16 ma_dr_mp3d_scale_pcm(float sample) +{ + ma_int16 s; +#if MA_DR_MP3_HAVE_ARMV6 + ma_int32 s32 = (ma_int32)(sample + .5f); + s32 -= (s32 < 0); + s = (ma_int16)ma_dr_mp3_clip_int16_arm(s32); +#else + if (sample >= 32766.5) return (ma_int16) 32767; + if (sample <= -32767.5) return (ma_int16)-32768; + s = (ma_int16)(sample + .5f); + s -= (s < 0); +#endif + return s; +} +#else +typedef float ma_dr_mp3d_sample_t; +static float ma_dr_mp3d_scale_pcm(float sample) +{ + return sample*(1.f/32768.f); +} +#endif +static void ma_dr_mp3d_synth_pair(ma_dr_mp3d_sample_t *pcm, int nch, const float *z) +{ + float a; + a = (z[14*64] - z[ 0]) * 29; + a += (z[ 1*64] + z[13*64]) * 213; + a += (z[12*64] - z[ 2*64]) * 459; + a += (z[ 3*64] + z[11*64]) * 2037; + a += (z[10*64] - z[ 4*64]) * 5153; + a += (z[ 5*64] + z[ 9*64]) * 6574; + a += (z[ 8*64] - z[ 6*64]) * 37489; + a += z[ 7*64] * 75038; + pcm[0] = ma_dr_mp3d_scale_pcm(a); + z += 2; + a = z[14*64] * 104; + a += z[12*64] * 1567; + a += z[10*64] * 9727; + a += z[ 8*64] * 64019; + a += z[ 6*64] * -9975; + a += z[ 4*64] * -45; + a += z[ 2*64] * 146; + a += z[ 0*64] * -5; + pcm[16*nch] = ma_dr_mp3d_scale_pcm(a); +} +static void ma_dr_mp3d_synth(float *xl, ma_dr_mp3d_sample_t *dstl, int nch, float *lins) +{ + int i; + float *xr = xl + 576*(nch - 1); + ma_dr_mp3d_sample_t *dstr = dstl + (nch - 1); + static const float g_win[] = { + -1,26,-31,208,218,401,-519,2063,2000,4788,-5517,7134,5959,35640,-39336,74992, + -1,24,-35,202,222,347,-581,2080,1952,4425,-5879,7640,5288,33791,-41176,74856, + -1,21,-38,196,225,294,-645,2087,1893,4063,-6237,8092,4561,31947,-43006,74630, + -1,19,-41,190,227,244,-711,2085,1822,3705,-6589,8492,3776,30112,-44821,74313, + -1,17,-45,183,228,197,-779,2075,1739,3351,-6935,8840,2935,28289,-46617,73908, + -1,16,-49,176,228,153,-848,2057,1644,3004,-7271,9139,2037,26482,-48390,73415, + -2,14,-53,169,227,111,-919,2032,1535,2663,-7597,9389,1082,24694,-50137,72835, + -2,13,-58,161,224,72,-991,2001,1414,2330,-7910,9592,70,22929,-51853,72169, + -2,11,-63,154,221,36,-1064,1962,1280,2006,-8209,9750,-998,21189,-53534,71420, + -2,10,-68,147,215,2,-1137,1919,1131,1692,-8491,9863,-2122,19478,-55178,70590, + -3,9,-73,139,208,-29,-1210,1870,970,1388,-8755,9935,-3300,17799,-56778,69679, + -3,8,-79,132,200,-57,-1283,1817,794,1095,-8998,9966,-4533,16155,-58333,68692, + -4,7,-85,125,189,-83,-1356,1759,605,814,-9219,9959,-5818,14548,-59838,67629, + -4,7,-91,117,177,-106,-1428,1698,402,545,-9416,9916,-7154,12980,-61289,66494, + -5,6,-97,111,163,-127,-1498,1634,185,288,-9585,9838,-8540,11455,-62684,65290 + }; + float *zlin = lins + 15*64; + const float *w = g_win; + zlin[4*15] = xl[18*16]; + zlin[4*15 + 1] = xr[18*16]; + zlin[4*15 + 2] = xl[0]; + zlin[4*15 + 3] = xr[0]; + zlin[4*31] = xl[1 + 18*16]; + zlin[4*31 + 1] = xr[1 + 18*16]; + zlin[4*31 + 2] = xl[1]; + zlin[4*31 + 3] = xr[1]; + ma_dr_mp3d_synth_pair(dstr, nch, lins + 4*15 + 1); + ma_dr_mp3d_synth_pair(dstr + 32*nch, nch, lins + 4*15 + 64 + 1); + ma_dr_mp3d_synth_pair(dstl, nch, lins + 4*15); + ma_dr_mp3d_synth_pair(dstl + 32*nch, nch, lins + 4*15 + 64); +#if MA_DR_MP3_HAVE_SIMD + if (ma_dr_mp3_have_simd()) for (i = 14; i >= 0; i--) + { +#define MA_DR_MP3_VLOAD(k) ma_dr_mp3_f4 w0 
= MA_DR_MP3_VSET(*w++); ma_dr_mp3_f4 w1 = MA_DR_MP3_VSET(*w++); ma_dr_mp3_f4 vz = MA_DR_MP3_VLD(&zlin[4*i - 64*k]); ma_dr_mp3_f4 vy = MA_DR_MP3_VLD(&zlin[4*i - 64*(15 - k)]); +#define MA_DR_MP3_V0(k) { MA_DR_MP3_VLOAD(k) b = MA_DR_MP3_VADD(MA_DR_MP3_VMUL(vz, w1), MA_DR_MP3_VMUL(vy, w0)) ; a = MA_DR_MP3_VSUB(MA_DR_MP3_VMUL(vz, w0), MA_DR_MP3_VMUL(vy, w1)); } +#define MA_DR_MP3_V1(k) { MA_DR_MP3_VLOAD(k) b = MA_DR_MP3_VADD(b, MA_DR_MP3_VADD(MA_DR_MP3_VMUL(vz, w1), MA_DR_MP3_VMUL(vy, w0))); a = MA_DR_MP3_VADD(a, MA_DR_MP3_VSUB(MA_DR_MP3_VMUL(vz, w0), MA_DR_MP3_VMUL(vy, w1))); } +#define MA_DR_MP3_V2(k) { MA_DR_MP3_VLOAD(k) b = MA_DR_MP3_VADD(b, MA_DR_MP3_VADD(MA_DR_MP3_VMUL(vz, w1), MA_DR_MP3_VMUL(vy, w0))); a = MA_DR_MP3_VADD(a, MA_DR_MP3_VSUB(MA_DR_MP3_VMUL(vy, w1), MA_DR_MP3_VMUL(vz, w0))); } + ma_dr_mp3_f4 a, b; + zlin[4*i] = xl[18*(31 - i)]; + zlin[4*i + 1] = xr[18*(31 - i)]; + zlin[4*i + 2] = xl[1 + 18*(31 - i)]; + zlin[4*i + 3] = xr[1 + 18*(31 - i)]; + zlin[4*i + 64] = xl[1 + 18*(1 + i)]; + zlin[4*i + 64 + 1] = xr[1 + 18*(1 + i)]; + zlin[4*i - 64 + 2] = xl[18*(1 + i)]; + zlin[4*i - 64 + 3] = xr[18*(1 + i)]; + MA_DR_MP3_V0(0) MA_DR_MP3_V2(1) MA_DR_MP3_V1(2) MA_DR_MP3_V2(3) MA_DR_MP3_V1(4) MA_DR_MP3_V2(5) MA_DR_MP3_V1(6) MA_DR_MP3_V2(7) + { +#ifndef MA_DR_MP3_FLOAT_OUTPUT +#if MA_DR_MP3_HAVE_SSE + static const ma_dr_mp3_f4 g_max = { 32767.0f, 32767.0f, 32767.0f, 32767.0f }; + static const ma_dr_mp3_f4 g_min = { -32768.0f, -32768.0f, -32768.0f, -32768.0f }; + __m128i pcm8 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_max_ps(_mm_min_ps(a, g_max), g_min)), + _mm_cvtps_epi32(_mm_max_ps(_mm_min_ps(b, g_max), g_min))); + dstr[(15 - i)*nch] = (ma_int16)_mm_extract_epi16(pcm8, 1); + dstr[(17 + i)*nch] = (ma_int16)_mm_extract_epi16(pcm8, 5); + dstl[(15 - i)*nch] = (ma_int16)_mm_extract_epi16(pcm8, 0); + dstl[(17 + i)*nch] = (ma_int16)_mm_extract_epi16(pcm8, 4); + dstr[(47 - i)*nch] = (ma_int16)_mm_extract_epi16(pcm8, 3); + dstr[(49 + i)*nch] = (ma_int16)_mm_extract_epi16(pcm8, 7); + dstl[(47 - i)*nch] = (ma_int16)_mm_extract_epi16(pcm8, 2); + dstl[(49 + i)*nch] = (ma_int16)_mm_extract_epi16(pcm8, 6); +#else + int16x4_t pcma, pcmb; + a = MA_DR_MP3_VADD(a, MA_DR_MP3_VSET(0.5f)); + b = MA_DR_MP3_VADD(b, MA_DR_MP3_VSET(0.5f)); + pcma = vqmovn_s32(vqaddq_s32(vcvtq_s32_f32(a), vreinterpretq_s32_u32(vcltq_f32(a, MA_DR_MP3_VSET(0))))); + pcmb = vqmovn_s32(vqaddq_s32(vcvtq_s32_f32(b), vreinterpretq_s32_u32(vcltq_f32(b, MA_DR_MP3_VSET(0))))); + vst1_lane_s16(dstr + (15 - i)*nch, pcma, 1); + vst1_lane_s16(dstr + (17 + i)*nch, pcmb, 1); + vst1_lane_s16(dstl + (15 - i)*nch, pcma, 0); + vst1_lane_s16(dstl + (17 + i)*nch, pcmb, 0); + vst1_lane_s16(dstr + (47 - i)*nch, pcma, 3); + vst1_lane_s16(dstr + (49 + i)*nch, pcmb, 3); + vst1_lane_s16(dstl + (47 - i)*nch, pcma, 2); + vst1_lane_s16(dstl + (49 + i)*nch, pcmb, 2); +#endif +#else +#if MA_DR_MP3_HAVE_SSE + static const ma_dr_mp3_f4 g_scale = { 1.0f/32768.0f, 1.0f/32768.0f, 1.0f/32768.0f, 1.0f/32768.0f }; +#else + const ma_dr_mp3_f4 g_scale = vdupq_n_f32(1.0f/32768.0f); +#endif + a = MA_DR_MP3_VMUL(a, g_scale); + b = MA_DR_MP3_VMUL(b, g_scale); +#if MA_DR_MP3_HAVE_SSE + _mm_store_ss(dstr + (15 - i)*nch, _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1))); + _mm_store_ss(dstr + (17 + i)*nch, _mm_shuffle_ps(b, b, _MM_SHUFFLE(1, 1, 1, 1))); + _mm_store_ss(dstl + (15 - i)*nch, _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0))); + _mm_store_ss(dstl + (17 + i)*nch, _mm_shuffle_ps(b, b, _MM_SHUFFLE(0, 0, 0, 0))); + _mm_store_ss(dstr + (47 - i)*nch, _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 
3, 3, 3))); + _mm_store_ss(dstr + (49 + i)*nch, _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 3, 3, 3))); + _mm_store_ss(dstl + (47 - i)*nch, _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2))); + _mm_store_ss(dstl + (49 + i)*nch, _mm_shuffle_ps(b, b, _MM_SHUFFLE(2, 2, 2, 2))); +#else + vst1q_lane_f32(dstr + (15 - i)*nch, a, 1); + vst1q_lane_f32(dstr + (17 + i)*nch, b, 1); + vst1q_lane_f32(dstl + (15 - i)*nch, a, 0); + vst1q_lane_f32(dstl + (17 + i)*nch, b, 0); + vst1q_lane_f32(dstr + (47 - i)*nch, a, 3); + vst1q_lane_f32(dstr + (49 + i)*nch, b, 3); + vst1q_lane_f32(dstl + (47 - i)*nch, a, 2); + vst1q_lane_f32(dstl + (49 + i)*nch, b, 2); +#endif +#endif + } + } else +#endif +#ifdef MA_DR_MP3_ONLY_SIMD + {} +#else + for (i = 14; i >= 0; i--) + { +#define MA_DR_MP3_LOAD(k) float w0 = *w++; float w1 = *w++; float *vz = &zlin[4*i - k*64]; float *vy = &zlin[4*i - (15 - k)*64]; +#define MA_DR_MP3_S0(k) { int j; MA_DR_MP3_LOAD(k); for (j = 0; j < 4; j++) b[j] = vz[j]*w1 + vy[j]*w0, a[j] = vz[j]*w0 - vy[j]*w1; } +#define MA_DR_MP3_S1(k) { int j; MA_DR_MP3_LOAD(k); for (j = 0; j < 4; j++) b[j] += vz[j]*w1 + vy[j]*w0, a[j] += vz[j]*w0 - vy[j]*w1; } +#define MA_DR_MP3_S2(k) { int j; MA_DR_MP3_LOAD(k); for (j = 0; j < 4; j++) b[j] += vz[j]*w1 + vy[j]*w0, a[j] += vy[j]*w1 - vz[j]*w0; } + float a[4], b[4]; + zlin[4*i] = xl[18*(31 - i)]; + zlin[4*i + 1] = xr[18*(31 - i)]; + zlin[4*i + 2] = xl[1 + 18*(31 - i)]; + zlin[4*i + 3] = xr[1 + 18*(31 - i)]; + zlin[4*(i + 16)] = xl[1 + 18*(1 + i)]; + zlin[4*(i + 16) + 1] = xr[1 + 18*(1 + i)]; + zlin[4*(i - 16) + 2] = xl[18*(1 + i)]; + zlin[4*(i - 16) + 3] = xr[18*(1 + i)]; + MA_DR_MP3_S0(0) MA_DR_MP3_S2(1) MA_DR_MP3_S1(2) MA_DR_MP3_S2(3) MA_DR_MP3_S1(4) MA_DR_MP3_S2(5) MA_DR_MP3_S1(6) MA_DR_MP3_S2(7) + dstr[(15 - i)*nch] = ma_dr_mp3d_scale_pcm(a[1]); + dstr[(17 + i)*nch] = ma_dr_mp3d_scale_pcm(b[1]); + dstl[(15 - i)*nch] = ma_dr_mp3d_scale_pcm(a[0]); + dstl[(17 + i)*nch] = ma_dr_mp3d_scale_pcm(b[0]); + dstr[(47 - i)*nch] = ma_dr_mp3d_scale_pcm(a[3]); + dstr[(49 + i)*nch] = ma_dr_mp3d_scale_pcm(b[3]); + dstl[(47 - i)*nch] = ma_dr_mp3d_scale_pcm(a[2]); + dstl[(49 + i)*nch] = ma_dr_mp3d_scale_pcm(b[2]); + } +#endif +} +static void ma_dr_mp3d_synth_granule(float *qmf_state, float *grbuf, int nbands, int nch, ma_dr_mp3d_sample_t *pcm, float *lins) +{ + int i; + for (i = 0; i < nch; i++) + { + ma_dr_mp3d_DCT_II(grbuf + 576*i, nbands); + } + MA_DR_MP3_COPY_MEMORY(lins, qmf_state, sizeof(float)*15*64); + for (i = 0; i < nbands; i += 2) + { + ma_dr_mp3d_synth(grbuf + i, pcm + 32*nch*i, nch, lins + i*64); + } +#ifndef MA_DR_MP3_NONSTANDARD_BUT_LOGICAL + if (nch == 1) + { + for (i = 0; i < 15*64; i += 2) + { + qmf_state[i] = lins[nbands*64 + i]; + } + } else +#endif + { + MA_DR_MP3_COPY_MEMORY(qmf_state, lins + nbands*64, sizeof(float)*15*64); + } +} +static int ma_dr_mp3d_match_frame(const ma_uint8 *hdr, int mp3_bytes, int frame_bytes) +{ + int i, nmatch; + for (i = 0, nmatch = 0; nmatch < MA_DR_MP3_MAX_FRAME_SYNC_MATCHES; nmatch++) + { + i += ma_dr_mp3_hdr_frame_bytes(hdr + i, frame_bytes) + ma_dr_mp3_hdr_padding(hdr + i); + if (i + MA_DR_MP3_HDR_SIZE > mp3_bytes) + return nmatch > 0; + if (!ma_dr_mp3_hdr_compare(hdr, hdr + i)) + return 0; + } + return 1; +} +static int ma_dr_mp3d_find_frame(const ma_uint8 *mp3, int mp3_bytes, int *free_format_bytes, int *ptr_frame_bytes) +{ + int i, k; + for (i = 0; i < mp3_bytes - MA_DR_MP3_HDR_SIZE; i++, mp3++) + { + if (ma_dr_mp3_hdr_valid(mp3)) + { + int frame_bytes = ma_dr_mp3_hdr_frame_bytes(mp3, *free_format_bytes); + int frame_and_padding = 
frame_bytes + ma_dr_mp3_hdr_padding(mp3); + for (k = MA_DR_MP3_HDR_SIZE; !frame_bytes && k < MA_DR_MP3_MAX_FREE_FORMAT_FRAME_SIZE && i + 2*k < mp3_bytes - MA_DR_MP3_HDR_SIZE; k++) + { + if (ma_dr_mp3_hdr_compare(mp3, mp3 + k)) + { + int fb = k - ma_dr_mp3_hdr_padding(mp3); + int nextfb = fb + ma_dr_mp3_hdr_padding(mp3 + k); + if (i + k + nextfb + MA_DR_MP3_HDR_SIZE > mp3_bytes || !ma_dr_mp3_hdr_compare(mp3, mp3 + k + nextfb)) + continue; + frame_and_padding = k; + frame_bytes = fb; + *free_format_bytes = fb; + } + } + if ((frame_bytes && i + frame_and_padding <= mp3_bytes && + ma_dr_mp3d_match_frame(mp3, mp3_bytes - i, frame_bytes)) || + (!i && frame_and_padding == mp3_bytes)) + { + *ptr_frame_bytes = frame_and_padding; + return i; + } + *free_format_bytes = 0; + } + } + *ptr_frame_bytes = 0; + return mp3_bytes; +} +MA_API void ma_dr_mp3dec_init(ma_dr_mp3dec *dec) +{ + dec->header[0] = 0; +} +MA_API int ma_dr_mp3dec_decode_frame(ma_dr_mp3dec *dec, const ma_uint8 *mp3, int mp3_bytes, void *pcm, ma_dr_mp3dec_frame_info *info) +{ + int i = 0, igr, frame_size = 0, success = 1; + const ma_uint8 *hdr; + ma_dr_mp3_bs bs_frame[1]; + ma_dr_mp3dec_scratch scratch; + if (mp3_bytes > 4 && dec->header[0] == 0xff && ma_dr_mp3_hdr_compare(dec->header, mp3)) + { + frame_size = ma_dr_mp3_hdr_frame_bytes(mp3, dec->free_format_bytes) + ma_dr_mp3_hdr_padding(mp3); + if (frame_size != mp3_bytes && (frame_size + MA_DR_MP3_HDR_SIZE > mp3_bytes || !ma_dr_mp3_hdr_compare(mp3, mp3 + frame_size))) + { + frame_size = 0; + } + } + if (!frame_size) + { + MA_DR_MP3_ZERO_MEMORY(dec, sizeof(ma_dr_mp3dec)); + i = ma_dr_mp3d_find_frame(mp3, mp3_bytes, &dec->free_format_bytes, &frame_size); + if (!frame_size || i + frame_size > mp3_bytes) + { + info->frame_bytes = i; + return 0; + } + } + hdr = mp3 + i; + MA_DR_MP3_COPY_MEMORY(dec->header, hdr, MA_DR_MP3_HDR_SIZE); + info->frame_bytes = i + frame_size; + info->channels = MA_DR_MP3_HDR_IS_MONO(hdr) ? 1 : 2; + info->hz = ma_dr_mp3_hdr_sample_rate_hz(hdr); + info->layer = 4 - MA_DR_MP3_HDR_GET_LAYER(hdr); + info->bitrate_kbps = ma_dr_mp3_hdr_bitrate_kbps(hdr); + ma_dr_mp3_bs_init(bs_frame, hdr + MA_DR_MP3_HDR_SIZE, frame_size - MA_DR_MP3_HDR_SIZE); + if (MA_DR_MP3_HDR_IS_CRC(hdr)) + { + ma_dr_mp3_bs_get_bits(bs_frame, 16); + } + if (info->layer == 3) + { + int main_data_begin = ma_dr_mp3_L3_read_side_info(bs_frame, scratch.gr_info, hdr); + if (main_data_begin < 0 || bs_frame->pos > bs_frame->limit) + { + ma_dr_mp3dec_init(dec); + return 0; + } + success = ma_dr_mp3_L3_restore_reservoir(dec, bs_frame, &scratch, main_data_begin); + if (success && pcm != NULL) + { + for (igr = 0; igr < (MA_DR_MP3_HDR_TEST_MPEG1(hdr) ? 
2 : 1); igr++, pcm = MA_DR_MP3_OFFSET_PTR(pcm, sizeof(ma_dr_mp3d_sample_t)*576*info->channels)) + { + MA_DR_MP3_ZERO_MEMORY(scratch.grbuf[0], 576*2*sizeof(float)); + ma_dr_mp3_L3_decode(dec, &scratch, scratch.gr_info + igr*info->channels, info->channels); + ma_dr_mp3d_synth_granule(dec->qmf_state, scratch.grbuf[0], 18, info->channels, (ma_dr_mp3d_sample_t*)pcm, scratch.syn[0]); + } + } + ma_dr_mp3_L3_save_reservoir(dec, &scratch); + } else + { +#ifdef MA_DR_MP3_ONLY_MP3 + return 0; +#else + ma_dr_mp3_L12_scale_info sci[1]; + if (pcm == NULL) { + return ma_dr_mp3_hdr_frame_samples(hdr); + } + ma_dr_mp3_L12_read_scale_info(hdr, bs_frame, sci); + MA_DR_MP3_ZERO_MEMORY(scratch.grbuf[0], 576*2*sizeof(float)); + for (i = 0, igr = 0; igr < 3; igr++) + { + if (12 == (i += ma_dr_mp3_L12_dequantize_granule(scratch.grbuf[0] + i, bs_frame, sci, info->layer | 1))) + { + i = 0; + ma_dr_mp3_L12_apply_scf_384(sci, sci->scf + igr, scratch.grbuf[0]); + ma_dr_mp3d_synth_granule(dec->qmf_state, scratch.grbuf[0], 12, info->channels, (ma_dr_mp3d_sample_t*)pcm, scratch.syn[0]); + MA_DR_MP3_ZERO_MEMORY(scratch.grbuf[0], 576*2*sizeof(float)); + pcm = MA_DR_MP3_OFFSET_PTR(pcm, sizeof(ma_dr_mp3d_sample_t)*384*info->channels); + } + if (bs_frame->pos > bs_frame->limit) + { + ma_dr_mp3dec_init(dec); + return 0; + } + } +#endif + } + return success*ma_dr_mp3_hdr_frame_samples(dec->header); +} +MA_API void ma_dr_mp3dec_f32_to_s16(const float *in, ma_int16 *out, size_t num_samples) +{ + size_t i = 0; +#if MA_DR_MP3_HAVE_SIMD + size_t aligned_count = num_samples & ~7; + for(; i < aligned_count; i+=8) + { + ma_dr_mp3_f4 scale = MA_DR_MP3_VSET(32768.0f); + ma_dr_mp3_f4 a = MA_DR_MP3_VMUL(MA_DR_MP3_VLD(&in[i ]), scale); + ma_dr_mp3_f4 b = MA_DR_MP3_VMUL(MA_DR_MP3_VLD(&in[i+4]), scale); +#if MA_DR_MP3_HAVE_SSE + ma_dr_mp3_f4 s16max = MA_DR_MP3_VSET( 32767.0f); + ma_dr_mp3_f4 s16min = MA_DR_MP3_VSET(-32768.0f); + __m128i pcm8 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_max_ps(_mm_min_ps(a, s16max), s16min)), + _mm_cvtps_epi32(_mm_max_ps(_mm_min_ps(b, s16max), s16min))); + out[i ] = (ma_int16)_mm_extract_epi16(pcm8, 0); + out[i+1] = (ma_int16)_mm_extract_epi16(pcm8, 1); + out[i+2] = (ma_int16)_mm_extract_epi16(pcm8, 2); + out[i+3] = (ma_int16)_mm_extract_epi16(pcm8, 3); + out[i+4] = (ma_int16)_mm_extract_epi16(pcm8, 4); + out[i+5] = (ma_int16)_mm_extract_epi16(pcm8, 5); + out[i+6] = (ma_int16)_mm_extract_epi16(pcm8, 6); + out[i+7] = (ma_int16)_mm_extract_epi16(pcm8, 7); +#else + int16x4_t pcma, pcmb; + a = MA_DR_MP3_VADD(a, MA_DR_MP3_VSET(0.5f)); + b = MA_DR_MP3_VADD(b, MA_DR_MP3_VSET(0.5f)); + pcma = vqmovn_s32(vqaddq_s32(vcvtq_s32_f32(a), vreinterpretq_s32_u32(vcltq_f32(a, MA_DR_MP3_VSET(0))))); + pcmb = vqmovn_s32(vqaddq_s32(vcvtq_s32_f32(b), vreinterpretq_s32_u32(vcltq_f32(b, MA_DR_MP3_VSET(0))))); + vst1_lane_s16(out+i , pcma, 0); + vst1_lane_s16(out+i+1, pcma, 1); + vst1_lane_s16(out+i+2, pcma, 2); + vst1_lane_s16(out+i+3, pcma, 3); + vst1_lane_s16(out+i+4, pcmb, 0); + vst1_lane_s16(out+i+5, pcmb, 1); + vst1_lane_s16(out+i+6, pcmb, 2); + vst1_lane_s16(out+i+7, pcmb, 3); +#endif + } +#endif + for(; i < num_samples; i++) + { + float sample = in[i] * 32768.0f; + if (sample >= 32766.5) + out[i] = (ma_int16) 32767; + else if (sample <= -32767.5) + out[i] = (ma_int16)-32768; + else + { + short s = (ma_int16)(sample + .5f); + s -= (s < 0); + out[i] = s; + } + } +} +#ifndef MA_DR_MP3_SEEK_LEADING_MP3_FRAMES +#define MA_DR_MP3_SEEK_LEADING_MP3_FRAMES 2 +#endif +#define MA_DR_MP3_MIN_DATA_CHUNK_SIZE 16384 +#ifndef 
MA_DR_MP3_DATA_CHUNK_SIZE +#define MA_DR_MP3_DATA_CHUNK_SIZE (MA_DR_MP3_MIN_DATA_CHUNK_SIZE*4) +#endif +#define MA_DR_MP3_COUNTOF(x) (sizeof(x) / sizeof(x[0])) +#define MA_DR_MP3_CLAMP(x, lo, hi) (MA_DR_MP3_MAX(lo, MA_DR_MP3_MIN(x, hi))) +#ifndef MA_DR_MP3_PI_D +#define MA_DR_MP3_PI_D 3.14159265358979323846264 +#endif +#define MA_DR_MP3_DEFAULT_RESAMPLER_LPF_ORDER 2 +static MA_INLINE float ma_dr_mp3_mix_f32(float x, float y, float a) +{ + return x*(1-a) + y*a; +} +static MA_INLINE float ma_dr_mp3_mix_f32_fast(float x, float y, float a) +{ + float r0 = (y - x); + float r1 = r0*a; + return x + r1; +} +static MA_INLINE ma_uint32 ma_dr_mp3_gcf_u32(ma_uint32 a, ma_uint32 b) +{ + for (;;) { + if (b == 0) { + break; + } else { + ma_uint32 t = a; + a = b; + b = t % a; + } + } + return a; +} +static void* ma_dr_mp3__malloc_default(size_t sz, void* pUserData) +{ + (void)pUserData; + return MA_DR_MP3_MALLOC(sz); +} +static void* ma_dr_mp3__realloc_default(void* p, size_t sz, void* pUserData) +{ + (void)pUserData; + return MA_DR_MP3_REALLOC(p, sz); +} +static void ma_dr_mp3__free_default(void* p, void* pUserData) +{ + (void)pUserData; + MA_DR_MP3_FREE(p); +} +static void* ma_dr_mp3__malloc_from_callbacks(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + if (pAllocationCallbacks->onMalloc != NULL) { + return pAllocationCallbacks->onMalloc(sz, pAllocationCallbacks->pUserData); + } + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(NULL, sz, pAllocationCallbacks->pUserData); + } + return NULL; +} +static void* ma_dr_mp3__realloc_from_callbacks(void* p, size_t szNew, size_t szOld, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(p, szNew, pAllocationCallbacks->pUserData); + } + if (pAllocationCallbacks->onMalloc != NULL && pAllocationCallbacks->onFree != NULL) { + void* p2; + p2 = pAllocationCallbacks->onMalloc(szNew, pAllocationCallbacks->pUserData); + if (p2 == NULL) { + return NULL; + } + if (p != NULL) { + MA_DR_MP3_COPY_MEMORY(p2, p, szOld); + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } + return p2; + } + return NULL; +} +static void ma_dr_mp3__free_from_callbacks(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (p == NULL || pAllocationCallbacks == NULL) { + return; + } + if (pAllocationCallbacks->onFree != NULL) { + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } +} +static ma_allocation_callbacks ma_dr_mp3_copy_allocation_callbacks_or_defaults(const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + return *pAllocationCallbacks; + } else { + ma_allocation_callbacks allocationCallbacks; + allocationCallbacks.pUserData = NULL; + allocationCallbacks.onMalloc = ma_dr_mp3__malloc_default; + allocationCallbacks.onRealloc = ma_dr_mp3__realloc_default; + allocationCallbacks.onFree = ma_dr_mp3__free_default; + return allocationCallbacks; + } +} +static size_t ma_dr_mp3__on_read(ma_dr_mp3* pMP3, void* pBufferOut, size_t bytesToRead) +{ + size_t bytesRead = pMP3->onRead(pMP3->pUserData, pBufferOut, bytesToRead); + pMP3->streamCursor += bytesRead; + return bytesRead; +} +static ma_bool32 ma_dr_mp3__on_seek(ma_dr_mp3* pMP3, int offset, ma_dr_mp3_seek_origin origin) +{ + MA_DR_MP3_ASSERT(offset >= 0); + if (!pMP3->onSeek(pMP3->pUserData, 
offset, origin)) { + return MA_FALSE; + } + if (origin == ma_dr_mp3_seek_origin_start) { + pMP3->streamCursor = (ma_uint64)offset; + } else { + pMP3->streamCursor += offset; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_mp3__on_seek_64(ma_dr_mp3* pMP3, ma_uint64 offset, ma_dr_mp3_seek_origin origin) +{ + if (offset <= 0x7FFFFFFF) { + return ma_dr_mp3__on_seek(pMP3, (int)offset, origin); + } + if (!ma_dr_mp3__on_seek(pMP3, 0x7FFFFFFF, ma_dr_mp3_seek_origin_start)) { + return MA_FALSE; + } + offset -= 0x7FFFFFFF; + while (offset > 0) { + if (offset <= 0x7FFFFFFF) { + if (!ma_dr_mp3__on_seek(pMP3, (int)offset, ma_dr_mp3_seek_origin_current)) { + return MA_FALSE; + } + offset = 0; + } else { + if (!ma_dr_mp3__on_seek(pMP3, 0x7FFFFFFF, ma_dr_mp3_seek_origin_current)) { + return MA_FALSE; + } + offset -= 0x7FFFFFFF; + } + } + return MA_TRUE; +} +static ma_uint32 ma_dr_mp3_decode_next_frame_ex__callbacks(ma_dr_mp3* pMP3, ma_dr_mp3d_sample_t* pPCMFrames) +{ + ma_uint32 pcmFramesRead = 0; + MA_DR_MP3_ASSERT(pMP3 != NULL); + MA_DR_MP3_ASSERT(pMP3->onRead != NULL); + if (pMP3->atEnd) { + return 0; + } + for (;;) { + ma_dr_mp3dec_frame_info info; + if (pMP3->dataSize < MA_DR_MP3_MIN_DATA_CHUNK_SIZE) { + size_t bytesRead; + if (pMP3->pData != NULL) { + MA_DR_MP3_MOVE_MEMORY(pMP3->pData, pMP3->pData + pMP3->dataConsumed, pMP3->dataSize); + } + pMP3->dataConsumed = 0; + if (pMP3->dataCapacity < MA_DR_MP3_DATA_CHUNK_SIZE) { + ma_uint8* pNewData; + size_t newDataCap; + newDataCap = MA_DR_MP3_DATA_CHUNK_SIZE; + pNewData = (ma_uint8*)ma_dr_mp3__realloc_from_callbacks(pMP3->pData, newDataCap, pMP3->dataCapacity, &pMP3->allocationCallbacks); + if (pNewData == NULL) { + return 0; + } + pMP3->pData = pNewData; + pMP3->dataCapacity = newDataCap; + } + bytesRead = ma_dr_mp3__on_read(pMP3, pMP3->pData + pMP3->dataSize, (pMP3->dataCapacity - pMP3->dataSize)); + if (bytesRead == 0) { + if (pMP3->dataSize == 0) { + pMP3->atEnd = MA_TRUE; + return 0; + } + } + pMP3->dataSize += bytesRead; + } + if (pMP3->dataSize > INT_MAX) { + pMP3->atEnd = MA_TRUE; + return 0; + } + MA_DR_MP3_ASSERT(pMP3->pData != NULL); + MA_DR_MP3_ASSERT(pMP3->dataCapacity > 0); + if (pMP3->pData == NULL) { + return 0; + } + pcmFramesRead = ma_dr_mp3dec_decode_frame(&pMP3->decoder, pMP3->pData + pMP3->dataConsumed, (int)pMP3->dataSize, pPCMFrames, &info); + if (info.frame_bytes > 0) { + pMP3->dataConsumed += (size_t)info.frame_bytes; + pMP3->dataSize -= (size_t)info.frame_bytes; + } + if (pcmFramesRead > 0) { + pcmFramesRead = ma_dr_mp3_hdr_frame_samples(pMP3->decoder.header); + pMP3->pcmFramesConsumedInMP3Frame = 0; + pMP3->pcmFramesRemainingInMP3Frame = pcmFramesRead; + pMP3->mp3FrameChannels = info.channels; + pMP3->mp3FrameSampleRate = info.hz; + break; + } else if (info.frame_bytes == 0) { + size_t bytesRead; + MA_DR_MP3_MOVE_MEMORY(pMP3->pData, pMP3->pData + pMP3->dataConsumed, pMP3->dataSize); + pMP3->dataConsumed = 0; + if (pMP3->dataCapacity == pMP3->dataSize) { + ma_uint8* pNewData; + size_t newDataCap; + newDataCap = pMP3->dataCapacity + MA_DR_MP3_DATA_CHUNK_SIZE; + pNewData = (ma_uint8*)ma_dr_mp3__realloc_from_callbacks(pMP3->pData, newDataCap, pMP3->dataCapacity, &pMP3->allocationCallbacks); + if (pNewData == NULL) { + return 0; + } + pMP3->pData = pNewData; + pMP3->dataCapacity = newDataCap; + } + bytesRead = ma_dr_mp3__on_read(pMP3, pMP3->pData + pMP3->dataSize, (pMP3->dataCapacity - pMP3->dataSize)); + if (bytesRead == 0) { + pMP3->atEnd = MA_TRUE; + return 0; + } + pMP3->dataSize += bytesRead; + } + }; + return pcmFramesRead; +} 
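/*
Illustrative sketch only (not part of miniaudio / dr_mp3): how the chunked
decode loop above (ma_dr_mp3_decode_next_frame_ex__callbacks) is normally
driven through the public API defined in this section -- ma_dr_mp3_init_file,
ma_dr_mp3_read_pcm_frames_f32 and ma_dr_mp3_uninit. Assumes miniaudio.h has
already been included with MINIAUDIO_IMPLEMENTATION defined elsewhere and
that MA_DR_MP3_NO_STDIO is not set; the file path is hypothetical. Wrapped in
#if 0 so it is never compiled as part of this file.
*/
#if 0
#include <stdio.h>

static int decode_whole_file_f32(const char* pPath)
{
    ma_dr_mp3 mp3;
    float frames[4096];            /* interleaved f32 samples */
    ma_uint64 totalFramesRead = 0;

    if (!ma_dr_mp3_init_file(&mp3, pPath, NULL)) {
        return -1;                 /* could not open or parse the first MP3 frame */
    }

    /* channels/sampleRate are valid here because ma_dr_mp3_init_internal
       decodes the first MP3 frame during initialization (see above). */
    printf("%u ch @ %u Hz\n", mp3.channels, mp3.sampleRate);

    for (;;) {
        ma_uint64 framesToRead = 4096 / mp3.channels;
        ma_uint64 framesRead = ma_dr_mp3_read_pcm_frames_f32(&mp3, framesToRead, frames);
        if (framesRead == 0) {
            break;                 /* end of stream or decode error */
        }
        totalFramesRead += framesRead;
        /* ... consume framesRead * mp3.channels floats from `frames` here ... */
    }

    ma_dr_mp3_uninit(&mp3);
    printf("decoded %llu PCM frames\n", (unsigned long long)totalFramesRead);
    return 0;
}
#endif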
+static ma_uint32 ma_dr_mp3_decode_next_frame_ex__memory(ma_dr_mp3* pMP3, ma_dr_mp3d_sample_t* pPCMFrames) +{ + ma_uint32 pcmFramesRead = 0; + ma_dr_mp3dec_frame_info info; + MA_DR_MP3_ASSERT(pMP3 != NULL); + MA_DR_MP3_ASSERT(pMP3->memory.pData != NULL); + if (pMP3->atEnd) { + return 0; + } + for (;;) { + pcmFramesRead = ma_dr_mp3dec_decode_frame(&pMP3->decoder, pMP3->memory.pData + pMP3->memory.currentReadPos, (int)(pMP3->memory.dataSize - pMP3->memory.currentReadPos), pPCMFrames, &info); + if (pcmFramesRead > 0) { + pcmFramesRead = ma_dr_mp3_hdr_frame_samples(pMP3->decoder.header); + pMP3->pcmFramesConsumedInMP3Frame = 0; + pMP3->pcmFramesRemainingInMP3Frame = pcmFramesRead; + pMP3->mp3FrameChannels = info.channels; + pMP3->mp3FrameSampleRate = info.hz; + break; + } else if (info.frame_bytes > 0) { + pMP3->memory.currentReadPos += (size_t)info.frame_bytes; + } else { + break; + } + } + pMP3->memory.currentReadPos += (size_t)info.frame_bytes; + return pcmFramesRead; +} +static ma_uint32 ma_dr_mp3_decode_next_frame_ex(ma_dr_mp3* pMP3, ma_dr_mp3d_sample_t* pPCMFrames) +{ + if (pMP3->memory.pData != NULL && pMP3->memory.dataSize > 0) { + return ma_dr_mp3_decode_next_frame_ex__memory(pMP3, pPCMFrames); + } else { + return ma_dr_mp3_decode_next_frame_ex__callbacks(pMP3, pPCMFrames); + } +} +static ma_uint32 ma_dr_mp3_decode_next_frame(ma_dr_mp3* pMP3) +{ + MA_DR_MP3_ASSERT(pMP3 != NULL); + return ma_dr_mp3_decode_next_frame_ex(pMP3, (ma_dr_mp3d_sample_t*)pMP3->pcmFrames); +} +#if 0 +static ma_uint32 ma_dr_mp3_seek_next_frame(ma_dr_mp3* pMP3) +{ + ma_uint32 pcmFrameCount; + MA_DR_MP3_ASSERT(pMP3 != NULL); + pcmFrameCount = ma_dr_mp3_decode_next_frame_ex(pMP3, NULL); + if (pcmFrameCount == 0) { + return 0; + } + pMP3->currentPCMFrame += pcmFrameCount; + pMP3->pcmFramesConsumedInMP3Frame = pcmFrameCount; + pMP3->pcmFramesRemainingInMP3Frame = 0; + return pcmFrameCount; +} +#endif +static ma_bool32 ma_dr_mp3_init_internal(ma_dr_mp3* pMP3, ma_dr_mp3_read_proc onRead, ma_dr_mp3_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + MA_DR_MP3_ASSERT(pMP3 != NULL); + MA_DR_MP3_ASSERT(onRead != NULL); + ma_dr_mp3dec_init(&pMP3->decoder); + pMP3->onRead = onRead; + pMP3->onSeek = onSeek; + pMP3->pUserData = pUserData; + pMP3->allocationCallbacks = ma_dr_mp3_copy_allocation_callbacks_or_defaults(pAllocationCallbacks); + if (pMP3->allocationCallbacks.onFree == NULL || (pMP3->allocationCallbacks.onMalloc == NULL && pMP3->allocationCallbacks.onRealloc == NULL)) { + return MA_FALSE; + } + if (ma_dr_mp3_decode_next_frame(pMP3) == 0) { + ma_dr_mp3__free_from_callbacks(pMP3->pData, &pMP3->allocationCallbacks); + return MA_FALSE; + } + pMP3->channels = pMP3->mp3FrameChannels; + pMP3->sampleRate = pMP3->mp3FrameSampleRate; + return MA_TRUE; +} +MA_API ma_bool32 ma_dr_mp3_init(ma_dr_mp3* pMP3, ma_dr_mp3_read_proc onRead, ma_dr_mp3_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pMP3 == NULL || onRead == NULL) { + return MA_FALSE; + } + MA_DR_MP3_ZERO_OBJECT(pMP3); + return ma_dr_mp3_init_internal(pMP3, onRead, onSeek, pUserData, pAllocationCallbacks); +} +static size_t ma_dr_mp3__on_read_memory(void* pUserData, void* pBufferOut, size_t bytesToRead) +{ + ma_dr_mp3* pMP3 = (ma_dr_mp3*)pUserData; + size_t bytesRemaining; + MA_DR_MP3_ASSERT(pMP3 != NULL); + MA_DR_MP3_ASSERT(pMP3->memory.dataSize >= pMP3->memory.currentReadPos); + bytesRemaining = pMP3->memory.dataSize - pMP3->memory.currentReadPos; + if (bytesToRead > 
bytesRemaining) { + bytesToRead = bytesRemaining; + } + if (bytesToRead > 0) { + MA_DR_MP3_COPY_MEMORY(pBufferOut, pMP3->memory.pData + pMP3->memory.currentReadPos, bytesToRead); + pMP3->memory.currentReadPos += bytesToRead; + } + return bytesToRead; +} +static ma_bool32 ma_dr_mp3__on_seek_memory(void* pUserData, int byteOffset, ma_dr_mp3_seek_origin origin) +{ + ma_dr_mp3* pMP3 = (ma_dr_mp3*)pUserData; + MA_DR_MP3_ASSERT(pMP3 != NULL); + if (origin == ma_dr_mp3_seek_origin_current) { + if (byteOffset > 0) { + if (pMP3->memory.currentReadPos + byteOffset > pMP3->memory.dataSize) { + byteOffset = (int)(pMP3->memory.dataSize - pMP3->memory.currentReadPos); + } + } else { + if (pMP3->memory.currentReadPos < (size_t)-byteOffset) { + byteOffset = -(int)pMP3->memory.currentReadPos; + } + } + pMP3->memory.currentReadPos += byteOffset; + } else { + if ((ma_uint32)byteOffset <= pMP3->memory.dataSize) { + pMP3->memory.currentReadPos = byteOffset; + } else { + pMP3->memory.currentReadPos = pMP3->memory.dataSize; + } + } + return MA_TRUE; +} +MA_API ma_bool32 ma_dr_mp3_init_memory(ma_dr_mp3* pMP3, const void* pData, size_t dataSize, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pMP3 == NULL) { + return MA_FALSE; + } + MA_DR_MP3_ZERO_OBJECT(pMP3); + if (pData == NULL || dataSize == 0) { + return MA_FALSE; + } + pMP3->memory.pData = (const ma_uint8*)pData; + pMP3->memory.dataSize = dataSize; + pMP3->memory.currentReadPos = 0; + return ma_dr_mp3_init_internal(pMP3, ma_dr_mp3__on_read_memory, ma_dr_mp3__on_seek_memory, pMP3, pAllocationCallbacks); +} +#ifndef MA_DR_MP3_NO_STDIO +#include <stdio.h> +#include <wchar.h> +static size_t ma_dr_mp3__on_read_stdio(void* pUserData, void* pBufferOut, size_t bytesToRead) +{ + return fread(pBufferOut, 1, bytesToRead, (FILE*)pUserData); +} +static ma_bool32 ma_dr_mp3__on_seek_stdio(void* pUserData, int offset, ma_dr_mp3_seek_origin origin) +{ + return fseek((FILE*)pUserData, offset, (origin == ma_dr_mp3_seek_origin_current) ? 
SEEK_CUR : SEEK_SET) == 0; +} +MA_API ma_bool32 ma_dr_mp3_init_file(ma_dr_mp3* pMP3, const char* pFilePath, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_bool32 result; + FILE* pFile; + if (ma_fopen(&pFile, pFilePath, "rb") != MA_SUCCESS) { + return MA_FALSE; + } + result = ma_dr_mp3_init(pMP3, ma_dr_mp3__on_read_stdio, ma_dr_mp3__on_seek_stdio, (void*)pFile, pAllocationCallbacks); + if (result != MA_TRUE) { + fclose(pFile); + return result; + } + return MA_TRUE; +} +MA_API ma_bool32 ma_dr_mp3_init_file_w(ma_dr_mp3* pMP3, const wchar_t* pFilePath, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_bool32 result; + FILE* pFile; + if (ma_wfopen(&pFile, pFilePath, L"rb", pAllocationCallbacks) != MA_SUCCESS) { + return MA_FALSE; + } + result = ma_dr_mp3_init(pMP3, ma_dr_mp3__on_read_stdio, ma_dr_mp3__on_seek_stdio, (void*)pFile, pAllocationCallbacks); + if (result != MA_TRUE) { + fclose(pFile); + return result; + } + return MA_TRUE; +} +#endif +MA_API void ma_dr_mp3_uninit(ma_dr_mp3* pMP3) +{ + if (pMP3 == NULL) { + return; + } +#ifndef MA_DR_MP3_NO_STDIO + if (pMP3->onRead == ma_dr_mp3__on_read_stdio) { + FILE* pFile = (FILE*)pMP3->pUserData; + if (pFile != NULL) { + fclose(pFile); + pMP3->pUserData = NULL; + } + } +#endif + ma_dr_mp3__free_from_callbacks(pMP3->pData, &pMP3->allocationCallbacks); +} +#if defined(MA_DR_MP3_FLOAT_OUTPUT) +static void ma_dr_mp3_f32_to_s16(ma_int16* dst, const float* src, ma_uint64 sampleCount) +{ + ma_uint64 i; + ma_uint64 i4; + ma_uint64 sampleCount4; + i = 0; + sampleCount4 = sampleCount >> 2; + for (i4 = 0; i4 < sampleCount4; i4 += 1) { + float x0 = src[i+0]; + float x1 = src[i+1]; + float x2 = src[i+2]; + float x3 = src[i+3]; + x0 = ((x0 < -1) ? -1 : ((x0 > 1) ? 1 : x0)); + x1 = ((x1 < -1) ? -1 : ((x1 > 1) ? 1 : x1)); + x2 = ((x2 < -1) ? -1 : ((x2 > 1) ? 1 : x2)); + x3 = ((x3 < -1) ? -1 : ((x3 > 1) ? 1 : x3)); + x0 = x0 * 32767.0f; + x1 = x1 * 32767.0f; + x2 = x2 * 32767.0f; + x3 = x3 * 32767.0f; + dst[i+0] = (ma_int16)x0; + dst[i+1] = (ma_int16)x1; + dst[i+2] = (ma_int16)x2; + dst[i+3] = (ma_int16)x3; + i += 4; + } + for (; i < sampleCount; i += 1) { + float x = src[i]; + x = ((x < -1) ? -1 : ((x > 1) ? 
1 : x)); + x = x * 32767.0f; + dst[i] = (ma_int16)x; + } +} +#endif +#if !defined(MA_DR_MP3_FLOAT_OUTPUT) +static void ma_dr_mp3_s16_to_f32(float* dst, const ma_int16* src, ma_uint64 sampleCount) +{ + ma_uint64 i; + for (i = 0; i < sampleCount; i += 1) { + float x = (float)src[i]; + x = x * 0.000030517578125f; + dst[i] = x; + } +} +#endif +static ma_uint64 ma_dr_mp3_read_pcm_frames_raw(ma_dr_mp3* pMP3, ma_uint64 framesToRead, void* pBufferOut) +{ + ma_uint64 totalFramesRead = 0; + MA_DR_MP3_ASSERT(pMP3 != NULL); + MA_DR_MP3_ASSERT(pMP3->onRead != NULL); + while (framesToRead > 0) { + ma_uint32 framesToConsume = (ma_uint32)MA_DR_MP3_MIN(pMP3->pcmFramesRemainingInMP3Frame, framesToRead); + if (pBufferOut != NULL) { +#if defined(MA_DR_MP3_FLOAT_OUTPUT) + float* pFramesOutF32 = (float*)MA_DR_MP3_OFFSET_PTR(pBufferOut, sizeof(float) * totalFramesRead * pMP3->channels); + float* pFramesInF32 = (float*)MA_DR_MP3_OFFSET_PTR(&pMP3->pcmFrames[0], sizeof(float) * pMP3->pcmFramesConsumedInMP3Frame * pMP3->mp3FrameChannels); + MA_DR_MP3_COPY_MEMORY(pFramesOutF32, pFramesInF32, sizeof(float) * framesToConsume * pMP3->channels); +#else + ma_int16* pFramesOutS16 = (ma_int16*)MA_DR_MP3_OFFSET_PTR(pBufferOut, sizeof(ma_int16) * totalFramesRead * pMP3->channels); + ma_int16* pFramesInS16 = (ma_int16*)MA_DR_MP3_OFFSET_PTR(&pMP3->pcmFrames[0], sizeof(ma_int16) * pMP3->pcmFramesConsumedInMP3Frame * pMP3->mp3FrameChannels); + MA_DR_MP3_COPY_MEMORY(pFramesOutS16, pFramesInS16, sizeof(ma_int16) * framesToConsume * pMP3->channels); +#endif + } + pMP3->currentPCMFrame += framesToConsume; + pMP3->pcmFramesConsumedInMP3Frame += framesToConsume; + pMP3->pcmFramesRemainingInMP3Frame -= framesToConsume; + totalFramesRead += framesToConsume; + framesToRead -= framesToConsume; + if (framesToRead == 0) { + break; + } + MA_DR_MP3_ASSERT(pMP3->pcmFramesRemainingInMP3Frame == 0); + if (ma_dr_mp3_decode_next_frame(pMP3) == 0) { + break; + } + } + return totalFramesRead; +} +MA_API ma_uint64 ma_dr_mp3_read_pcm_frames_f32(ma_dr_mp3* pMP3, ma_uint64 framesToRead, float* pBufferOut) +{ + if (pMP3 == NULL || pMP3->onRead == NULL) { + return 0; + } +#if defined(MA_DR_MP3_FLOAT_OUTPUT) + return ma_dr_mp3_read_pcm_frames_raw(pMP3, framesToRead, pBufferOut); +#else + { + ma_int16 pTempS16[8192]; + ma_uint64 totalPCMFramesRead = 0; + while (totalPCMFramesRead < framesToRead) { + ma_uint64 framesJustRead; + ma_uint64 framesRemaining = framesToRead - totalPCMFramesRead; + ma_uint64 framesToReadNow = MA_DR_MP3_COUNTOF(pTempS16) / pMP3->channels; + if (framesToReadNow > framesRemaining) { + framesToReadNow = framesRemaining; + } + framesJustRead = ma_dr_mp3_read_pcm_frames_raw(pMP3, framesToReadNow, pTempS16); + if (framesJustRead == 0) { + break; + } + ma_dr_mp3_s16_to_f32((float*)MA_DR_MP3_OFFSET_PTR(pBufferOut, sizeof(float) * totalPCMFramesRead * pMP3->channels), pTempS16, framesJustRead * pMP3->channels); + totalPCMFramesRead += framesJustRead; + } + return totalPCMFramesRead; + } +#endif +} +MA_API ma_uint64 ma_dr_mp3_read_pcm_frames_s16(ma_dr_mp3* pMP3, ma_uint64 framesToRead, ma_int16* pBufferOut) +{ + if (pMP3 == NULL || pMP3->onRead == NULL) { + return 0; + } +#if !defined(MA_DR_MP3_FLOAT_OUTPUT) + return ma_dr_mp3_read_pcm_frames_raw(pMP3, framesToRead, pBufferOut); +#else + { + float pTempF32[4096]; + ma_uint64 totalPCMFramesRead = 0; + while (totalPCMFramesRead < framesToRead) { + ma_uint64 framesJustRead; + ma_uint64 framesRemaining = framesToRead - totalPCMFramesRead; + ma_uint64 framesToReadNow = MA_DR_MP3_COUNTOF(pTempF32) / 
pMP3->channels; + if (framesToReadNow > framesRemaining) { + framesToReadNow = framesRemaining; + } + framesJustRead = ma_dr_mp3_read_pcm_frames_raw(pMP3, framesToReadNow, pTempF32); + if (framesJustRead == 0) { + break; + } + ma_dr_mp3_f32_to_s16((ma_int16*)MA_DR_MP3_OFFSET_PTR(pBufferOut, sizeof(ma_int16) * totalPCMFramesRead * pMP3->channels), pTempF32, framesJustRead * pMP3->channels); + totalPCMFramesRead += framesJustRead; + } + return totalPCMFramesRead; + } +#endif +} +static void ma_dr_mp3_reset(ma_dr_mp3* pMP3) +{ + MA_DR_MP3_ASSERT(pMP3 != NULL); + pMP3->pcmFramesConsumedInMP3Frame = 0; + pMP3->pcmFramesRemainingInMP3Frame = 0; + pMP3->currentPCMFrame = 0; + pMP3->dataSize = 0; + pMP3->atEnd = MA_FALSE; + ma_dr_mp3dec_init(&pMP3->decoder); +} +static ma_bool32 ma_dr_mp3_seek_to_start_of_stream(ma_dr_mp3* pMP3) +{ + MA_DR_MP3_ASSERT(pMP3 != NULL); + MA_DR_MP3_ASSERT(pMP3->onSeek != NULL); + if (!ma_dr_mp3__on_seek(pMP3, 0, ma_dr_mp3_seek_origin_start)) { + return MA_FALSE; + } + ma_dr_mp3_reset(pMP3); + return MA_TRUE; +} +static ma_bool32 ma_dr_mp3_seek_forward_by_pcm_frames__brute_force(ma_dr_mp3* pMP3, ma_uint64 frameOffset) +{ + ma_uint64 framesRead; +#if defined(MA_DR_MP3_FLOAT_OUTPUT) + framesRead = ma_dr_mp3_read_pcm_frames_f32(pMP3, frameOffset, NULL); +#else + framesRead = ma_dr_mp3_read_pcm_frames_s16(pMP3, frameOffset, NULL); +#endif + if (framesRead != frameOffset) { + return MA_FALSE; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_mp3_seek_to_pcm_frame__brute_force(ma_dr_mp3* pMP3, ma_uint64 frameIndex) +{ + MA_DR_MP3_ASSERT(pMP3 != NULL); + if (frameIndex == pMP3->currentPCMFrame) { + return MA_TRUE; + } + if (frameIndex < pMP3->currentPCMFrame) { + if (!ma_dr_mp3_seek_to_start_of_stream(pMP3)) { + return MA_FALSE; + } + } + MA_DR_MP3_ASSERT(frameIndex >= pMP3->currentPCMFrame); + return ma_dr_mp3_seek_forward_by_pcm_frames__brute_force(pMP3, (frameIndex - pMP3->currentPCMFrame)); +} +static ma_bool32 ma_dr_mp3_find_closest_seek_point(ma_dr_mp3* pMP3, ma_uint64 frameIndex, ma_uint32* pSeekPointIndex) +{ + ma_uint32 iSeekPoint; + MA_DR_MP3_ASSERT(pSeekPointIndex != NULL); + *pSeekPointIndex = 0; + if (frameIndex < pMP3->pSeekPoints[0].pcmFrameIndex) { + return MA_FALSE; + } + for (iSeekPoint = 0; iSeekPoint < pMP3->seekPointCount; ++iSeekPoint) { + if (pMP3->pSeekPoints[iSeekPoint].pcmFrameIndex > frameIndex) { + break; + } + *pSeekPointIndex = iSeekPoint; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_mp3_seek_to_pcm_frame__seek_table(ma_dr_mp3* pMP3, ma_uint64 frameIndex) +{ + ma_dr_mp3_seek_point seekPoint; + ma_uint32 priorSeekPointIndex; + ma_uint16 iMP3Frame; + ma_uint64 leftoverFrames; + MA_DR_MP3_ASSERT(pMP3 != NULL); + MA_DR_MP3_ASSERT(pMP3->pSeekPoints != NULL); + MA_DR_MP3_ASSERT(pMP3->seekPointCount > 0); + if (ma_dr_mp3_find_closest_seek_point(pMP3, frameIndex, &priorSeekPointIndex)) { + seekPoint = pMP3->pSeekPoints[priorSeekPointIndex]; + } else { + seekPoint.seekPosInBytes = 0; + seekPoint.pcmFrameIndex = 0; + seekPoint.mp3FramesToDiscard = 0; + seekPoint.pcmFramesToDiscard = 0; + } + if (!ma_dr_mp3__on_seek_64(pMP3, seekPoint.seekPosInBytes, ma_dr_mp3_seek_origin_start)) { + return MA_FALSE; + } + ma_dr_mp3_reset(pMP3); + for (iMP3Frame = 0; iMP3Frame < seekPoint.mp3FramesToDiscard; ++iMP3Frame) { + ma_uint32 pcmFramesRead; + ma_dr_mp3d_sample_t* pPCMFrames; + pPCMFrames = NULL; + if (iMP3Frame == seekPoint.mp3FramesToDiscard-1) { + pPCMFrames = (ma_dr_mp3d_sample_t*)pMP3->pcmFrames; + } + pcmFramesRead = ma_dr_mp3_decode_next_frame_ex(pMP3, 
pPCMFrames); + if (pcmFramesRead == 0) { + return MA_FALSE; + } + } + pMP3->currentPCMFrame = seekPoint.pcmFrameIndex - seekPoint.pcmFramesToDiscard; + leftoverFrames = frameIndex - pMP3->currentPCMFrame; + return ma_dr_mp3_seek_forward_by_pcm_frames__brute_force(pMP3, leftoverFrames); +} +MA_API ma_bool32 ma_dr_mp3_seek_to_pcm_frame(ma_dr_mp3* pMP3, ma_uint64 frameIndex) +{ + if (pMP3 == NULL || pMP3->onSeek == NULL) { + return MA_FALSE; + } + if (frameIndex == 0) { + return ma_dr_mp3_seek_to_start_of_stream(pMP3); + } + if (pMP3->pSeekPoints != NULL && pMP3->seekPointCount > 0) { + return ma_dr_mp3_seek_to_pcm_frame__seek_table(pMP3, frameIndex); + } else { + return ma_dr_mp3_seek_to_pcm_frame__brute_force(pMP3, frameIndex); + } +} +MA_API ma_bool32 ma_dr_mp3_get_mp3_and_pcm_frame_count(ma_dr_mp3* pMP3, ma_uint64* pMP3FrameCount, ma_uint64* pPCMFrameCount) +{ + ma_uint64 currentPCMFrame; + ma_uint64 totalPCMFrameCount; + ma_uint64 totalMP3FrameCount; + if (pMP3 == NULL) { + return MA_FALSE; + } + if (pMP3->onSeek == NULL) { + return MA_FALSE; + } + currentPCMFrame = pMP3->currentPCMFrame; + if (!ma_dr_mp3_seek_to_start_of_stream(pMP3)) { + return MA_FALSE; + } + totalPCMFrameCount = 0; + totalMP3FrameCount = 0; + for (;;) { + ma_uint32 pcmFramesInCurrentMP3Frame; + pcmFramesInCurrentMP3Frame = ma_dr_mp3_decode_next_frame_ex(pMP3, NULL); + if (pcmFramesInCurrentMP3Frame == 0) { + break; + } + totalPCMFrameCount += pcmFramesInCurrentMP3Frame; + totalMP3FrameCount += 1; + } + if (!ma_dr_mp3_seek_to_start_of_stream(pMP3)) { + return MA_FALSE; + } + if (!ma_dr_mp3_seek_to_pcm_frame(pMP3, currentPCMFrame)) { + return MA_FALSE; + } + if (pMP3FrameCount != NULL) { + *pMP3FrameCount = totalMP3FrameCount; + } + if (pPCMFrameCount != NULL) { + *pPCMFrameCount = totalPCMFrameCount; + } + return MA_TRUE; +} +MA_API ma_uint64 ma_dr_mp3_get_pcm_frame_count(ma_dr_mp3* pMP3) +{ + ma_uint64 totalPCMFrameCount; + if (!ma_dr_mp3_get_mp3_and_pcm_frame_count(pMP3, NULL, &totalPCMFrameCount)) { + return 0; + } + return totalPCMFrameCount; +} +MA_API ma_uint64 ma_dr_mp3_get_mp3_frame_count(ma_dr_mp3* pMP3) +{ + ma_uint64 totalMP3FrameCount; + if (!ma_dr_mp3_get_mp3_and_pcm_frame_count(pMP3, &totalMP3FrameCount, NULL)) { + return 0; + } + return totalMP3FrameCount; +} +static void ma_dr_mp3__accumulate_running_pcm_frame_count(ma_dr_mp3* pMP3, ma_uint32 pcmFrameCountIn, ma_uint64* pRunningPCMFrameCount, float* pRunningPCMFrameCountFractionalPart) +{ + float srcRatio; + float pcmFrameCountOutF; + ma_uint32 pcmFrameCountOut; + srcRatio = (float)pMP3->mp3FrameSampleRate / (float)pMP3->sampleRate; + MA_DR_MP3_ASSERT(srcRatio > 0); + pcmFrameCountOutF = *pRunningPCMFrameCountFractionalPart + (pcmFrameCountIn / srcRatio); + pcmFrameCountOut = (ma_uint32)pcmFrameCountOutF; + *pRunningPCMFrameCountFractionalPart = pcmFrameCountOutF - pcmFrameCountOut; + *pRunningPCMFrameCount += pcmFrameCountOut; +} +typedef struct +{ + ma_uint64 bytePos; + ma_uint64 pcmFrameIndex; +} ma_dr_mp3__seeking_mp3_frame_info; +MA_API ma_bool32 ma_dr_mp3_calculate_seek_points(ma_dr_mp3* pMP3, ma_uint32* pSeekPointCount, ma_dr_mp3_seek_point* pSeekPoints) +{ + ma_uint32 seekPointCount; + ma_uint64 currentPCMFrame; + ma_uint64 totalMP3FrameCount; + ma_uint64 totalPCMFrameCount; + if (pMP3 == NULL || pSeekPointCount == NULL || pSeekPoints == NULL) { + return MA_FALSE; + } + seekPointCount = *pSeekPointCount; + if (seekPointCount == 0) { + return MA_FALSE; + } + currentPCMFrame = pMP3->currentPCMFrame; + if 
(!ma_dr_mp3_get_mp3_and_pcm_frame_count(pMP3, &totalMP3FrameCount, &totalPCMFrameCount)) { + return MA_FALSE; + } + if (totalMP3FrameCount < MA_DR_MP3_SEEK_LEADING_MP3_FRAMES+1) { + seekPointCount = 1; + pSeekPoints[0].seekPosInBytes = 0; + pSeekPoints[0].pcmFrameIndex = 0; + pSeekPoints[0].mp3FramesToDiscard = 0; + pSeekPoints[0].pcmFramesToDiscard = 0; + } else { + ma_uint64 pcmFramesBetweenSeekPoints; + ma_dr_mp3__seeking_mp3_frame_info mp3FrameInfo[MA_DR_MP3_SEEK_LEADING_MP3_FRAMES+1]; + ma_uint64 runningPCMFrameCount = 0; + float runningPCMFrameCountFractionalPart = 0; + ma_uint64 nextTargetPCMFrame; + ma_uint32 iMP3Frame; + ma_uint32 iSeekPoint; + if (seekPointCount > totalMP3FrameCount-1) { + seekPointCount = (ma_uint32)totalMP3FrameCount-1; + } + pcmFramesBetweenSeekPoints = totalPCMFrameCount / (seekPointCount+1); + if (!ma_dr_mp3_seek_to_start_of_stream(pMP3)) { + return MA_FALSE; + } + for (iMP3Frame = 0; iMP3Frame < MA_DR_MP3_SEEK_LEADING_MP3_FRAMES+1; ++iMP3Frame) { + ma_uint32 pcmFramesInCurrentMP3FrameIn; + MA_DR_MP3_ASSERT(pMP3->streamCursor >= pMP3->dataSize); + mp3FrameInfo[iMP3Frame].bytePos = pMP3->streamCursor - pMP3->dataSize; + mp3FrameInfo[iMP3Frame].pcmFrameIndex = runningPCMFrameCount; + pcmFramesInCurrentMP3FrameIn = ma_dr_mp3_decode_next_frame_ex(pMP3, NULL); + if (pcmFramesInCurrentMP3FrameIn == 0) { + return MA_FALSE; + } + ma_dr_mp3__accumulate_running_pcm_frame_count(pMP3, pcmFramesInCurrentMP3FrameIn, &runningPCMFrameCount, &runningPCMFrameCountFractionalPart); + } + nextTargetPCMFrame = 0; + for (iSeekPoint = 0; iSeekPoint < seekPointCount; ++iSeekPoint) { + nextTargetPCMFrame += pcmFramesBetweenSeekPoints; + for (;;) { + if (nextTargetPCMFrame < runningPCMFrameCount) { + pSeekPoints[iSeekPoint].seekPosInBytes = mp3FrameInfo[0].bytePos; + pSeekPoints[iSeekPoint].pcmFrameIndex = nextTargetPCMFrame; + pSeekPoints[iSeekPoint].mp3FramesToDiscard = MA_DR_MP3_SEEK_LEADING_MP3_FRAMES; + pSeekPoints[iSeekPoint].pcmFramesToDiscard = (ma_uint16)(nextTargetPCMFrame - mp3FrameInfo[MA_DR_MP3_SEEK_LEADING_MP3_FRAMES-1].pcmFrameIndex); + break; + } else { + size_t i; + ma_uint32 pcmFramesInCurrentMP3FrameIn; + for (i = 0; i < MA_DR_MP3_COUNTOF(mp3FrameInfo)-1; ++i) { + mp3FrameInfo[i] = mp3FrameInfo[i+1]; + } + mp3FrameInfo[MA_DR_MP3_COUNTOF(mp3FrameInfo)-1].bytePos = pMP3->streamCursor - pMP3->dataSize; + mp3FrameInfo[MA_DR_MP3_COUNTOF(mp3FrameInfo)-1].pcmFrameIndex = runningPCMFrameCount; + pcmFramesInCurrentMP3FrameIn = ma_dr_mp3_decode_next_frame_ex(pMP3, NULL); + if (pcmFramesInCurrentMP3FrameIn == 0) { + pSeekPoints[iSeekPoint].seekPosInBytes = mp3FrameInfo[0].bytePos; + pSeekPoints[iSeekPoint].pcmFrameIndex = nextTargetPCMFrame; + pSeekPoints[iSeekPoint].mp3FramesToDiscard = MA_DR_MP3_SEEK_LEADING_MP3_FRAMES; + pSeekPoints[iSeekPoint].pcmFramesToDiscard = (ma_uint16)(nextTargetPCMFrame - mp3FrameInfo[MA_DR_MP3_SEEK_LEADING_MP3_FRAMES-1].pcmFrameIndex); + break; + } + ma_dr_mp3__accumulate_running_pcm_frame_count(pMP3, pcmFramesInCurrentMP3FrameIn, &runningPCMFrameCount, &runningPCMFrameCountFractionalPart); + } + } + } + if (!ma_dr_mp3_seek_to_start_of_stream(pMP3)) { + return MA_FALSE; + } + if (!ma_dr_mp3_seek_to_pcm_frame(pMP3, currentPCMFrame)) { + return MA_FALSE; + } + } + *pSeekPointCount = seekPointCount; + return MA_TRUE; +} +MA_API ma_bool32 ma_dr_mp3_bind_seek_table(ma_dr_mp3* pMP3, ma_uint32 seekPointCount, ma_dr_mp3_seek_point* pSeekPoints) +{ + if (pMP3 == NULL) { + return MA_FALSE; + } + if (seekPointCount == 0 || pSeekPoints == NULL) { + 
pMP3->seekPointCount = 0; + pMP3->pSeekPoints = NULL; + } else { + pMP3->seekPointCount = seekPointCount; + pMP3->pSeekPoints = pSeekPoints; + } + return MA_TRUE; +} +static float* ma_dr_mp3__full_read_and_close_f32(ma_dr_mp3* pMP3, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount) +{ + ma_uint64 totalFramesRead = 0; + ma_uint64 framesCapacity = 0; + float* pFrames = NULL; + float temp[4096]; + MA_DR_MP3_ASSERT(pMP3 != NULL); + for (;;) { + ma_uint64 framesToReadRightNow = MA_DR_MP3_COUNTOF(temp) / pMP3->channels; + ma_uint64 framesJustRead = ma_dr_mp3_read_pcm_frames_f32(pMP3, framesToReadRightNow, temp); + if (framesJustRead == 0) { + break; + } + if (framesCapacity < totalFramesRead + framesJustRead) { + ma_uint64 oldFramesBufferSize; + ma_uint64 newFramesBufferSize; + ma_uint64 newFramesCap; + float* pNewFrames; + newFramesCap = framesCapacity * 2; + if (newFramesCap < totalFramesRead + framesJustRead) { + newFramesCap = totalFramesRead + framesJustRead; + } + oldFramesBufferSize = framesCapacity * pMP3->channels * sizeof(float); + newFramesBufferSize = newFramesCap * pMP3->channels * sizeof(float); + if (newFramesBufferSize > (ma_uint64)MA_SIZE_MAX) { + break; + } + pNewFrames = (float*)ma_dr_mp3__realloc_from_callbacks(pFrames, (size_t)newFramesBufferSize, (size_t)oldFramesBufferSize, &pMP3->allocationCallbacks); + if (pNewFrames == NULL) { + ma_dr_mp3__free_from_callbacks(pFrames, &pMP3->allocationCallbacks); + break; + } + pFrames = pNewFrames; + framesCapacity = newFramesCap; + } + MA_DR_MP3_COPY_MEMORY(pFrames + totalFramesRead*pMP3->channels, temp, (size_t)(framesJustRead*pMP3->channels*sizeof(float))); + totalFramesRead += framesJustRead; + if (framesJustRead != framesToReadRightNow) { + break; + } + } + if (pConfig != NULL) { + pConfig->channels = pMP3->channels; + pConfig->sampleRate = pMP3->sampleRate; + } + ma_dr_mp3_uninit(pMP3); + if (pTotalFrameCount) { + *pTotalFrameCount = totalFramesRead; + } + return pFrames; +} +static ma_int16* ma_dr_mp3__full_read_and_close_s16(ma_dr_mp3* pMP3, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount) +{ + ma_uint64 totalFramesRead = 0; + ma_uint64 framesCapacity = 0; + ma_int16* pFrames = NULL; + ma_int16 temp[4096]; + MA_DR_MP3_ASSERT(pMP3 != NULL); + for (;;) { + ma_uint64 framesToReadRightNow = MA_DR_MP3_COUNTOF(temp) / pMP3->channels; + ma_uint64 framesJustRead = ma_dr_mp3_read_pcm_frames_s16(pMP3, framesToReadRightNow, temp); + if (framesJustRead == 0) { + break; + } + if (framesCapacity < totalFramesRead + framesJustRead) { + ma_uint64 newFramesBufferSize; + ma_uint64 oldFramesBufferSize; + ma_uint64 newFramesCap; + ma_int16* pNewFrames; + newFramesCap = framesCapacity * 2; + if (newFramesCap < totalFramesRead + framesJustRead) { + newFramesCap = totalFramesRead + framesJustRead; + } + oldFramesBufferSize = framesCapacity * pMP3->channels * sizeof(ma_int16); + newFramesBufferSize = newFramesCap * pMP3->channels * sizeof(ma_int16); + if (newFramesBufferSize > (ma_uint64)MA_SIZE_MAX) { + break; + } + pNewFrames = (ma_int16*)ma_dr_mp3__realloc_from_callbacks(pFrames, (size_t)newFramesBufferSize, (size_t)oldFramesBufferSize, &pMP3->allocationCallbacks); + if (pNewFrames == NULL) { + ma_dr_mp3__free_from_callbacks(pFrames, &pMP3->allocationCallbacks); + break; + } + pFrames = pNewFrames; + framesCapacity = newFramesCap; + } + MA_DR_MP3_COPY_MEMORY(pFrames + totalFramesRead*pMP3->channels, temp, (size_t)(framesJustRead*pMP3->channels*sizeof(ma_int16))); + totalFramesRead += framesJustRead; + if (framesJustRead != 
framesToReadRightNow) { + break; + } + } + if (pConfig != NULL) { + pConfig->channels = pMP3->channels; + pConfig->sampleRate = pMP3->sampleRate; + } + ma_dr_mp3_uninit(pMP3); + if (pTotalFrameCount) { + *pTotalFrameCount = totalFramesRead; + } + return pFrames; +} +MA_API float* ma_dr_mp3_open_and_read_pcm_frames_f32(ma_dr_mp3_read_proc onRead, ma_dr_mp3_seek_proc onSeek, void* pUserData, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_mp3 mp3; + if (!ma_dr_mp3_init(&mp3, onRead, onSeek, pUserData, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_mp3__full_read_and_close_f32(&mp3, pConfig, pTotalFrameCount); +} +MA_API ma_int16* ma_dr_mp3_open_and_read_pcm_frames_s16(ma_dr_mp3_read_proc onRead, ma_dr_mp3_seek_proc onSeek, void* pUserData, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_mp3 mp3; + if (!ma_dr_mp3_init(&mp3, onRead, onSeek, pUserData, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_mp3__full_read_and_close_s16(&mp3, pConfig, pTotalFrameCount); +} +MA_API float* ma_dr_mp3_open_memory_and_read_pcm_frames_f32(const void* pData, size_t dataSize, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_mp3 mp3; + if (!ma_dr_mp3_init_memory(&mp3, pData, dataSize, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_mp3__full_read_and_close_f32(&mp3, pConfig, pTotalFrameCount); +} +MA_API ma_int16* ma_dr_mp3_open_memory_and_read_pcm_frames_s16(const void* pData, size_t dataSize, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_mp3 mp3; + if (!ma_dr_mp3_init_memory(&mp3, pData, dataSize, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_mp3__full_read_and_close_s16(&mp3, pConfig, pTotalFrameCount); +} +#ifndef MA_DR_MP3_NO_STDIO +MA_API float* ma_dr_mp3_open_file_and_read_pcm_frames_f32(const char* filePath, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_mp3 mp3; + if (!ma_dr_mp3_init_file(&mp3, filePath, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_mp3__full_read_and_close_f32(&mp3, pConfig, pTotalFrameCount); +} +MA_API ma_int16* ma_dr_mp3_open_file_and_read_pcm_frames_s16(const char* filePath, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_mp3 mp3; + if (!ma_dr_mp3_init_file(&mp3, filePath, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_mp3__full_read_and_close_s16(&mp3, pConfig, pTotalFrameCount); +} +#endif +MA_API void* ma_dr_mp3_malloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + return ma_dr_mp3__malloc_from_callbacks(sz, pAllocationCallbacks); + } else { + return ma_dr_mp3__malloc_default(sz, NULL); + } +} +MA_API void ma_dr_mp3_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + ma_dr_mp3__free_from_callbacks(p, pAllocationCallbacks); + } else { + ma_dr_mp3__free_default(p, NULL); + } +} +#endif +/* dr_mp3_c end */ +#endif /* MA_DR_MP3_IMPLEMENTATION */ +#endif /* MA_NO_MP3 */ + + +/* End globally disabled warnings. 
*/ +#if defined(_MSC_VER) +#pragma warning(pop) +#endif + +#endif /* miniaudio_c */ +#endif /* MINIAUDIO_IMPLEMENTATION */ + + +/* +This software is available as a choice of the following licenses. Choose +whichever you prefer. + +=============================================================================== +ALTERNATIVE 1 - Public Domain (www.unlicense.org) +=============================================================================== +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. + +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to + +=============================================================================== +ALTERNATIVE 2 - MIT No Attribution +=============================================================================== +Copyright 2023 David Reid + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ \ No newline at end of file diff --git a/Example/olcPGEX_Graphics3D.h b/Example/olcPGEX_Graphics3D.h new file mode 100644 index 0000000..bcc0960 --- /dev/null +++ b/Example/olcPGEX_Graphics3D.h @@ -0,0 +1,1730 @@ +#include "olcPixelGameEngine.h" +/* +olcPGEX_Graphics3D.h + ++-------------------------------------------------------------+ +| OneLoneCoder Pixel Game Engine Extension | +| 3D Rendering - v0.4 | ++-------------------------------------------------------------+ + +What is this? +~~~~~~~~~~~~~ +This is an extension to the olcPixelGameEngine, which provides +support for software rendering 3D graphics. + +NOTE!!! This file is under development and may change! + +Big Thanks to MaGetzUb for finding an OOB error, and joshinils +for pointing out sampling inaccuracy. 
+ +License (OLC-3) +~~~~~~~~~~~~~~~ + +Copyright 2018-2019 OneLoneCoder.com + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions or derivations of source code must retain the above +copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions or derivative works in binary form must reproduce +the above copyright notice. This list of conditions and the following +disclaimer must be reproduced in the documentation and/or other +materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Links +~~~~~ +YouTube: https://www.youtube.com/javidx9 +Discord: https://discord.gg/WhwHUMV +Twitter: https://www.twitter.com/javidx9 +Twitch: https://www.twitch.tv/javidx9 +GitHub: https://www.github.com/onelonecoder +Patreon: https://www.patreon.com/javidx9 +Homepage: https://www.onelonecoder.com + +Author +~~~~~~ +David Barr, aka javidx9, �OneLoneCoder 2018 +*/ + + +#ifndef OLC_PGEX_GFX3D +#define OLC_PGEX_GFX3D + +#include +#include +#include +#include +#include +#include +#undef min +#undef max + +//#include + +namespace olc +{ + // Container class for Advanced 2D Drawing functions + class GFX3D : public olc::PGEX + { + + public: + + struct vec2d + { + float x = 0; + float y = 0; + float z = 0; + }; + + struct vec3d + { + float x = 0; + float y = 0; + float z = 0; + float w = 1; // Need a 4th term to perform sensible matrix vector multiplication + }; + + struct triangle + { + vec3d p[3]; + vec2d t[3]; + olc::Pixel col[3]; + }; + + struct mat4x4 + { + float m[4][4] = { 0 }; + }; + + struct mesh + { + std::vector tris; + bool LoadOBJFile(std::string sFilename, bool bHasTexture = false); + }; + + /*class MipMap : public olc::Sprite + { + public: + MipMap(); + MipMap(std::string sImageFile); + MipMap(std::string sImageFile, olc::ResourcePack *pack); + MipMap(int32_t w, int32_t h); + ~MipMap(); + + public: + olc::rcode LoadFromFile(std::string sImageFile, olc::ResourcePack *pack = nullptr); + olc::rcode LoadFromPGESprFile(std::string sImageFile, olc::ResourcePack *pack = nullptr); + Pixel Sample(float x, float y, float z); + Pixel SampleBL(float u, float v, float z); + + private: + int GenerateMipLevels(); + std::vector vecMipMaps; + + };*/ + + class Math + { + public: + Math(); + public: + static vec3d Mat_MultiplyVector(mat4x4 &m, vec3d &i); + static mat4x4 Mat_MultiplyMatrix(mat4x4 &m1, mat4x4 &m2); + static mat4x4 Mat_MakeIdentity(); + static mat4x4 Mat_MakeRotationX(float fAngleRad); + static mat4x4 Mat_MakeRotationY(float 
fAngleRad); + static mat4x4 Mat_MakeRotationZ(float fAngleRad); + static mat4x4 Mat_MakeScale(float x, float y, float z); + static mat4x4 Mat_MakeTranslation(float x, float y, float z); + static mat4x4 Mat_MakeProjection(float fFovDegrees, float fAspectRatio, float fNear, float fFar); + static mat4x4 Mat_PointAt(vec3d &pos, vec3d &target, vec3d &up); + static mat4x4 Mat_QuickInverse(mat4x4 &m); // Only for Rotation/Translation Matrices + static mat4x4 Mat_Inverse(olc::GFX3D::mat4x4 &m); + + static vec3d Vec_Add(vec3d &v1, vec3d &v2); + static vec3d Vec_Sub(vec3d &v1, vec3d &v2); + static vec3d Vec_Mul(vec3d &v1, float k); + static vec3d Vec_Div(vec3d &v1, float k); + static float Vec_DotProduct(vec3d &v1, vec3d &v2); + static float Vec_Length(vec3d &v); + static vec3d Vec_Normalise(vec3d &v); + static vec3d Vec_CrossProduct(vec3d &v1, vec3d &v2); + static vec3d Vec_IntersectPlane(vec3d &plane_p, vec3d &plane_n, vec3d &lineStart, vec3d &lineEnd, float &t); + + static int Triangle_ClipAgainstPlane(vec3d plane_p, vec3d plane_n, triangle &in_tri, triangle &out_tri1, triangle &out_tri2); + }; + + enum RENDERFLAGS + { + RENDER_WIRE = 0x01, + RENDER_FLAT = 0x02, + RENDER_TEXTURED = 0x04, + RENDER_CULL_CW = 0x08, + RENDER_CULL_CCW = 0x10, + RENDER_DEPTH = 0x20, + RENDER_LIGHTS = 0x40, + }; + + enum LIGHTS + { + LIGHT_DISABLED, + LIGHT_AMBIENT, + LIGHT_DIRECTIONAL, + LIGHT_POINT + }; + + + class PipeLine + { + public: + PipeLine(); + + public: + void SetProjection(float fFovDegrees, float fAspectRatio, float fNear, float fFar, float fLeft, float fTop, float fWidth, float fHeight); + void SetCamera(olc::GFX3D::vec3d &pos, olc::GFX3D::vec3d &lookat, olc::GFX3D::vec3d &up); + void SetTransform(olc::GFX3D::mat4x4 &transform); + void SetTexture(olc::Sprite *texture); + //void SetMipMapTexture(olc::GFX3D::MipMap *texture); + void SetLightSource(uint32_t nSlot, uint32_t nType, olc::Pixel col, olc::GFX3D::vec3d pos, olc::GFX3D::vec3d dir = { 0.0f, 0.0f, 1.0f }, float fParam = 0.0f); + uint32_t Render(std::vector &triangles, uint32_t flags = RENDER_CULL_CW | RENDER_TEXTURED | RENDER_DEPTH); + uint32_t Render(std::vector &triangles, uint32_t flags, int nOffset, int nCount); + uint32_t RenderLine(olc::GFX3D::vec3d &p1, olc::GFX3D::vec3d &p2, olc::Pixel col = olc::WHITE); + uint32_t RenderCircleXZ(olc::GFX3D::vec3d &p1, float r, olc::Pixel col = olc::WHITE); + + private: + olc::GFX3D::mat4x4 matProj; + olc::GFX3D::mat4x4 matView; + olc::GFX3D::mat4x4 matWorld; + olc::Sprite *sprTexture; + //olc::GFX3D::MipMap *sprMipMap; + //bool bUseMipMap; + float fViewX; + float fViewY; + float fViewW; + float fViewH; + + struct sLight + { + uint32_t type; + olc::GFX3D::vec3d pos; + olc::GFX3D::vec3d dir; + olc::Pixel col; + float param; + } lights[4]; + }; + + + + public: + //static const int RF_TEXTURE = 0x00000001; + //static const int RF_ = 0x00000002; + + static void ConfigureDisplay(); + static void ClearDepth(); + static void AddTriangleToScene(olc::GFX3D::triangle &tri); + static void RenderScene(); + + static void DrawTriangleFlat(olc::GFX3D::triangle &tri); + static void DrawTriangleWire(olc::GFX3D::triangle &tri, olc::Pixel col = olc::WHITE); + static void DrawTriangleTex(olc::GFX3D::triangle &tri, olc::Sprite* spr); + static void TexturedTriangle(int x1, int y1, float u1, float v1, float w1, + int x2, int y2, float u2, float v2, float w2, + int x3, int y3, float u3, float v3, float w3, olc::Sprite* spr); + + static void RasterTriangle(int x1, int y1, float u1, float v1, float w1, olc::Pixel c1, + int x2, int y2, 
float u2, float v2, float w2, olc::Pixel c2, + int x3, int y3, float u3, float v3, float w3, olc::Pixel c3, + olc::Sprite* spr, + uint32_t nFlags); + + // Draws a sprite with the transform applied + //inline static void DrawSprite(olc::Sprite *sprite, olc::GFX2D::Transform2D &transform); + + private: + static float* m_DepthBuffer; + }; +} + +#endif + + +#ifdef OLC_PGEX_GRAPHICS3D +#undef OLC_PGEX_GRAPHICS3D + +namespace olc +{ + olc::GFX3D::Math::Math() + { + + } + + olc::GFX3D::vec3d olc::GFX3D::Math::Mat_MultiplyVector(olc::GFX3D::mat4x4 &m, olc::GFX3D::vec3d &i) + { + vec3d v; + v.x = i.x * m.m[0][0] + i.y * m.m[1][0] + i.z * m.m[2][0] + i.w * m.m[3][0]; + v.y = i.x * m.m[0][1] + i.y * m.m[1][1] + i.z * m.m[2][1] + i.w * m.m[3][1]; + v.z = i.x * m.m[0][2] + i.y * m.m[1][2] + i.z * m.m[2][2] + i.w * m.m[3][2]; + v.w = i.x * m.m[0][3] + i.y * m.m[1][3] + i.z * m.m[2][3] + i.w * m.m[3][3]; + return v; + } + + olc::GFX3D::mat4x4 olc::GFX3D::Math::Mat_MakeIdentity() + { + olc::GFX3D::mat4x4 matrix; + matrix.m[0][0] = 1.0f; + matrix.m[1][1] = 1.0f; + matrix.m[2][2] = 1.0f; + matrix.m[3][3] = 1.0f; + return matrix; + } + + olc::GFX3D::mat4x4 olc::GFX3D::Math::Mat_MakeRotationX(float fAngleRad) + { + olc::GFX3D::mat4x4 matrix; + matrix.m[0][0] = 1.0f; + matrix.m[1][1] = cosf(fAngleRad); + matrix.m[1][2] = sinf(fAngleRad); + matrix.m[2][1] = -sinf(fAngleRad); + matrix.m[2][2] = cosf(fAngleRad); + matrix.m[3][3] = 1.0f; + return matrix; + } + + olc::GFX3D::mat4x4 olc::GFX3D::Math::Mat_MakeRotationY(float fAngleRad) + { + olc::GFX3D::mat4x4 matrix; + matrix.m[0][0] = cosf(fAngleRad); + matrix.m[0][2] = sinf(fAngleRad); + matrix.m[2][0] = -sinf(fAngleRad); + matrix.m[1][1] = 1.0f; + matrix.m[2][2] = cosf(fAngleRad); + matrix.m[3][3] = 1.0f; + return matrix; + } + + olc::GFX3D::mat4x4 olc::GFX3D::Math::Mat_MakeRotationZ(float fAngleRad) + { + olc::GFX3D::mat4x4 matrix; + matrix.m[0][0] = cosf(fAngleRad); + matrix.m[0][1] = sinf(fAngleRad); + matrix.m[1][0] = -sinf(fAngleRad); + matrix.m[1][1] = cosf(fAngleRad); + matrix.m[2][2] = 1.0f; + matrix.m[3][3] = 1.0f; + return matrix; + } + + olc::GFX3D::mat4x4 olc::GFX3D::Math::Mat_MakeScale(float x, float y, float z) + { + olc::GFX3D::mat4x4 matrix; + matrix.m[0][0] = x; + matrix.m[1][1] = y; + matrix.m[2][2] = z; + matrix.m[3][3] = 1.0f; + return matrix; + } + + olc::GFX3D::mat4x4 olc::GFX3D::Math::Mat_MakeTranslation(float x, float y, float z) + { + olc::GFX3D::mat4x4 matrix; + matrix.m[0][0] = 1.0f; + matrix.m[1][1] = 1.0f; + matrix.m[2][2] = 1.0f; + matrix.m[3][3] = 1.0f; + matrix.m[3][0] = x; + matrix.m[3][1] = y; + matrix.m[3][2] = z; + return matrix; + } + + olc::GFX3D::mat4x4 olc::GFX3D::Math::Mat_MakeProjection(float fFovDegrees, float fAspectRatio, float fNear, float fFar) + { + float fFovRad = 1.0f / tanf(fFovDegrees * 0.5f / 180.0f * 3.14159f); + olc::GFX3D::mat4x4 matrix; + matrix.m[0][0] = fAspectRatio * fFovRad; + matrix.m[1][1] = fFovRad; + matrix.m[2][2] = fFar / (fFar - fNear); + matrix.m[3][2] = (-fFar * fNear) / (fFar - fNear); + matrix.m[2][3] = 1.0f; + matrix.m[3][3] = 0.0f; + return matrix; + } + + olc::GFX3D::mat4x4 olc::GFX3D::Math::Mat_MultiplyMatrix(olc::GFX3D::mat4x4 &m1, olc::GFX3D::mat4x4 &m2) + { + olc::GFX3D::mat4x4 matrix; + for (int c = 0; c < 4; c++) + for (int r = 0; r < 4; r++) + matrix.m[r][c] = m1.m[r][0] * m2.m[0][c] + m1.m[r][1] * m2.m[1][c] + m1.m[r][2] * m2.m[2][c] + m1.m[r][3] * m2.m[3][c]; + return matrix; + } + + olc::GFX3D::mat4x4 olc::GFX3D::Math::Mat_PointAt(olc::GFX3D::vec3d &pos, olc::GFX3D::vec3d 
&target, olc::GFX3D::vec3d &up) + { + // Calculate new forward direction + olc::GFX3D::vec3d newForward = Vec_Sub(target, pos); + newForward = Vec_Normalise(newForward); + + // Calculate new Up direction + olc::GFX3D::vec3d a = Vec_Mul(newForward, Vec_DotProduct(up, newForward)); + olc::GFX3D::vec3d newUp = Vec_Sub(up, a); + newUp = Vec_Normalise(newUp); + + // New Right direction is easy, its just cross product + olc::GFX3D::vec3d newRight = Vec_CrossProduct(newUp, newForward); + + // Construct Dimensioning and Translation Matrix + olc::GFX3D::mat4x4 matrix; + matrix.m[0][0] = newRight.x; matrix.m[0][1] = newRight.y; matrix.m[0][2] = newRight.z; matrix.m[0][3] = 0.0f; + matrix.m[1][0] = newUp.x; matrix.m[1][1] = newUp.y; matrix.m[1][2] = newUp.z; matrix.m[1][3] = 0.0f; + matrix.m[2][0] = newForward.x; matrix.m[2][1] = newForward.y; matrix.m[2][2] = newForward.z; matrix.m[2][3] = 0.0f; + matrix.m[3][0] = pos.x; matrix.m[3][1] = pos.y; matrix.m[3][2] = pos.z; matrix.m[3][3] = 1.0f; + return matrix; + + } + + olc::GFX3D::mat4x4 olc::GFX3D::Math::Mat_QuickInverse(olc::GFX3D::mat4x4 &m) // Only for Rotation/Translation Matrices + { + olc::GFX3D::mat4x4 matrix; + matrix.m[0][0] = m.m[0][0]; matrix.m[0][1] = m.m[1][0]; matrix.m[0][2] = m.m[2][0]; matrix.m[0][3] = 0.0f; + matrix.m[1][0] = m.m[0][1]; matrix.m[1][1] = m.m[1][1]; matrix.m[1][2] = m.m[2][1]; matrix.m[1][3] = 0.0f; + matrix.m[2][0] = m.m[0][2]; matrix.m[2][1] = m.m[1][2]; matrix.m[2][2] = m.m[2][2]; matrix.m[2][3] = 0.0f; + matrix.m[3][0] = -(m.m[3][0] * matrix.m[0][0] + m.m[3][1] * matrix.m[1][0] + m.m[3][2] * matrix.m[2][0]); + matrix.m[3][1] = -(m.m[3][0] * matrix.m[0][1] + m.m[3][1] * matrix.m[1][1] + m.m[3][2] * matrix.m[2][1]); + matrix.m[3][2] = -(m.m[3][0] * matrix.m[0][2] + m.m[3][1] * matrix.m[1][2] + m.m[3][2] * matrix.m[2][2]); + matrix.m[3][3] = 1.0f; + return matrix; + } + + olc::GFX3D::mat4x4 olc::GFX3D::Math::Mat_Inverse(olc::GFX3D::mat4x4 &m) + { + double det; + + + mat4x4 matInv; + + matInv.m[0][0] = m.m[1][1] * m.m[2][2] * m.m[3][3] - m.m[1][1] * m.m[2][3] * m.m[3][2] - m.m[2][1] * m.m[1][2] * m.m[3][3] + m.m[2][1] * m.m[1][3] * m.m[3][2] + m.m[3][1] * m.m[1][2] * m.m[2][3] - m.m[3][1] * m.m[1][3] * m.m[2][2]; + matInv.m[1][0] = -m.m[1][0] * m.m[2][2] * m.m[3][3] + m.m[1][0] * m.m[2][3] * m.m[3][2] + m.m[2][0] * m.m[1][2] * m.m[3][3] - m.m[2][0] * m.m[1][3] * m.m[3][2] - m.m[3][0] * m.m[1][2] * m.m[2][3] + m.m[3][0] * m.m[1][3] * m.m[2][2]; + matInv.m[2][0] = m.m[1][0] * m.m[2][1] * m.m[3][3] - m.m[1][0] * m.m[2][3] * m.m[3][1] - m.m[2][0] * m.m[1][1] * m.m[3][3] + m.m[2][0] * m.m[1][3] * m.m[3][1] + m.m[3][0] * m.m[1][1] * m.m[2][3] - m.m[3][0] * m.m[1][3] * m.m[2][1]; + matInv.m[3][0] = -m.m[1][0] * m.m[2][1] * m.m[3][2] + m.m[1][0] * m.m[2][2] * m.m[3][1] + m.m[2][0] * m.m[1][1] * m.m[3][2] - m.m[2][0] * m.m[1][2] * m.m[3][1] - m.m[3][0] * m.m[1][1] * m.m[2][2] + m.m[3][0] * m.m[1][2] * m.m[2][1]; + matInv.m[0][1] = -m.m[0][1] * m.m[2][2] * m.m[3][3] + m.m[0][1] * m.m[2][3] * m.m[3][2] + m.m[2][1] * m.m[0][2] * m.m[3][3] - m.m[2][1] * m.m[0][3] * m.m[3][2] - m.m[3][1] * m.m[0][2] * m.m[2][3] + m.m[3][1] * m.m[0][3] * m.m[2][2]; + matInv.m[1][1] = m.m[0][0] * m.m[2][2] * m.m[3][3] - m.m[0][0] * m.m[2][3] * m.m[3][2] - m.m[2][0] * m.m[0][2] * m.m[3][3] + m.m[2][0] * m.m[0][3] * m.m[3][2] + m.m[3][0] * m.m[0][2] * m.m[2][3] - m.m[3][0] * m.m[0][3] * m.m[2][2]; + matInv.m[2][1] = -m.m[0][0] * m.m[2][1] * m.m[3][3] + m.m[0][0] * m.m[2][3] * m.m[3][1] + m.m[2][0] * m.m[0][1] * m.m[3][3] - m.m[2][0] * m.m[0][3] * 
m.m[3][1] - m.m[3][0] * m.m[0][1] * m.m[2][3] + m.m[3][0] * m.m[0][3] * m.m[2][1]; + matInv.m[3][1] = m.m[0][0] * m.m[2][1] * m.m[3][2] - m.m[0][0] * m.m[2][2] * m.m[3][1] - m.m[2][0] * m.m[0][1] * m.m[3][2] + m.m[2][0] * m.m[0][2] * m.m[3][1] + m.m[3][0] * m.m[0][1] * m.m[2][2] - m.m[3][0] * m.m[0][2] * m.m[2][1]; + matInv.m[0][2] = m.m[0][1] * m.m[1][2] * m.m[3][3] - m.m[0][1] * m.m[1][3] * m.m[3][2] - m.m[1][1] * m.m[0][2] * m.m[3][3] + m.m[1][1] * m.m[0][3] * m.m[3][2] + m.m[3][1] * m.m[0][2] * m.m[1][3] - m.m[3][1] * m.m[0][3] * m.m[1][2]; + matInv.m[1][2] = -m.m[0][0] * m.m[1][2] * m.m[3][3] + m.m[0][0] * m.m[1][3] * m.m[3][2] + m.m[1][0] * m.m[0][2] * m.m[3][3] - m.m[1][0] * m.m[0][3] * m.m[3][2] - m.m[3][0] * m.m[0][2] * m.m[1][3] + m.m[3][0] * m.m[0][3] * m.m[1][2]; + matInv.m[2][2] = m.m[0][0] * m.m[1][1] * m.m[3][3] - m.m[0][0] * m.m[1][3] * m.m[3][1] - m.m[1][0] * m.m[0][1] * m.m[3][3] + m.m[1][0] * m.m[0][3] * m.m[3][1] + m.m[3][0] * m.m[0][1] * m.m[1][3] - m.m[3][0] * m.m[0][3] * m.m[1][1]; + matInv.m[3][2] = -m.m[0][0] * m.m[1][1] * m.m[3][2] + m.m[0][0] * m.m[1][2] * m.m[3][1] + m.m[1][0] * m.m[0][1] * m.m[3][2] - m.m[1][0] * m.m[0][2] * m.m[3][1] - m.m[3][0] * m.m[0][1] * m.m[1][2] + m.m[3][0] * m.m[0][2] * m.m[1][1]; + matInv.m[0][3] = -m.m[0][1] * m.m[1][2] * m.m[2][3] + m.m[0][1] * m.m[1][3] * m.m[2][2] + m.m[1][1] * m.m[0][2] * m.m[2][3] - m.m[1][1] * m.m[0][3] * m.m[2][2] - m.m[2][1] * m.m[0][2] * m.m[1][3] + m.m[2][1] * m.m[0][3] * m.m[1][2]; + matInv.m[1][3] = m.m[0][0] * m.m[1][2] * m.m[2][3] - m.m[0][0] * m.m[1][3] * m.m[2][2] - m.m[1][0] * m.m[0][2] * m.m[2][3] + m.m[1][0] * m.m[0][3] * m.m[2][2] + m.m[2][0] * m.m[0][2] * m.m[1][3] - m.m[2][0] * m.m[0][3] * m.m[1][2]; + matInv.m[2][3] = -m.m[0][0] * m.m[1][1] * m.m[2][3] + m.m[0][0] * m.m[1][3] * m.m[2][1] + m.m[1][0] * m.m[0][1] * m.m[2][3] - m.m[1][0] * m.m[0][3] * m.m[2][1] - m.m[2][0] * m.m[0][1] * m.m[1][3] + m.m[2][0] * m.m[0][3] * m.m[1][1]; + matInv.m[3][3] = m.m[0][0] * m.m[1][1] * m.m[2][2] - m.m[0][0] * m.m[1][2] * m.m[2][1] - m.m[1][0] * m.m[0][1] * m.m[2][2] + m.m[1][0] * m.m[0][2] * m.m[2][1] + m.m[2][0] * m.m[0][1] * m.m[1][2] - m.m[2][0] * m.m[0][2] * m.m[1][1]; + + det = m.m[0][0] * matInv.m[0][0] + m.m[0][1] * matInv.m[1][0] + m.m[0][2] * matInv.m[2][0] + m.m[0][3] * matInv.m[3][0]; + // if (det == 0) return false; + + det = 1.0 / det; + + for (int i = 0; i < 4; i++) + for (int j = 0; j < 4; j++) + matInv.m[i][j] *= (float)det; + + return matInv; + } + + olc::GFX3D::vec3d olc::GFX3D::Math::Vec_Add(olc::GFX3D::vec3d &v1, olc::GFX3D::vec3d &v2) + { + return { v1.x + v2.x, v1.y + v2.y, v1.z + v2.z }; + } + + olc::GFX3D::vec3d olc::GFX3D::Math::Vec_Sub(olc::GFX3D::vec3d &v1, olc::GFX3D::vec3d &v2) + { + return { v1.x - v2.x, v1.y - v2.y, v1.z - v2.z }; + } + + olc::GFX3D::vec3d olc::GFX3D::Math::Vec_Mul(olc::GFX3D::vec3d &v1, float k) + { + return { v1.x * k, v1.y * k, v1.z * k }; + } + + olc::GFX3D::vec3d olc::GFX3D::Math::Vec_Div(olc::GFX3D::vec3d &v1, float k) + { + return { v1.x / k, v1.y / k, v1.z / k }; + } + + float olc::GFX3D::Math::Vec_DotProduct(olc::GFX3D::vec3d &v1, olc::GFX3D::vec3d &v2) + { + return v1.x*v2.x + v1.y*v2.y + v1.z * v2.z; + } + + float olc::GFX3D::Math::Vec_Length(olc::GFX3D::vec3d &v) + { + return sqrtf(Vec_DotProduct(v, v)); + } + + olc::GFX3D::vec3d olc::GFX3D::Math::Vec_Normalise(olc::GFX3D::vec3d &v) + { + float l = Vec_Length(v); + return { v.x / l, v.y / l, v.z / l }; + } + + olc::GFX3D::vec3d olc::GFX3D::Math::Vec_CrossProduct(olc::GFX3D::vec3d &v1, 
olc::GFX3D::vec3d &v2) + { + vec3d v; + v.x = v1.y * v2.z - v1.z * v2.y; + v.y = v1.z * v2.x - v1.x * v2.z; + v.z = v1.x * v2.y - v1.y * v2.x; + return v; + } + + olc::GFX3D::vec3d olc::GFX3D::Math::Vec_IntersectPlane(olc::GFX3D::vec3d &plane_p, olc::GFX3D::vec3d &plane_n, olc::GFX3D::vec3d &lineStart, olc::GFX3D::vec3d &lineEnd, float &t) + { + plane_n = Vec_Normalise(plane_n); + float plane_d = -Vec_DotProduct(plane_n, plane_p); + float ad = Vec_DotProduct(lineStart, plane_n); + float bd = Vec_DotProduct(lineEnd, plane_n); + t = (-plane_d - ad) / (bd - ad); + olc::GFX3D::vec3d lineStartToEnd = Vec_Sub(lineEnd, lineStart); + olc::GFX3D::vec3d lineToIntersect = Vec_Mul(lineStartToEnd, t); + return Vec_Add(lineStart, lineToIntersect); + } + + + int olc::GFX3D::Math::Triangle_ClipAgainstPlane(vec3d plane_p, vec3d plane_n, triangle &in_tri, triangle &out_tri1, triangle &out_tri2) + { + // Make sure plane normal is indeed normal + plane_n = Math::Vec_Normalise(plane_n); + + out_tri1.t[0] = in_tri.t[0]; + out_tri2.t[0] = in_tri.t[0]; + out_tri1.t[1] = in_tri.t[1]; + out_tri2.t[1] = in_tri.t[1]; + out_tri1.t[2] = in_tri.t[2]; + out_tri2.t[2] = in_tri.t[2]; + + // Return signed shortest distance from point to plane, plane normal must be normalised + auto dist = [&](vec3d &p) + { + vec3d n = Math::Vec_Normalise(p); + return (plane_n.x * p.x + plane_n.y * p.y + plane_n.z * p.z - Math::Vec_DotProduct(plane_n, plane_p)); + }; + + // Create two temporary storage arrays to classify points either side of plane + // If distance sign is positive, point lies on "inside" of plane + vec3d* inside_points[3]; int nInsidePointCount = 0; + vec3d* outside_points[3]; int nOutsidePointCount = 0; + vec2d* inside_tex[3]; int nInsideTexCount = 0; + vec2d* outside_tex[3]; int nOutsideTexCount = 0; + + + // Get signed distance of each point in triangle to plane + float d0 = dist(in_tri.p[0]); + float d1 = dist(in_tri.p[1]); + float d2 = dist(in_tri.p[2]); + + if (d0 >= 0) { inside_points[nInsidePointCount++] = &in_tri.p[0]; inside_tex[nInsideTexCount++] = &in_tri.t[0]; } + else { + outside_points[nOutsidePointCount++] = &in_tri.p[0]; outside_tex[nOutsideTexCount++] = &in_tri.t[0]; + } + if (d1 >= 0) { + inside_points[nInsidePointCount++] = &in_tri.p[1]; inside_tex[nInsideTexCount++] = &in_tri.t[1]; + } + else { + outside_points[nOutsidePointCount++] = &in_tri.p[1]; outside_tex[nOutsideTexCount++] = &in_tri.t[1]; + } + if (d2 >= 0) { + inside_points[nInsidePointCount++] = &in_tri.p[2]; inside_tex[nInsideTexCount++] = &in_tri.t[2]; + } + else { + outside_points[nOutsidePointCount++] = &in_tri.p[2]; outside_tex[nOutsideTexCount++] = &in_tri.t[2]; + } + + // Now classify triangle points, and break the input triangle into + // smaller output triangles if required. There are four possible + // outcomes... + + if (nInsidePointCount == 0) + { + // All points lie on the outside of plane, so clip whole triangle + // It ceases to exist + + return 0; // No returned triangles are valid + } + + if (nInsidePointCount == 3) + { + // All points lie on the inside of plane, so do nothing + // and allow the triangle to simply pass through + out_tri1 = in_tri; + + return 1; // Just the one returned original triangle is valid + } + + if (nInsidePointCount == 1 && nOutsidePointCount == 2) + { + // Triangle should be clipped. 
As two points lie outside + // the plane, the triangle simply becomes a smaller triangle + + // Copy appearance info to new triangle + out_tri1.col[0] = in_tri.col[0]; + out_tri1.col[1] = in_tri.col[1]; + out_tri1.col[2] = in_tri.col[2]; + + // The inside point is valid, so keep that... + out_tri1.p[0] = *inside_points[0]; + out_tri1.t[0] = *inside_tex[0]; + + // but the two new points are at the locations where the + // original sides of the triangle (lines) intersect with the plane + float t; + out_tri1.p[1] = Math::Vec_IntersectPlane(plane_p, plane_n, *inside_points[0], *outside_points[0], t); + out_tri1.t[1].x = t * (outside_tex[0]->x - inside_tex[0]->x) + inside_tex[0]->x; + out_tri1.t[1].y = t * (outside_tex[0]->y - inside_tex[0]->y) + inside_tex[0]->y; + out_tri1.t[1].z = t * (outside_tex[0]->z - inside_tex[0]->z) + inside_tex[0]->z; + + out_tri1.p[2] = Math::Vec_IntersectPlane(plane_p, plane_n, *inside_points[0], *outside_points[1], t); + out_tri1.t[2].x = t * (outside_tex[1]->x - inside_tex[0]->x) + inside_tex[0]->x; + out_tri1.t[2].y = t * (outside_tex[1]->y - inside_tex[0]->y) + inside_tex[0]->y; + out_tri1.t[2].z = t * (outside_tex[1]->z - inside_tex[0]->z) + inside_tex[0]->z; + + return 1; // Return the newly formed single triangle + } + + if (nInsidePointCount == 2 && nOutsidePointCount == 1) + { + // Triangle should be clipped. As two points lie inside the plane, + // the clipped triangle becomes a "quad". Fortunately, we can + // represent a quad with two new triangles + + // Copy appearance info to new triangles + out_tri1.col[0] = in_tri.col[0]; + out_tri2.col[0] = in_tri.col[0]; + out_tri1.col[1] = in_tri.col[1]; + out_tri2.col[1] = in_tri.col[1]; + out_tri1.col[2] = in_tri.col[2]; + out_tri2.col[2] = in_tri.col[2]; + + // The first triangle consists of the two inside points and a new + // point determined by the location where one side of the triangle + // intersects with the plane + out_tri1.p[0] = *inside_points[0]; + out_tri1.t[0] = *inside_tex[0]; + + out_tri1.p[1] = *inside_points[1]; + out_tri1.t[1] = *inside_tex[1]; + + float t; + out_tri1.p[2] = Math::Vec_IntersectPlane(plane_p, plane_n, *inside_points[0], *outside_points[0], t); + out_tri1.t[2].x = t * (outside_tex[0]->x - inside_tex[0]->x) + inside_tex[0]->x; + out_tri1.t[2].y = t * (outside_tex[0]->y - inside_tex[0]->y) + inside_tex[0]->y; + out_tri1.t[2].z = t * (outside_tex[0]->z - inside_tex[0]->z) + inside_tex[0]->z; + + // The second triangle is composed of one of he inside points, a + // new point determined by the intersection of the other side of the + // triangle and the plane, and the newly created point above + out_tri2.p[1] = *inside_points[1]; + out_tri2.t[1] = *inside_tex[1]; + out_tri2.p[0] = out_tri1.p[2]; + out_tri2.t[0] = out_tri1.t[2]; + out_tri2.p[2] = Math::Vec_IntersectPlane(plane_p, plane_n, *inside_points[1], *outside_points[0], t); + out_tri2.t[2].x = t * (outside_tex[0]->x - inside_tex[1]->x) + inside_tex[1]->x; + out_tri2.t[2].y = t * (outside_tex[0]->y - inside_tex[1]->y) + inside_tex[1]->y; + out_tri2.t[2].z = t * (outside_tex[0]->z - inside_tex[1]->z) + inside_tex[1]->z; + return 2; // Return two newly formed triangles which form a quad + } + + return 0; + } + + void GFX3D::DrawTriangleFlat(olc::GFX3D::triangle &tri) + { + pge->FillTriangle((int32_t)tri.p[0].x, (int32_t)tri.p[0].y, (int32_t)tri.p[1].x, (int32_t)tri.p[1].y, (int32_t)tri.p[2].x, (int32_t)tri.p[2].y, tri.col[0]); + } + + void GFX3D::DrawTriangleWire(olc::GFX3D::triangle &tri, olc::Pixel col) + { + 
pge->DrawTriangle((int32_t)tri.p[0].x, (int32_t)tri.p[0].y, (int32_t)tri.p[1].x, (int32_t)tri.p[1].y, (int32_t)tri.p[2].x, (int32_t)tri.p[2].y, col); + } + + void GFX3D::TexturedTriangle(int x1, int y1, float u1, float v1, float w1, + int x2, int y2, float u2, float v2, float w2, + int x3, int y3, float u3, float v3, float w3, olc::Sprite* spr) + + { + if (y2 < y1) + { + std::swap(y1, y2); + std::swap(x1, x2); + std::swap(u1, u2); + std::swap(v1, v2); + std::swap(w1, w2); + } + + if (y3 < y1) + { + std::swap(y1, y3); + std::swap(x1, x3); + std::swap(u1, u3); + std::swap(v1, v3); + std::swap(w1, w3); + } + + if (y3 < y2) + { + std::swap(y2, y3); + std::swap(x2, x3); + std::swap(u2, u3); + std::swap(v2, v3); + std::swap(w2, w3); + } + + int dy1 = y2 - y1; + int dx1 = x2 - x1; + float dv1 = v2 - v1; + float du1 = u2 - u1; + float dw1 = w2 - w1; + + int dy2 = y3 - y1; + int dx2 = x3 - x1; + float dv2 = v3 - v1; + float du2 = u3 - u1; + float dw2 = w3 - w1; + + float tex_u, tex_v, tex_w; + + float dax_step = 0, dbx_step = 0, + du1_step = 0, dv1_step = 0, + du2_step = 0, dv2_step = 0, + dw1_step = 0, dw2_step = 0; + + if (dy1) dax_step = dx1 / (float)abs(dy1); + if (dy2) dbx_step = dx2 / (float)abs(dy2); + + if (dy1) du1_step = du1 / (float)abs(dy1); + if (dy1) dv1_step = dv1 / (float)abs(dy1); + if (dy1) dw1_step = dw1 / (float)abs(dy1); + + if (dy2) du2_step = du2 / (float)abs(dy2); + if (dy2) dv2_step = dv2 / (float)abs(dy2); + if (dy2) dw2_step = dw2 / (float)abs(dy2); + + if (dy1) + { + for (int i = y1; i <= y2; i++) + { + int ax = int(x1 + (float)(i - y1) * dax_step); + int bx = int(x1 + (float)(i - y1) * dbx_step); + + float tex_su = u1 + (float)(i - y1) * du1_step; + float tex_sv = v1 + (float)(i - y1) * dv1_step; + float tex_sw = w1 + (float)(i - y1) * dw1_step; + + float tex_eu = u1 + (float)(i - y1) * du2_step; + float tex_ev = v1 + (float)(i - y1) * dv2_step; + float tex_ew = w1 + (float)(i - y1) * dw2_step; + + if (ax > bx) + { + std::swap(ax, bx); + std::swap(tex_su, tex_eu); + std::swap(tex_sv, tex_ev); + std::swap(tex_sw, tex_ew); + } + + tex_u = tex_su; + tex_v = tex_sv; + tex_w = tex_sw; + + float tstep = 1.0f / ((float)(bx - ax)); + float t = 0.0f; + + for (int j = ax; j < bx; j++) + { + tex_u = (1.0f - t) * tex_su + t * tex_eu; + tex_v = (1.0f - t) * tex_sv + t * tex_ev; + tex_w = (1.0f - t) * tex_sw + t * tex_ew; + if (tex_w > m_DepthBuffer[i*pge->ScreenWidth() + j]) + { + /*if (bMipMap) + pge->Draw(j, i, ((olc::GFX3D::MipMap*)spr)->Sample(tex_u / tex_w, tex_v / tex_w, tex_w)); + else*/ + if(pge->Draw(j, i, spr != nullptr ? 
spr->Sample(tex_u / tex_w, tex_v / tex_w) : olc::GREY)) + m_DepthBuffer[i*pge->ScreenWidth() + j] = tex_w; + } + t += tstep; + } + + } + } + + dy1 = y3 - y2; + dx1 = x3 - x2; + dv1 = v3 - v2; + du1 = u3 - u2; + dw1 = w3 - w2; + + if (dy1) dax_step = dx1 / (float)abs(dy1); + if (dy2) dbx_step = dx2 / (float)abs(dy2); + + du1_step = 0, dv1_step = 0; + if (dy1) du1_step = du1 / (float)abs(dy1); + if (dy1) dv1_step = dv1 / (float)abs(dy1); + if (dy1) dw1_step = dw1 / (float)abs(dy1); + + if (dy1) + { + for (int i = y2; i <= y3; i++) + { + int ax = int(x2 + (float)(i - y2) * dax_step); + int bx = int(x1 + (float)(i - y1) * dbx_step); + + float tex_su = u2 + (float)(i - y2) * du1_step; + float tex_sv = v2 + (float)(i - y2) * dv1_step; + float tex_sw = w2 + (float)(i - y2) * dw1_step; + + float tex_eu = u1 + (float)(i - y1) * du2_step; + float tex_ev = v1 + (float)(i - y1) * dv2_step; + float tex_ew = w1 + (float)(i - y1) * dw2_step; + + if (ax > bx) + { + std::swap(ax, bx); + std::swap(tex_su, tex_eu); + std::swap(tex_sv, tex_ev); + std::swap(tex_sw, tex_ew); + } + + tex_u = tex_su; + tex_v = tex_sv; + tex_w = tex_sw; + + float tstep = 1.0f / ((float)(bx - ax)); + float t = 0.0f; + + for (int j = ax; j < bx; j++) + { + tex_u = (1.0f - t) * tex_su + t * tex_eu; + tex_v = (1.0f - t) * tex_sv + t * tex_ev; + tex_w = (1.0f - t) * tex_sw + t * tex_ew; + + if (tex_w > m_DepthBuffer[i*pge->ScreenWidth() + j]) + { + /*if(bMipMap) + pge->Draw(j, i, ((olc::GFX3D::MipMap*)spr)->Sample(tex_u / tex_w, tex_v / tex_w, tex_w)); + else*/ + if(pge->Draw(j, i, spr != nullptr ? spr->Sample(tex_u / tex_w, tex_v / tex_w) : olc::GREY)) + m_DepthBuffer[i*pge->ScreenWidth() + j] = tex_w; + } + t += tstep; + } + } + } + } + + + void GFX3D::DrawTriangleTex(olc::GFX3D::triangle &tri, olc::Sprite* spr) + { + + } + + float* GFX3D::m_DepthBuffer = nullptr; + + void GFX3D::ConfigureDisplay() + { + m_DepthBuffer = new float[pge->ScreenWidth() * pge->ScreenHeight()]{ 0 }; + } + + + void GFX3D::ClearDepth() + { + memset(m_DepthBuffer, 0, pge->ScreenWidth() * pge->ScreenHeight() * sizeof(float)); + } + + bool GFX3D::mesh::LoadOBJFile(std::string sFilename, bool bHasTexture) + { + std::ifstream f(sFilename); + if (!f.is_open()) return false; + + // Local cache of verts + std::vector verts; + std::vector norms; + std::vector texs; + + while (!f.eof()) + { + char line[128]; + f.getline(line, 128); + + std::stringstream s; + s << line; + + char junk; + + if (line[0] == 'v') + { + if (line[1] == 't') + { + vec2d v; + s >> junk >> junk >> v.x >> v.y; + //v.x = 1.0f - v.x; + v.y = 1.0f - v.y; + texs.push_back(v); + } + else if (line[1] == 'n') + { + vec3d v; + s >> junk >> junk >> v.x >> v.y >> v.z; + norms.push_back(v); + } + else + { + vec3d v; + s >> junk >> v.x >> v.y >> v.z; + verts.push_back(v); + } + } + + + /*if (!bHasTexture) + { + if (line[0] == 'f') + { + int f[3]; + s >> junk >> f[0] >> f[1] >> f[2]; + tris.push_back({ verts[f[0] - 1], verts[f[1] - 1], verts[f[2] - 1] }); + } + } + else*/ + { + if (line[0] == 'f') + { + s >> junk; + + std::string tokens[9]; + int nTokenCount = -1; + while (!s.eof()) + { + char c = s.get(); + if (c == ' ' || c == '/') + { + if (tokens[nTokenCount].size() > 0) + { + nTokenCount++; + } + } + else + tokens[nTokenCount].append(1, c); + } + + tokens[nTokenCount].pop_back(); + + int stride = 1; + if (!texs.empty()) stride++; + if (!norms.empty()) stride++; + + if (!texs.empty()) + { + tris.push_back({ + verts[stoi(tokens[0 * stride]) - 1], + verts[stoi(tokens[1 * stride]) - 1], + verts[stoi(tokens[2 
* stride]) - 1], + texs[stoi(tokens[0 * stride + 1]) - 1], + texs[stoi(tokens[1 * stride + 1]) - 1], + texs[stoi(tokens[2 * stride + 1]) - 1], + olc::WHITE, olc::WHITE, olc::WHITE}); + } + else + { + tris.push_back({ + verts[stoi(tokens[0 * stride]) - 1], + verts[stoi(tokens[1 * stride]) - 1], + verts[stoi(tokens[2 * stride]) - 1], + olc::GFX3D::vec2d{0,0,0}, + olc::GFX3D::vec2d{0,0,0}, + olc::GFX3D::vec2d{0,0,0}, + olc::WHITE, olc::WHITE, olc::WHITE }); + + } + } + } + } + return true; + } + + + GFX3D::PipeLine::PipeLine() + { + //bUseMipMap = false; + } + + void GFX3D::PipeLine::SetProjection(float fFovDegrees, float fAspectRatio, float fNear, float fFar, float fLeft, float fTop, float fWidth, float fHeight) + { + matProj = GFX3D::Math::Mat_MakeProjection(fFovDegrees, fAspectRatio, fNear, fFar); + fViewX = fLeft; + fViewY = fTop; + fViewW = fWidth; + fViewH = fHeight; + } + + void GFX3D::PipeLine::SetCamera(olc::GFX3D::vec3d &pos, olc::GFX3D::vec3d &lookat, olc::GFX3D::vec3d &up) + { + matView = GFX3D::Math::Mat_PointAt(pos, lookat, up); + matView = GFX3D::Math::Mat_QuickInverse(matView); + } + + void GFX3D::PipeLine::SetTransform(olc::GFX3D::mat4x4 &transform) + { + matWorld = transform; + } + + void GFX3D::PipeLine::SetTexture(olc::Sprite *texture) + { + sprTexture = texture; + //bUseMipMap = false; + } + + /*void GFX3D::PipeLine::SetMipMapTexture(olc::GFX3D::MipMap *texture) + { + sprMipMap = texture; + bUseMipMap = true; + }*/ + + void GFX3D::PipeLine::SetLightSource(uint32_t nSlot, uint32_t nType, olc::Pixel col, olc::GFX3D::vec3d pos, olc::GFX3D::vec3d dir, float fParam) + { + if (nSlot < 4) + { + lights[nSlot].type = nType; + lights[nSlot].pos = pos; + lights[nSlot].dir = dir; + lights[nSlot].col = col; + lights[nSlot].param = fParam; + } + } + + uint32_t GFX3D::PipeLine::Render(std::vector &triangles, uint32_t flags) + { + return Render(triangles, flags, 0, triangles.size()); + } + + uint32_t GFX3D::PipeLine::RenderLine(olc::GFX3D::vec3d &p1, olc::GFX3D::vec3d &p2, olc::Pixel col) + { + // Coordinates are assumed to be in world space + olc::GFX3D::vec3d t1, t2; + + // Transform into view + t1 = GFX3D::Math::Mat_MultiplyVector(matView, p1); + t2 = GFX3D::Math::Mat_MultiplyVector(matView, p2); + + // Project onto screen + t1 = GFX3D::Math::Mat_MultiplyVector(matProj, t1); + t2 = GFX3D::Math::Mat_MultiplyVector(matProj, t2); + + // Project + t1.x = t1.x / t1.w; + t1.y = t1.y / t1.w; + t1.z = t1.z / t1.w; + + t2.x = t2.x / t2.w; + t2.y = t2.y / t2.w; + t2.z = t2.z / t2.w; + + vec3d vOffsetView = { 1,1,0 }; + t1 = Math::Vec_Add(t1, vOffsetView); + t2 = Math::Vec_Add(t2, vOffsetView); + + t1.x *= 0.5f * fViewW; + t1.y *= 0.5f * fViewH; + t2.x *= 0.5f * fViewW; + t2.y *= 0.5f * fViewH; + + vOffsetView = { fViewX,fViewY,0 }; + t1 = Math::Vec_Add(t1, vOffsetView); + t2 = Math::Vec_Add(t2, vOffsetView); + + pge->DrawLine((int32_t)t1.x, (int32_t)t1.y, (int32_t)t2.x, (int32_t)t2.y, col); + + return 0; + } + + uint32_t GFX3D::PipeLine::RenderCircleXZ(olc::GFX3D::vec3d &p1, float r, olc::Pixel col) + { + // Coordinates are assumed to be in world space + olc::GFX3D::vec3d t1; + olc::GFX3D::vec3d t2 = { p1.x + r, p1.y, p1.z }; + + // Transform into view + t1 = GFX3D::Math::Mat_MultiplyVector(matView, p1); + t2 = GFX3D::Math::Mat_MultiplyVector(matView, t2); + + // Project onto screen + t1 = GFX3D::Math::Mat_MultiplyVector(matProj, t1); + t2 = GFX3D::Math::Mat_MultiplyVector(matProj, t2); + + // Project + t1.x = t1.x / t1.w; + t1.y = t1.y / t1.w; + t1.z = t1.z / t1.w; + + t2.x = t2.x / t2.w; + 
t2.y = t2.y / t2.w; + t2.z = t2.z / t2.w; + + vec3d vOffsetView = { 1,1,0 }; + t1 = Math::Vec_Add(t1, vOffsetView); + t2 = Math::Vec_Add(t2, vOffsetView); + + t1.x *= 0.5f * fViewW; + t1.y *= 0.5f * fViewH; + t2.x *= 0.5f * fViewW; + t2.y *= 0.5f * fViewH; + + vOffsetView = { fViewX,fViewY,0 }; + t1 = Math::Vec_Add(t1, vOffsetView); + t2 = Math::Vec_Add(t2, vOffsetView); + + pge->FillCircle((int32_t)t1.x, (int32_t)t1.y, (int32_t)fabs(t2.x - t1.x), col); + + return 0; + } + + uint32_t GFX3D::PipeLine::Render(std::vector &triangles, uint32_t flags, int nOffset, int nCount) + { + // Calculate Transformation Matrix + mat4x4 matWorldView = Math::Mat_MultiplyMatrix(matWorld, matView); + //matWorldViewProj = Math::Mat_MultiplyMatrix(matWorldView, matProj); + + // Store triangles for rastering later + std::vector vecTrianglesToRaster; + + int nTriangleDrawnCount = 0; + + // Process Triangles + //for (auto &tri : triangles) + // omp_set_dynamic(0); + // omp_set_num_threads(4); + //#pragma omp parallel for schedule(static) + for(int tx = nOffset; tx < nOffset+nCount; tx++) + { + GFX3D::triangle &tri = triangles[tx]; + GFX3D::triangle triTransformed; + + // Just copy through texture coordinates + triTransformed.t[0] = { tri.t[0].x, tri.t[0].y, tri.t[0].z }; + triTransformed.t[1] = { tri.t[1].x, tri.t[1].y, tri.t[1].z }; + triTransformed.t[2] = { tri.t[2].x, tri.t[2].y, tri.t[2].z }; // Think! + + // Dont forget vertex colours + triTransformed.col[0] = tri.col[0]; + triTransformed.col[1] = tri.col[1]; + triTransformed.col[2] = tri.col[2]; + + // Transform Triangle from object into projected space + triTransformed.p[0] = GFX3D::Math::Mat_MultiplyVector(matWorldView, tri.p[0]); + triTransformed.p[1] = GFX3D::Math::Mat_MultiplyVector(matWorldView, tri.p[1]); + triTransformed.p[2] = GFX3D::Math::Mat_MultiplyVector(matWorldView, tri.p[2]); + + // Calculate Triangle Normal in WorldView Space + GFX3D::vec3d normal, line1, line2; + line1 = GFX3D::Math::Vec_Sub(triTransformed.p[1], triTransformed.p[0]); + line2 = GFX3D::Math::Vec_Sub(triTransformed.p[2], triTransformed.p[0]); + normal = GFX3D::Math::Vec_CrossProduct(line1, line2); + normal = GFX3D::Math::Vec_Normalise(normal); + + // Cull triangles that face away from viewer + if (flags & RENDER_CULL_CW && GFX3D::Math::Vec_DotProduct(normal, triTransformed.p[0]) > 0.0f) { + continue; + } + if (flags & RENDER_CULL_CCW && GFX3D::Math::Vec_DotProduct(normal, triTransformed.p[0]) < 0.0f) { + continue; + } + + // If Lighting, calculate shading + if (flags & RENDER_LIGHTS) + { + olc::Pixel ambient_clamp = { 0,0,0 }; + olc::Pixel light_combined = { 0,0,0 }; + uint32_t nLightSources = 0; + float nLightR = 0, nLightG = 0, nLightB = 0; + + for (int i = 0; i < 4; i++) + { + switch (lights[i].type) + { + case LIGHT_DISABLED: + break; + case LIGHT_AMBIENT: + ambient_clamp = lights[i].col; + break; + case LIGHT_DIRECTIONAL: + { + nLightSources++; + GFX3D::vec3d light_dir = GFX3D::Math::Vec_Normalise(lights[i].dir); + float light = GFX3D::Math::Vec_DotProduct(light_dir, normal); + if (light > 0) + { + int j = 0; + } + + light = std::max(light, 0.0f); + nLightR += light * (lights[i].col.r/255.0f); + nLightG += light * (lights[i].col.g/255.0f); + nLightB += light * (lights[i].col.b/255.0f); + } + break; + case LIGHT_POINT: + break; + } + } + + //nLightR /= nLightSources; + //nLightG /= nLightSources; + //nLightB /= nLightSources; + + nLightR = std::max(nLightR, ambient_clamp.r / 255.0f); + nLightG = std::max(nLightG, ambient_clamp.g / 255.0f); + nLightB = std::max(nLightB, 
ambient_clamp.b / 255.0f); + + triTransformed.col[0] = olc::Pixel(uint8_t(nLightR * triTransformed.col[0].r), uint8_t(nLightG * triTransformed.col[0].g), uint8_t(nLightB * triTransformed.col[0].b)); + triTransformed.col[1] = olc::Pixel(uint8_t(nLightR * triTransformed.col[1].r), uint8_t(nLightG * triTransformed.col[1].g), uint8_t(nLightB * triTransformed.col[1].b)); + triTransformed.col[2] = olc::Pixel(uint8_t(nLightR * triTransformed.col[2].r), uint8_t(nLightG * triTransformed.col[2].g), uint8_t(nLightB * triTransformed.col[2].b)); + + + + /*GFX3D::vec3d light_dir = { 1,1,1 }; + light_dir = GFX3D::Math::Vec_Normalise(light_dir); + float light = GFX3D::Math::Vec_DotProduct(light_dir, normal); + if (light < 0) light = 0; + triTransformed.col[0] = olc::Pixel(light * 255.0f, light * 255.0f, light * 255.0f); + triTransformed.col[1] = olc::Pixel(light * 255.0f, light * 255.0f, light * 255.0f); + triTransformed.col[2] = olc::Pixel(light * 255.0f, light * 255.0f, light * 255.0f);*/ + } + //else + // triTransformed.col = olc::WHITE; + + // Clip triangle against near plane + int nClippedTriangles = 0; + GFX3D::triangle clipped[2]; + nClippedTriangles = GFX3D::Math::Triangle_ClipAgainstPlane({ 0.0f, 0.0f, 0.1f }, { 0.0f, 0.0f, 1.0f }, triTransformed, clipped[0], clipped[1]); + + // This may yield two new triangles + for (int n = 0; n < nClippedTriangles; n++) + { + triangle triProjected = clipped[n]; + + // Project new triangle + triProjected.p[0] = GFX3D::Math::Mat_MultiplyVector(matProj, clipped[n].p[0]); + triProjected.p[1] = GFX3D::Math::Mat_MultiplyVector(matProj, clipped[n].p[1]); + triProjected.p[2] = GFX3D::Math::Mat_MultiplyVector(matProj, clipped[n].p[2]); + + // Apply Projection to Verts + triProjected.p[0].x = triProjected.p[0].x / triProjected.p[0].w; + triProjected.p[1].x = triProjected.p[1].x / triProjected.p[1].w; + triProjected.p[2].x = triProjected.p[2].x / triProjected.p[2].w; + + triProjected.p[0].y = triProjected.p[0].y / triProjected.p[0].w; + triProjected.p[1].y = triProjected.p[1].y / triProjected.p[1].w; + triProjected.p[2].y = triProjected.p[2].y / triProjected.p[2].w; + + triProjected.p[0].z = triProjected.p[0].z / triProjected.p[0].w; + triProjected.p[1].z = triProjected.p[1].z / triProjected.p[1].w; + triProjected.p[2].z = triProjected.p[2].z / triProjected.p[2].w; + + // Apply Projection to Tex coords + triProjected.t[0].x = triProjected.t[0].x / triProjected.p[0].w; + triProjected.t[1].x = triProjected.t[1].x / triProjected.p[1].w; + triProjected.t[2].x = triProjected.t[2].x / triProjected.p[2].w; + + triProjected.t[0].y = triProjected.t[0].y / triProjected.p[0].w; + triProjected.t[1].y = triProjected.t[1].y / triProjected.p[1].w; + triProjected.t[2].y = triProjected.t[2].y / triProjected.p[2].w; + + triProjected.t[0].z = 1.0f / triProjected.p[0].w; + triProjected.t[1].z = 1.0f / triProjected.p[1].w; + triProjected.t[2].z = 1.0f / triProjected.p[2].w; + + // Clip against viewport in screen space + // Clip triangles against all four screen edges, this could yield + // a bunch of triangles, so create a queue that we traverse to + // ensure we only test new triangles generated against planes + GFX3D::triangle sclipped[2]; + std::list listTriangles; + + + // Add initial triangle + listTriangles.push_back(triProjected); + int nNewTriangles = 1; + + for (int p = 0; p < 4; p++) + { + int nTrisToAdd = 0; + while (nNewTriangles > 0) + { + // Take triangle from front of queue + triangle test = listTriangles.front(); + listTriangles.pop_front(); + nNewTriangles--; + + // Clip it 
against a plane. We only need to test each + // subsequent plane, against subsequent new triangles + // as all triangles after a plane clip are guaranteed + // to lie on the inside of the plane. I like how this + // comment is almost completely and utterly justified + switch (p) + { + case 0: nTrisToAdd = GFX3D::Math::Triangle_ClipAgainstPlane({ 0.0f, -1.0f, 0.0f }, { 0.0f, 1.0f, 0.0f }, test, sclipped[0], sclipped[1]); break; + case 1: nTrisToAdd = GFX3D::Math::Triangle_ClipAgainstPlane({ 0.0f, +1.0f, 0.0f }, { 0.0f, -1.0f, 0.0f }, test, sclipped[0], sclipped[1]); break; + case 2: nTrisToAdd = GFX3D::Math::Triangle_ClipAgainstPlane({ -1.0f, 0.0f, 0.0f }, { 1.0f, 0.0f, 0.0f }, test, sclipped[0], sclipped[1]); break; + case 3: nTrisToAdd = GFX3D::Math::Triangle_ClipAgainstPlane({ +1.0f, 0.0f, 0.0f }, { -1.0f, 0.0f, 0.0f }, test, sclipped[0], sclipped[1]); break; + } + + + // Clipping may yield a variable number of triangles, so + // add these new ones to the back of the queue for subsequent + // clipping against next planes + for (int w = 0; w < nTrisToAdd; w++) + listTriangles.push_back(sclipped[w]); + } + nNewTriangles = listTriangles.size(); + } + + for (auto &triRaster : listTriangles) + { + // Scale to viewport + /*triRaster.p[0].x *= -1.0f; + triRaster.p[1].x *= -1.0f; + triRaster.p[2].x *= -1.0f; + triRaster.p[0].y *= -1.0f; + triRaster.p[1].y *= -1.0f; + triRaster.p[2].y *= -1.0f;*/ + vec3d vOffsetView = { 1,1,0 }; + triRaster.p[0] = Math::Vec_Add(triRaster.p[0], vOffsetView); + triRaster.p[1] = Math::Vec_Add(triRaster.p[1], vOffsetView); + triRaster.p[2] = Math::Vec_Add(triRaster.p[2], vOffsetView); + triRaster.p[0].x *= 0.5f * fViewW; + triRaster.p[0].y *= 0.5f * fViewH; + triRaster.p[1].x *= 0.5f * fViewW; + triRaster.p[1].y *= 0.5f * fViewH; + triRaster.p[2].x *= 0.5f * fViewW; + triRaster.p[2].y *= 0.5f * fViewH; + vOffsetView = { fViewX,fViewY,0 }; + triRaster.p[0] = Math::Vec_Add(triRaster.p[0], vOffsetView); + triRaster.p[1] = Math::Vec_Add(triRaster.p[1], vOffsetView); + triRaster.p[2] = Math::Vec_Add(triRaster.p[2], vOffsetView); + + // For now, just draw triangle + + //if (flags & RENDER_TEXTURED) + //{/* + // TexturedTriangle( + // triRaster.p[0].x, triRaster.p[0].y, triRaster.t[0].x, triRaster.t[0].y, triRaster.t[0].z, + // triRaster.p[1].x, triRaster.p[1].y, triRaster.t[1].x, triRaster.t[1].y, triRaster.t[1].z, + // triRaster.p[2].x, triRaster.p[2].y, triRaster.t[2].x, triRaster.t[2].y, triRaster.t[2].z, + // sprTexture);*/ + + // RasterTriangle( + // triRaster.p[0].x, triRaster.p[0].y, triRaster.t[0].x, triRaster.t[0].y, triRaster.t[0].z, triRaster.col, + // triRaster.p[1].x, triRaster.p[1].y, triRaster.t[1].x, triRaster.t[1].y, triRaster.t[1].z, triRaster.col, + // triRaster.p[2].x, triRaster.p[2].y, triRaster.t[2].x, triRaster.t[2].y, triRaster.t[2].z, triRaster.col, + // sprTexture, nFlags); + + //} + + if (flags & RENDER_WIRE) + { + DrawTriangleWire(triRaster, olc::RED); + } + else + { + RasterTriangle( + (int)triRaster.p[0].x,(int)triRaster.p[0].y, triRaster.t[0].x, triRaster.t[0].y, triRaster.t[0].z, triRaster.col[0], + (int)triRaster.p[1].x,(int)triRaster.p[1].y, triRaster.t[1].x, triRaster.t[1].y, triRaster.t[1].z, triRaster.col[1], + (int)triRaster.p[2].x,(int)triRaster.p[2].y, triRaster.t[2].x, triRaster.t[2].y, triRaster.t[2].z, triRaster.col[2], + sprTexture, flags); + + } + + + + + nTriangleDrawnCount++; + } + } + } + + return nTriangleDrawnCount; + } + + void GFX3D::RasterTriangle(int x1, int y1, float u1, float v1, float w1, olc::Pixel c1, + int x2, int 
y2, float u2, float v2, float w2, olc::Pixel c2, + int x3, int y3, float u3, float v3, float w3, olc::Pixel c3, + olc::Sprite* spr, + uint32_t nFlags) + + { + if (y2 < y1) + { + std::swap(y1, y2); std::swap(x1, x2); std::swap(u1, u2); std::swap(v1, v2); std::swap(w1, w2); std::swap(c1, c2); + } + + if (y3 < y1) + { + std::swap(y1, y3); std::swap(x1, x3); std::swap(u1, u3); std::swap(v1, v3); std::swap(w1, w3); std::swap(c1, c3); + } + + if (y3 < y2) + { + std::swap(y2, y3); std::swap(x2, x3); std::swap(u2, u3); std::swap(v2, v3); std::swap(w2, w3); std::swap(c2, c3); + } + + int dy1 = y2 - y1; + int dx1 = x2 - x1; + float dv1 = v2 - v1; + float du1 = u2 - u1; + float dw1 = w2 - w1; + int dcr1 = c2.r - c1.r; + int dcg1 = c2.g - c1.g; + int dcb1 = c2.b - c1.b; + int dca1 = c2.a - c1.a; + + int dy2 = y3 - y1; + int dx2 = x3 - x1; + float dv2 = v3 - v1; + float du2 = u3 - u1; + float dw2 = w3 - w1; + int dcr2 = c3.r - c1.r; + int dcg2 = c3.g - c1.g; + int dcb2 = c3.b - c1.b; + int dca2 = c3.a - c1.a; + + float tex_u, tex_v, tex_w; + float col_r, col_g, col_b, col_a; + + float dax_step = 0, dbx_step = 0, + du1_step = 0, dv1_step = 0, + du2_step = 0, dv2_step = 0, + dw1_step = 0, dw2_step = 0, + dcr1_step = 0, dcr2_step = 0, + dcg1_step = 0, dcg2_step = 0, + dcb1_step = 0, dcb2_step = 0, + dca1_step = 0, dca2_step = 0; + + if (dy1) dax_step = dx1 / (float)abs(dy1); + if (dy2) dbx_step = dx2 / (float)abs(dy2); + + if (dy1) du1_step = du1 / (float)abs(dy1); + if (dy1) dv1_step = dv1 / (float)abs(dy1); + if (dy1) dw1_step = dw1 / (float)abs(dy1); + + if (dy2) du2_step = du2 / (float)abs(dy2); + if (dy2) dv2_step = dv2 / (float)abs(dy2); + if (dy2) dw2_step = dw2 / (float)abs(dy2); + + if (dy1) dcr1_step = dcr1 / (float)abs(dy1); + if (dy1) dcg1_step = dcg1 / (float)abs(dy1); + if (dy1) dcb1_step = dcb1 / (float)abs(dy1); + if (dy1) dca1_step = dca1 / (float)abs(dy1); + + if (dy2) dcr2_step = dcr2 / (float)abs(dy2); + if (dy2) dcg2_step = dcg2 / (float)abs(dy2); + if (dy2) dcb2_step = dcb2 / (float)abs(dy2); + if (dy2) dca2_step = dca2 / (float)abs(dy2); + + float pixel_r = 0.0f; + float pixel_g = 0.0f; + float pixel_b = 0.0f; + float pixel_a = 1.0f; + + if (dy1) + { + for (int i = y1; i <= y2; i++) + { + int ax = int(x1 + (float)(i - y1) * dax_step); + int bx = int(x1 + (float)(i - y1) * dbx_step); + + float tex_su = u1 + (float)(i - y1) * du1_step; + float tex_sv = v1 + (float)(i - y1) * dv1_step; + float tex_sw = w1 + (float)(i - y1) * dw1_step; + + float tex_eu = u1 + (float)(i - y1) * du2_step; + float tex_ev = v1 + (float)(i - y1) * dv2_step; + float tex_ew = w1 + (float)(i - y1) * dw2_step; + + float col_sr = c1.r + (float)(i - y1) * dcr1_step; + float col_sg = c1.g + (float)(i - y1) * dcg1_step; + float col_sb = c1.b + (float)(i - y1) * dcb1_step; + float col_sa = c1.a + (float)(i - y1) * dca1_step; + + float col_er = c1.r + (float)(i - y1) * dcr2_step; + float col_eg = c1.g + (float)(i - y1) * dcg2_step; + float col_eb = c1.b + (float)(i - y1) * dcb2_step; + float col_ea = c1.a + (float)(i - y1) * dca2_step; + + if (ax > bx) + { + std::swap(ax, bx); + std::swap(tex_su, tex_eu); + std::swap(tex_sv, tex_ev); + std::swap(tex_sw, tex_ew); + std::swap(col_sr, col_er); + std::swap(col_sg, col_eg); + std::swap(col_sb, col_eb); + std::swap(col_sa, col_ea); + } + + tex_u = tex_su; + tex_v = tex_sv; + tex_w = tex_sw; + col_r = col_sr; + col_g = col_sg; + col_b = col_sb; + col_a = col_sa; + + float tstep = 1.0f / ((float)(bx - ax)); + float t = 0.0f; + + for (int j = ax; j < bx; j++) + { + tex_u = 
(1.0f - t) * tex_su + t * tex_eu; + tex_v = (1.0f - t) * tex_sv + t * tex_ev; + tex_w = (1.0f - t) * tex_sw + t * tex_ew; + col_r = (1.0f - t) * col_sr + t * col_er; + col_g = (1.0f - t) * col_sg + t * col_eg; + col_b = (1.0f - t) * col_sb + t * col_eb; + col_a = (1.0f - t) * col_sa + t * col_ea; + + pixel_r = col_r; + pixel_g = col_g; + pixel_b = col_b; + pixel_a = col_a; + + if (nFlags & GFX3D::RENDER_TEXTURED) + { + if (spr != nullptr) + { + olc::Pixel sample = spr->Sample(tex_u / tex_w, tex_v / tex_w); + pixel_r *= sample.r / 255.0f; + pixel_g *= sample.g / 255.0f; + pixel_b *= sample.b / 255.0f; + pixel_a *= sample.a / 255.0f; + } + } + + if (nFlags & GFX3D::RENDER_DEPTH) + { + if (tex_w > m_DepthBuffer[i*pge->ScreenWidth() + j]) + if (pge->Draw(j, i, olc::Pixel(uint8_t(pixel_r * 1.0f), uint8_t(pixel_g * 1.0f), uint8_t(pixel_b * 1.0f), uint8_t(pixel_a * 1.0f)))) + m_DepthBuffer[i*pge->ScreenWidth() + j] = tex_w; + } + else + { + pge->Draw(j, i, olc::Pixel(uint8_t(pixel_r * 1.0f), uint8_t(pixel_g * 1.0f), uint8_t(pixel_b * 1.0f), uint8_t(pixel_a * 1.0f))); + } + + t += tstep; + } + } + } + + dy1 = y3 - y2; + dx1 = x3 - x2; + dv1 = v3 - v2; + du1 = u3 - u2; + dw1 = w3 - w2; + dcr1 = c3.r - c2.r; + dcg1 = c3.g - c2.g; + dcb1 = c3.b - c2.b; + dca1 = c3.a - c2.a; + + if (dy1) dax_step = dx1 / (float)abs(dy1); + if (dy2) dbx_step = dx2 / (float)abs(dy2); + + du1_step = 0; dv1_step = 0; + if (dy1) du1_step = du1 / (float)abs(dy1); + if (dy1) dv1_step = dv1 / (float)abs(dy1); + if (dy1) dw1_step = dw1 / (float)abs(dy1); + + dcr1_step = 0; dcg1_step = 0; dcb1_step = 0; dca1_step = 0; + if (dy1) dcr1_step = dcr1 / (float)abs(dy1); + if (dy1) dcg1_step = dcg1 / (float)abs(dy1); + if (dy1) dcb1_step = dcb1 / (float)abs(dy1); + if (dy1) dca1_step = dca1 / (float)abs(dy1); + + if (dy1) + { + for (int i = y2; i <= y3; i++) + { + int ax = int(x2 + (float)(i - y2) * dax_step); + int bx = int(x1 + (float)(i - y1) * dbx_step); + + float tex_su = u2 + (float)(i - y2) * du1_step; + float tex_sv = v2 + (float)(i - y2) * dv1_step; + float tex_sw = w2 + (float)(i - y2) * dw1_step; + + float tex_eu = u1 + (float)(i - y1) * du2_step; + float tex_ev = v1 + (float)(i - y1) * dv2_step; + float tex_ew = w1 + (float)(i - y1) * dw2_step; + + float col_sr = c2.r + (float)(i - y2) * dcr1_step; + float col_sg = c2.g + (float)(i - y2) * dcg1_step; + float col_sb = c2.b + (float)(i - y2) * dcb1_step; + float col_sa = c2.a + (float)(i - y2) * dca1_step; + + float col_er = c1.r + (float)(i - y1) * dcr2_step; + float col_eg = c1.g + (float)(i - y1) * dcg2_step; + float col_eb = c1.b + (float)(i - y1) * dcb2_step; + float col_ea = c1.a + (float)(i - y1) * dca2_step; + + if (ax > bx) + { + std::swap(ax, bx); + std::swap(tex_su, tex_eu); + std::swap(tex_sv, tex_ev); + std::swap(tex_sw, tex_ew); + std::swap(col_sr, col_er); + std::swap(col_sg, col_eg); + std::swap(col_sb, col_eb); + std::swap(col_sa, col_ea); + } + + tex_u = tex_su; + tex_v = tex_sv; + tex_w = tex_sw; + col_r = col_sr; + col_g = col_sg; + col_b = col_sb; + col_a = col_sa; + + float tstep = 1.0f / ((float)(bx - ax)); + float t = 0.0f; + + for (int j = ax; j < bx; j++) + { + tex_u = (1.0f - t) * tex_su + t * tex_eu; + tex_v = (1.0f - t) * tex_sv + t * tex_ev; + tex_w = (1.0f - t) * tex_sw + t * tex_ew; + col_r = (1.0f - t) * col_sr + t * col_er; + col_g = (1.0f - t) * col_sg + t * col_eg; + col_b = (1.0f - t) * col_sb + t * col_eb; + col_a = (1.0f - t) * col_sa + t * col_ea; + + pixel_r = col_r; + pixel_g = col_g; + pixel_b = col_b; + pixel_a = col_a; + + if 
(nFlags & GFX3D::RENDER_TEXTURED) + { + if (spr != nullptr) + { + olc::Pixel sample = spr->Sample(tex_u / tex_w, tex_v / tex_w); + pixel_r *= sample.r / 255.0f; + pixel_g *= sample.g / 255.0f; + pixel_b *= sample.b / 255.0f; + pixel_a *= sample.a / 255.0f; + } + } + + if (nFlags & GFX3D::RENDER_DEPTH) + { + if (tex_w > m_DepthBuffer[i*pge->ScreenWidth() + j]) + if (pge->Draw(j, i, olc::Pixel(uint8_t(pixel_r * 1.0f), uint8_t(pixel_g * 1.0f), uint8_t(pixel_b * 1.0f), uint8_t(pixel_a * 1.0f)))) + m_DepthBuffer[i*pge->ScreenWidth() + j] = tex_w; + } + else + { + pge->Draw(j, i, olc::Pixel(uint8_t(pixel_r * 1.0f), uint8_t(pixel_g * 1.0f), uint8_t(pixel_b * 1.0f), uint8_t(pixel_a * 1.0f))); + } + + t += tstep; + } + } + } + } + + + + //GFX3D::MipMap::MipMap() : olc::Sprite(){} + //GFX3D::MipMap::MipMap(std::string sImageFile) : olc::Sprite(sImageFile) + //{ + // GenerateMipLevels(); + //} + //GFX3D::MipMap::MipMap(std::string sImageFile, olc::ResourcePack *pack) : olc::Sprite(sImageFile, pack){} + //GFX3D::MipMap::MipMap(int32_t w, int32_t h) : olc::Sprite(w, h) {} + + //int GFX3D::MipMap::GenerateMipLevels() + //{ + // int nLevelsW = 0; + // int nLevelsH = 0; + // int w = width; + // int h = height; + // while (w > 1) { w >>= 1; nLevelsW++; } + // while (h > 1) { h >>= 1; nLevelsH++; } + + // int nLevels = std::min(nLevelsW, nLevelsH); + + // w = width >> 1; + // h = height >> 1; + + // vecMipMaps.emplace_back(w, h); // Level 0 + // memcpy(vecMipMaps[0].GetData(), GetData(), w*h*sizeof(uint32_t)); + + // for (int i = 1; i < nLevels; i++) + // { + // vecMipMaps.emplace_back(w, h); + // pge->SetDrawTarget(&vecMipMaps[i]); + // for (int x = 0; x < w; x++) + // for (int y = 0; y < h; y++) + // pge->Draw(x, y, vecMipMaps[i-1].SampleBL((float)x / (float)w, (float)y / (float)h)); + // w >>= 1; h >>= 1; + // } + + // pge->SetDrawTarget(nullptr); + // return nLevels; + //} + // + //olc::rcode GFX3D::MipMap::LoadFromFile(std::string sImageFile, olc::ResourcePack *pack) + //{ + // olc::rcode r = olc::Sprite::LoadFromFile(sImageFile, pack); + // if (r == olc::FAIL) return r; + // GenerateMipLevels(); + // return r; + //} + + //olc::rcode GFX3D::MipMap::LoadFromPGESprFile(std::string sImageFile, olc::ResourcePack *pack) + //{ + // olc::rcode r = olc::Sprite::LoadFromPGESprFile(sImageFile, pack); + // if (r == olc::FAIL) return r; + // GenerateMipLevels(); + // return r; + //} + // + //olc::Pixel GFX3D::MipMap::Sample(float x, float y, float z) + //{ + // int nLevel = (int)(z * (float)vecMipMaps.size()); + // return vecMipMaps[nLevel].Sample(x, y); + //} + + //olc::Pixel GFX3D::MipMap::SampleBL(float u, float v, float z); + +} + +#endif \ No newline at end of file diff --git a/Example/olcPGEX_MiniAudio.h b/Example/olcPGEX_MiniAudio.h new file mode 100644 index 0000000..08a6dd1 --- /dev/null +++ b/Example/olcPGEX_MiniAudio.h @@ -0,0 +1,372 @@ +#pragma once +/* +olcPGEX_MiniAudio.h + ++-------------------------------------------------------------+ +| OneLoneCoder Pixel Game Engine Extension | +| MiniAudio v1.0 | ++-------------------------------------------------------------+ + +NOTE: UNDER ACTIVE DEVELOPMENT - THERE MAY BE BUGS/GLITCHES + +What is this? +~~~~~~~~~~~~~ +This extension abstracts the very robust and powerful miniaudio +library. It provides simple loading and playback of WAV and MP3 +files. Because it's built on top of miniaudio, it requires next +to no addictional build configurations in order to be built +for cross-platform. 
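+
+Example (editor's sketch)
+~~~~~~~~~~~~~~~~~~~~~~~~~
+The short sketch below is not part of the original header; it only
+illustrates the public interface declared further down. The derived class
+name "Demo" and the path "assets/song.mp3" are illustrative assumptions.
+
+    #define OLC_PGEX_MINIAUDIO          // in exactly one .cpp, before including this header
+    #include "olcPGEX_MiniAudio.h"
+
+    class Demo : public olc::PixelGameEngine
+    {
+        olc::MiniAudio ma;              // constructing the PGEX may throw if no audio device is available
+        int song = -1;
+
+        bool OnUserCreate() override
+        {
+            song = ma.LoadSound("assets/song.mp3"); // returns the id used by all playback calls
+            ma.Play(song, true);                    // true = loop
+            return true;
+        }
+
+        bool OnUserUpdate(float fElapsedTime) override
+        {
+            if (GetKey(olc::Key::SPACE).bPressed)
+                ma.Toggle(song);                    // pause/resume without rewinding
+            return true;
+        }
+    };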
+ +License (OLC-3) +~~~~~~~~~~~~~~~ + +Copyright 2023 Moros Smith + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions or derivations of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions or derivative works in binary form must reproduce the above +copyright notice. This list of conditions and the following disclaimer must be +reproduced in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors may +be used to endorse or promote products derived from this software without specific +prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT +SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +Links +~~~~~ +YouTube: https://www.youtube.com/@Moros1138 +GitHub: https://www.github.com/Moros1138 +Homepage: https://www.moros1138.com +*/ + +#include "olcPixelGameEngine.h" +#include + +#ifdef OLC_PGEX_MINIAUDIO +#define MINIAUDIO_IMPLEMENTATION +#endif + +#include "miniaudio.h" + +namespace olc +{ + class MiniAudio : public olc::PGEX + { + public: + MiniAudio(); + ~MiniAudio(); + virtual bool OnBeforeUserUpdate(float& fElapsedTime) override; + static void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount); + + public: // LOADING ROUTINES + const int LoadSound(const std::string& path); + + public: // PLAYBACK CONTROLS + // plays a sample, can be set to loop + void Play(const int id, const bool loop = false); + // stops a sample, rewinds to beginning + void Stop(const int id); + // pauses a sample, does not change position + void Pause(const int id); + // toggle between play and pause + void Toggle(const int id, bool rewind = false); + + public: // SEEKING CONTROLS + // seek to the provided position in the sound, by milliseconds + void Seek(const int id, const unsigned long long milliseconds); + // seek to the provided position in the sound, by float 0.f is beginning, 1.0f is end + void Seek(const int id, float& location); + // seek forward from current position by the provided time + void Forward(const int id, const unsigned long long milliseconds); + // seek forward from current position by the provided time + void Rewind(const int id, const unsigned long long milliseconds); + + public: // MISC CONTROLS + // set volume of a sound, 0.0f is mute, 1.0f is full + void SetVolume(const int id, float& volume); + // set pan of a sound, -1.0f is left, 1.0f is right, 0.0f is center + void SetPan(const int id, float& pan); + // set pitch of a sound, 1.0f is normal + void SetPitch(const int id, float& pitch); + + public: // MISC GETTERS + // gets the current position in the sound, in milliseconds + unsigned long 
long GetCursorMilliseconds(const int id); + // gets the current position in the sound, as a float between 0.0f and 1.0f + float GetCursorFloat(const int id); + + private: + + /* + Soooo, i'm not going to spend a whole lot of time + documenting miniaudio features, if you want to + know more I invite you to visit their very very + nicely documented webiste at: + + https://miniaud.io/docs/manual/index.html + */ + + ma_device device; + ma_engine engine; + ma_resource_manager resourceManager; + + // sample rate for the device and engine + int sampleRate; + // this is where the sounds are kept + std::vector vecSounds; + + }; + + /** + * EXCEPTIONS, long story short. I needed to be able + * to construct the PGEX in a state where it could + * fail at runtime. If you have a better way of + * accomplishing the PGEX pattern without using + * exceptions, I'm open to suggestions! + */ + struct MiniAudioDeviceException : public std::exception + { + const char* what() const throw() + { + return "Failed to initialize a device."; + } + }; + + struct MiniAudioResourceManagerException : public std::exception + { + const char* what() const throw() + { + return "Failed to initialize the resource manager."; + } + }; + + struct MiniAudioEngineException : public std::exception + { + const char* what() const throw() + { + return "Failed to initialize the audio engine."; + } + }; + + struct MiniAudioSoundException : public std::exception + { + const char* what() const throw() + { + return "Failed to initialize a sound."; + } + }; + +} + + +#ifdef OLC_PGEX_MINIAUDIO +#undef OLC_PGEX_MINIAUDIO + +namespace olc +{ + + MiniAudio::MiniAudio() : olc::PGEX(true) + { + sampleRate = 48000; + + ma_device_config deviceConfig = ma_device_config_init(ma_device_type_playback); + deviceConfig.playback.format = ma_format_f32; + deviceConfig.playback.channels = 2; + deviceConfig.sampleRate = sampleRate; + deviceConfig.dataCallback = MiniAudio::data_callback; + deviceConfig.pUserData = &engine; + + if(ma_device_init(NULL, &deviceConfig, &device) != MA_SUCCESS) + throw MiniAudioDeviceException(); + + ma_resource_manager_config resourceManagerConfig = ma_resource_manager_config_init(); + resourceManagerConfig.decodedFormat = ma_format_f32; + resourceManagerConfig.decodedChannels = 0; + resourceManagerConfig.decodedSampleRate = sampleRate; + +#ifdef __EMSCRIPTEN__ + resourceManagerConfig.jobThreadCount = 0; + resourceManagerConfig.flags |= MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING; + resourceManagerConfig.flags |= MA_RESOURCE_MANAGER_FLAG_NO_THREADING; +#endif + + if(ma_resource_manager_init(&resourceManagerConfig, &resourceManager) != MA_SUCCESS) + throw MiniAudioResourceManagerException(); + + ma_engine_config engineConfig = ma_engine_config_init(); + engineConfig.pDevice = &device; + engineConfig.pResourceManager = &resourceManager; + + if(ma_engine_init(&engineConfig, &engine) != MA_SUCCESS) + throw MiniAudioEngineException(); + + } + + MiniAudio::~MiniAudio() + { + for(auto sound : vecSounds) + { + ma_sound_uninit(sound); + delete sound; + } + + ma_resource_manager_uninit(&resourceManager); + + ma_engine_uninit(&engine); + } + + bool MiniAudio::OnBeforeUserUpdate(float& fElapsedTime) + { +#ifdef __EMSCRIPTEN__ + ma_resource_manager_process_next_job(&resourceManager); +#endif + + return false; + } + + void MiniAudio::data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) + { + ma_engine_read_pcm_frames((ma_engine*)(pDevice->pUserData), pOutput, frameCount, NULL); + } + + const int 
MiniAudio::LoadSound(const std::string& path) + { + const int id = vecSounds.size(); + vecSounds.push_back(new ma_sound()); + + if(ma_sound_init_from_file(&engine, path.c_str(), MA_SOUND_FLAG_DECODE | MA_SOUND_FLAG_ASYNC, NULL, NULL, vecSounds.at(id)) != MA_SUCCESS) + throw MiniAudioSoundException(); + + return id; + } + + void MiniAudio::Play(const int id, const bool loop) + { + if(ma_sound_is_playing(vecSounds.at(id))) + { + ma_sound_seek_to_pcm_frame(vecSounds.at(id), 0); + return; + } + + ma_sound_set_looping(vecSounds.at(id), loop); + ma_sound_start(vecSounds.at(id)); + } + + void MiniAudio::Stop(const int id) + { + ma_sound_seek_to_pcm_frame(vecSounds.at(id), 0); + ma_sound_stop(vecSounds.at(id)); + } + + void MiniAudio::Pause(const int id) + { + auto it = vecSounds.begin() + id; + ma_sound_stop(vecSounds.at(id)); + } + + void MiniAudio::Toggle(const int id, bool rewind) + { + if(ma_sound_is_playing(vecSounds.at(id))) + { + ma_sound_stop(vecSounds.at(id)); + + if(rewind) + ma_sound_seek_to_pcm_frame(vecSounds.at(id), 0); + + return; + } + + ma_sound_start(vecSounds.at(id)); + } + + void MiniAudio::SetVolume(const int id, float& volume) + { + ma_sound_set_volume(vecSounds.at(id), volume); + } + + void MiniAudio::SetPan(const int id, float& pan) + { + ma_sound_set_pan(vecSounds.at(id), pan); + } + + void MiniAudio::SetPitch(const int id, float& pitch) + { + ma_sound_set_pitch(vecSounds.at(id), pitch); + } + + void MiniAudio::Seek(const int id, const unsigned long long milliseconds) + { + unsigned long long frame = (milliseconds * engine.sampleRate) / 1000; + + ma_sound_seek_to_pcm_frame(vecSounds.at(id), frame); + } + + void MiniAudio::Seek(const int id, float& location) + { + unsigned long long length; + ma_sound_get_length_in_pcm_frames(vecSounds.at(id), &length); + + unsigned long long frame = length * location; + + ma_sound_seek_to_pcm_frame(vecSounds.at(id), frame); + } + + + void MiniAudio::Forward(const int id, const unsigned long long milliseconds) + { + unsigned long long cursor; + ma_sound_get_cursor_in_pcm_frames(vecSounds.at(id), &cursor); + + unsigned long long frame = (milliseconds * engine.sampleRate) / 1000; + ma_sound_seek_to_pcm_frame(vecSounds.at(id), cursor + frame); + } + + void MiniAudio::Rewind(const int id, const unsigned long long milliseconds) + { + unsigned long long cursor; + ma_sound_get_cursor_in_pcm_frames(vecSounds.at(id), &cursor); + + unsigned long long frame = (milliseconds * engine.sampleRate) / 1000; + ma_sound_seek_to_pcm_frame(vecSounds.at(id), cursor - frame); + } + + unsigned long long MiniAudio::GetCursorMilliseconds(const int id) + { + unsigned long long cursor; + ma_sound_get_cursor_in_pcm_frames(vecSounds.at(id), &cursor); + + cursor /= sampleRate; + cursor /= 1000; + + return cursor; + } + + float MiniAudio::GetCursorFloat(const int id) + { + unsigned long long cursor; + ma_sound_get_cursor_in_pcm_frames(vecSounds.at(id), &cursor); + + unsigned long long length; + ma_sound_get_length_in_pcm_frames(vecSounds.at(id), &length); + + return (float)cursor / length; + } + +} // olc + +#endif \ No newline at end of file diff --git a/Example/olcPGEX_QuickGUI.h b/Example/olcPGEX_QuickGUI.h new file mode 100644 index 0000000..30796c2 --- /dev/null +++ b/Example/olcPGEX_QuickGUI.h @@ -0,0 +1,1171 @@ +#pragma once +/* +OneLoneCoder - QuickGUI v1.02 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +A semi-immediate mode GUI for very simple GUI stuff. 
+Includes: +Label - Displays a single-line string +TextBox - Click to enter/edit single-line text +Button - A clickable labelled rectangle +CheckBox - A clickable labelled rectangle that retains state +ImageButton - A Button with an image instead of text +ImageCheckBox- A CheckBox with an image instead of text +Slider - An omnidirectional draggable handle between two values +ListBox - A list of strings, that can be scrolled and an item selected + +License (OLC-3) +~~~~~~~~~~~~~~~ + +Copyright 2018 - 2021 OneLoneCoder.com + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions or derivations of source code must retain the above +copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions or derivative works in binary form must reproduce +the above copyright notice. This list of conditions and the following +disclaimer must be reproduced in the documentation and/or other +materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
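+
+Example (editor's sketch)
+~~~~~~~~~~~~~~~~~~~~~~~~~
+The snippet below is an editor's illustration of the Manager/control pattern
+listed above, not part of the original header. It assumes, as in upstream
+QuickGUI, that a control registers itself with the Manager it is constructed
+with, and that a Manager built with bCleanUpForMe = true deletes its controls
+on destruction. The labels, positions and sizes are arbitrary.
+
+    olc::QuickGUI::Manager guiManager;   // owns and updates the controls below
+    olc::QuickGUI::Button* btnGo = new olc::QuickGUI::Button(
+        guiManager, "Go!", { 10.0f, 10.0f }, { 80.0f, 24.0f });
+    olc::QuickGUI::Slider* sldVolume = new olc::QuickGUI::Slider(
+        guiManager, { 10.0f, 50.0f }, { 110.0f, 50.0f }, 0.0f, 1.0f, 0.5f);
+
+    // each frame, e.g. inside OnUserUpdate():
+    guiManager.Update(this);             // reads mouse/keyboard and updates control state
+    if (btnGo->bPressed)
+    {
+        // button was clicked this frame
+    }
+    float vol = sldVolume->fValue;       // current value between fMin and fMax
+    guiManager.DrawDecal(this);          // or guiManager.Draw(this) for sprite-based drawing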
+ +Links +~~~~~ +YouTube: https://www.youtube.com/javidx9 +Discord: https://discord.gg/WhwHUMV +Twitter: https://www.twitter.com/javidx9 +Twitch: https://www.twitch.tv/javidx9 +GitHub: https://www.github.com/onelonecoder +Homepage: https://www.onelonecoder.com + +Author +~~~~~~ +David Barr, aka javidx9, �OneLoneCoder 2019, 2020, 2021, 2022 + +Changes +~~~~~~~ +v1.01 +Moved Slider::fGrabRad into "theme" ++Manager::CopyThemeFrom() - copies theme attributes from a different manager ++ListBox - Displays a vector of strings +v1.02 +ImageButton ++ImageCheckBox ++ListBox::bSelectionChanged flag, true when list selected item changes +=Fix - Text box mouse behaviours, mouse release is now meaningless ++CheckBox Fix for decal display + +*/ + +#ifndef OLC_PGEX_QUICKGUI_H +#define OLC_PGEX_QUICKGUI_H + +#include "olcPixelGameEngine.h" + + +namespace olc::QuickGUI +{ + class Manager; + + // Virtual base class for all controls + class BaseControl + { + public: + BaseControl(olc::QuickGUI::Manager& manager); + virtual ~BaseControl(); + + public: + // Switches the control on/off + void Enable(const bool bEnable); + // Sets whether or not the control is interactive/displayed + bool bVisible = true; + + // True on single frame control begins being manipulated + bool bPressed = false; + // True on all frames control is under user manipulation + bool bHeld = false; + // True on single frame control ceases being manipulated + bool bReleased = false; + + public: + // Updates the controls behvaiour + virtual void Update(olc::PixelGameEngine* pge) = 0; + // Draws the control using "sprite" based CPU operations + virtual void Draw(olc::PixelGameEngine* pge) = 0; + // Draws the control using "decal" based GPU operations + virtual void DrawDecal(olc::PixelGameEngine* pge) = 0; + + protected: + // Controls are related to a manager, where the theme resides + // and control groups can be implemented + olc::QuickGUI::Manager& m_manager; + + // All controls exists in one of four states + // Disabled - Greyed out and not interactive + // Normal - interactive and operational + // Hover - currently under the users mouse focus + // Click - user is interacting with the control + enum class State { Disabled, Normal, Hover, Click } m_state = State::Normal; + + // To add a "swish" to things, controls can fade between states + float m_fTransition = 0.0; + }; + + + // A QuickGUI::Manager acts as a convenient grouping of controls + class Manager + { + public: + // Construct Manager, bCleanUpForMe will automatically DELETE any controls + // given to this manager via AddControl() if true + Manager(const bool bCleanUpForMe = true); + virtual ~Manager(); + + public: + // Add a gui element derived form BaseControl to this manager + void AddControl(BaseControl* control); + // Updates all controls this manager operates + void Update(olc::PixelGameEngine* pge); + // Draws as "sprite" all controls this manager operates + void Draw(olc::PixelGameEngine* pge); + // Draws as "decal" all controls this manager operates + void DrawDecal(olc::PixelGameEngine* pge); + + public: // This managers "Theme" can be set here + // Various element colours + olc::Pixel colNormal = olc::DARK_BLUE; + olc::Pixel colHover = olc::BLUE; + olc::Pixel colClick = olc::CYAN; + olc::Pixel colDisable = olc::DARK_GREY; + olc::Pixel colBorder = olc::WHITE; + olc::Pixel colText = olc::WHITE; + // Speed to transiton from Normal -> Hover + float fHoverSpeedOn = 10.0f; + // Speed to transiton from Hover -> Normal + float fHoverSpeedOff = 4.0f; + // Size of grab handle + float 
fGrabRad = 8.0f; + // Copy all theme attributes into a different manager object + void CopyThemeFrom(const Manager& manager); + + private: + // Should this manager call delete on the controls it opeerates? + bool m_bEraseControlsOnDestroy = true; + // Container of controls + std::vector m_vControls; + }; + + + // Creates a Label Control - it's just text! + class Label : public BaseControl + { + public: + Label(olc::QuickGUI::Manager& manager, // Associate with a Manager + const std::string& text, // Text to display + const olc::vf2d& pos, // Location of label top-left + const olc::vf2d& size); // Size of label + + public: + // Position of button + olc::vf2d vPos; + // Size of button + olc::vf2d vSize; + // Text displayed on button + std::string sText; + // Show a border? + bool bHasBorder = false; + // Show a background? + bool bHasBackground = false; + // Where should the text be positioned? + enum class Alignment + {Left, Centre, Right} nAlign = Alignment::Centre; + + public: // BaseControl overrides + void Update(olc::PixelGameEngine* pge) override; + void Draw(olc::PixelGameEngine* pge) override; + void DrawDecal(olc::PixelGameEngine* pge) override; + }; + + class TextBox : public Label + { + public: + TextBox(olc::QuickGUI::Manager& manager, // Associate with a Manager + const std::string& text, // Text to display + const olc::vf2d& pos, // Location of text box top-left + const olc::vf2d& size); // Size of text box + + public: // BaseControl overrides + void Update(olc::PixelGameEngine* pge) override; + void Draw(olc::PixelGameEngine* pge) override; + void DrawDecal(olc::PixelGameEngine* pge) override; + + protected: + bool m_bTextEdit = false; + + }; + + // Creates a Button Control - a clickable, labelled rectangle + class Button : public BaseControl + { + public: + Button(olc::QuickGUI::Manager& manager, // Associate with a Manager + const std::string& text, // Text to display + const olc::vf2d& pos, // Location of button top-left + const olc::vf2d& size); // Size of button + + public: + // Position of button + olc::vf2d vPos; + // Size of button + olc::vf2d vSize; + // Text displayed on button + std::string sText; + + public: // BaseControl overrides + void Update(olc::PixelGameEngine* pge) override; + void Draw(olc::PixelGameEngine* pge) override; + void DrawDecal(olc::PixelGameEngine* pge) override; + }; + + // Creates a Button Control - a clickable, labelled rectangle + class CheckBox : public Button + { + public: + CheckBox(olc::QuickGUI::Manager& manager, // Associate with a Manager + const std::string& text, // Text to display + const bool check, // Is checked or not? 
+ const olc::vf2d& pos, // Location of button top-left + const olc::vf2d& size); // Size of button + + public: + bool bChecked = false; + + public: // BaseControl overrides + void Update(olc::PixelGameEngine* pge) override; + void Draw(olc::PixelGameEngine* pge) override; + void DrawDecal(olc::PixelGameEngine* pge) override; + }; + + class ImageButton : public Button + { + public: + ImageButton(olc::QuickGUI::Manager& manager, // Associate with a Manager + const olc::Renderable &icon, // Text to display + const olc::vf2d& pos, // Location of button top-left + const olc::vf2d& size); // Size of button + + public: + const olc::Renderable& pIcon; + + public: + void Draw(olc::PixelGameEngine* pge) override; + void DrawDecal(olc::PixelGameEngine* pge) override; + }; + + class ImageCheckBox : public ImageButton + { + public: + ImageCheckBox(olc::QuickGUI::Manager& manager, // Associate with a Manager + const olc::Renderable& icon, // Text to display + const bool check, // Is checked or not? + const olc::vf2d& pos, // Location of button top-left + const olc::vf2d& size); // Size of button + + public: + bool bChecked = false; + + public: + void Update(olc::PixelGameEngine* pge) override; + void Draw(olc::PixelGameEngine* pge) override; + void DrawDecal(olc::PixelGameEngine* pge) override; + }; + + + // Creates a Slider Control - a grabbable handle that slides between two locations + class Slider : public BaseControl + { + public: + Slider(olc::QuickGUI::Manager& manager, // Associate with a Manager + const olc::vf2d& posmin, // Screen location of "minimum" + const olc::vf2d& posmax, // Screen location of "maximum" + const float valmin, // Value of minimum + const float valmax, // Value of maximum + const float value); // Starting value + + public: + // Minium value + float fMin = -100.0f; + // Maximum value + float fMax = +100.0f; + // Current value + float fValue = 0.0f; + + // Location of minimum/start + olc::vf2d vPosMin; + // Location of maximum/end + olc::vf2d vPosMax; + + public: // BaseControl overrides + void Update(olc::PixelGameEngine* pge) override; + void Draw(olc::PixelGameEngine* pge) override; + void DrawDecal(olc::PixelGameEngine* pge) override; + }; + + + class ListBox : public BaseControl + { + public: + ListBox(olc::QuickGUI::Manager& manager, // Associate with a Manager + std::vector& vList, + const olc::vf2d& pos, // Location of list top-left + const olc::vf2d& size); // Size of list + + // Position of list + olc::vf2d vPos; + // Size of list + olc::vf2d vSize; + // Show a border? + bool bHasBorder = true; + // Show a background? + bool bHasBackground = true; + + public: + Slider *m_pSlider = nullptr; + Manager m_group; + size_t m_nVisibleItems = 0; + std::vector& m_vList; + + public: + // Item currently selected + size_t nSelectedItem = 0; + size_t nPreviouslySelectedItem = 0; + // Has selection changed? 
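+		/* Illustrative sketch (not part of the original header): typical per-frame use of
+		   these selection fields, assuming a user-side Manager called guiManager and a
+		   std::vector<std::string> called vEntries that outlives the control:
+
+		       olc::QuickGUI::ListBox listDemo(guiManager, vEntries, { 10.0f, 10.0f }, { 150.0f, 80.0f });
+		       // ...inside OnUserUpdate():
+		       guiManager.Update(this);
+		       if (listDemo.bSelectionChanged)
+		           OnItemPicked(vEntries[listDemo.nSelectedItem]);  // hypothetical handler
+		       guiManager.DrawDecal(this);
+		*/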
+ bool bSelectionChanged = false; + + public: // BaseControl overrides + void Update(olc::PixelGameEngine* pge) override; + void Draw(olc::PixelGameEngine* pge) override; + void DrawDecal(olc::PixelGameEngine* pge) override; + }; + + + class ModalDialog : public olc::PGEX + { + public: + ModalDialog(); + + public: + void ShowFileOpen(const std::string& sPath); + + protected: + virtual bool OnBeforeUserUpdate(float& fElapsedTime) override; + + private: + bool m_bShowDialog = false; + + Manager m_manFileSelect; + ListBox* m_listVolumes = nullptr; + ListBox* m_listDirectory = nullptr; + ListBox* m_listFiles = nullptr; + + std::vector m_vVolumes; + std::vector m_vDirectory; + std::vector m_vFiles; + std::filesystem::path m_path; + }; +} + + +#ifdef OLC_PGEX_QUICKGUI +#undef OLC_PGEX_QUICKGUI +namespace olc::QuickGUI +{ + +#pragma region BaseControl + BaseControl::BaseControl(olc::QuickGUI::Manager& manager) : m_manager(manager) + { + m_manager.AddControl(this); + } + + BaseControl::~BaseControl() + { + + } + + void BaseControl::Enable(const bool bEnable) + { + m_state = bEnable ? State::Normal : State::Disabled; + } +#pragma endregion + +#pragma region Manager + Manager::Manager(const bool bCleanUpForMe) + { + m_bEraseControlsOnDestroy = bCleanUpForMe; + } + + Manager::~Manager() + { + if (m_bEraseControlsOnDestroy) + for (auto& p : m_vControls) + delete p; + + m_vControls.clear(); + } + + void Manager::AddControl(BaseControl* control) + { + m_vControls.push_back(control); + } + + void Manager::Update(olc::PixelGameEngine* pge) + { + for (auto& p : m_vControls) p->Update(pge); + } + + void Manager::Draw(olc::PixelGameEngine* pge) + { + for (auto& p : m_vControls) p->Draw(pge); + } + + void Manager::DrawDecal(olc::PixelGameEngine* pge) + { + for (auto& p : m_vControls) p->DrawDecal(pge); + } + + void Manager::CopyThemeFrom(const Manager& manager) + { + this->colBorder = manager.colBorder; + this->colClick = manager.colClick; + this->colDisable = manager.colDisable; + this->colHover = manager.colHover; + this->colNormal = manager.colNormal; + this->colText = manager.colText; + this->fGrabRad = manager.fGrabRad; + this->fHoverSpeedOff = manager.fHoverSpeedOff; + this->fHoverSpeedOn = manager.fHoverSpeedOn; + } +#pragma endregion + +#pragma region Label + Label::Label(olc::QuickGUI::Manager& manager, const std::string& text, const olc::vf2d& pos, const olc::vf2d& size) + : BaseControl(manager) + { + vPos = pos; vSize = size; sText = text; + } + + void Label::Update(olc::PixelGameEngine* pge) + { + + } + + void Label::Draw(olc::PixelGameEngine* pge) + { + if (!bVisible) + return; + + if (bHasBackground) + { + pge->FillRect(vPos + olc::vf2d(1, 1), vSize - olc::vf2d(2, 2), m_manager.colNormal); + } + + if(bHasBorder) + pge->DrawRect(vPos, vSize - olc::vf2d(1, 1), m_manager.colBorder); + + olc::vf2d vText = pge->GetTextSizeProp(sText); + switch (nAlign) + { + case Alignment::Left: + pge->DrawStringProp(olc::vf2d( vPos.x + 2.0f, vPos.y + (vSize.y - vText.y) * 0.5f ), sText, m_manager.colText); + break; + case Alignment::Centre: + pge->DrawStringProp(vPos + (vSize - vText) * 0.5f, sText, m_manager.colText); + break; + case Alignment::Right: + pge->DrawStringProp(olc::vf2d{ vPos.x + vSize.x - vText.x - 2.0f, vPos.y + (vSize.y - vText.y) * 0.5f }, sText, m_manager.colText); + break; + } + } + + void Label::DrawDecal(olc::PixelGameEngine* pge) + { + if (!bVisible) + return; + + if (bHasBackground) + { + pge->FillRectDecal(vPos + olc::vf2d(1, 1), vSize - olc::vf2d(2, 2), m_manager.colNormal); + } + + if 
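+		/* note: the decal path draws the border by filling the rectangle in
+		   WIREFRAME decal mode, which renders only the outline */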
(bHasBorder) + { + pge->SetDecalMode(olc::DecalMode::WIREFRAME); + pge->FillRectDecal(vPos + olc::vf2d(1, 1), vSize - olc::vf2d(2,2), m_manager.colBorder); + pge->SetDecalMode(olc::DecalMode::NORMAL); + } + + olc::vf2d vText = pge->GetTextSizeProp(sText); + switch (nAlign) + { + case Alignment::Left: + pge->DrawStringPropDecal({ vPos.x + 2.0f, vPos.y + (vSize.y - vText.y) * 0.5f }, sText, m_manager.colText); + break; + case Alignment::Centre: + pge->DrawStringPropDecal(vPos + (vSize - vText) * 0.5f, sText, m_manager.colText); + break; + case Alignment::Right: + pge->DrawStringPropDecal({ vPos.x + vSize.x - vText.x - 2.0f, vPos.y + (vSize.y - vText.y) * 0.5f }, sText, m_manager.colText); + break; + } + } +#pragma endregion + + +#pragma region TextBox + TextBox::TextBox(olc::QuickGUI::Manager& manager, const std::string& text, const olc::vf2d& pos, const olc::vf2d& size) + : Label(manager, text, pos, size) + { + nAlign = Alignment::Left; + bHasBorder = true; + bHasBackground = false; + } + + void TextBox::Update(olc::PixelGameEngine* pge) + { + if (m_state == State::Disabled || !bVisible) + return; + + bPressed = false; + bReleased = false; + + olc::vf2d vMouse = pge->GetMousePos(); + + if (vMouse.x >= vPos.x && vMouse.x < vPos.x + vSize.x && + vMouse.y >= vPos.y && vMouse.y < vPos.y + vSize.y) + { + // Released inside box does nothing to me, but i may have + // to finish off the neighbours... oo err + bPressed = pge->GetMouse(olc::Mouse::LEFT).bPressed; + bReleased = pge->GetMouse(olc::Mouse::LEFT).bReleased; + + if (bPressed && pge->IsTextEntryEnabled() && !m_bTextEdit) + { + pge->TextEntryEnable(false); + } + + + if (bPressed && !pge->IsTextEntryEnabled() && !m_bTextEdit) + { + pge->TextEntryEnable(true, sText); + m_bTextEdit = true; + } + + bHeld = pge->GetMouse(olc::Mouse::LEFT).bHeld; + + + } + else + { + // Released outside box + bPressed = pge->GetMouse(olc::Mouse::LEFT).bPressed; + bReleased = pge->GetMouse(olc::Mouse::LEFT).bReleased; + bHeld = pge->GetMouse(olc::Mouse::LEFT).bHeld; + + if (bPressed && m_bTextEdit) + { + sText = pge->TextEntryGetString(); + pge->TextEntryEnable(false); + m_bTextEdit = false; + } + } + + if (m_bTextEdit && pge->IsTextEntryEnabled()) + sText = pge->TextEntryGetString(); + } + + void TextBox::Draw(olc::PixelGameEngine* pge) + { + if (!bVisible) + return; + + if (bHasBackground) + { + pge->FillRect(vPos + olc::vf2d(1, 1), vSize - olc::vf2d(2, 2), m_manager.colNormal); + } + + if (bHasBorder) + pge->DrawRect(vPos, vSize - olc::vf2d(1, 1), m_manager.colBorder); + + if (m_bTextEdit && pge->IsTextEntryEnabled()) + { + // Draw Cursor + int32_t i = pge->TextEntryGetCursor(); + olc::vf2d vCursorPos = pge->GetTextSizeProp(sText.substr(0, i)); + pge->FillRect(olc::vf2d(vPos.x + 2.0f + vCursorPos.x, (vPos.y + (vSize.y - 10.0f) * 0.5f)), { 2, 10 }, m_manager.colText); + } + + // Draw Text + olc::vf2d vText = pge->GetTextSizeProp(sText); + pge->DrawStringProp(olc::vf2d(vPos.x + 2.0f, vPos.y + (vSize.y - vText.y) * 0.5f), sText, m_manager.colText); + + } + + void TextBox::DrawDecal(olc::PixelGameEngine* pge) + { + if (!bVisible) + return; + + if (bHasBackground) + { + pge->FillRectDecal(vPos + olc::vf2d(1, 1), vSize - olc::vf2d(2, 2), m_manager.colNormal); + } + + if (bHasBorder) + { + pge->SetDecalMode(olc::DecalMode::WIREFRAME); + pge->FillRectDecal(vPos + olc::vf2d(1, 1), vSize - olc::vf2d(2, 2), m_manager.colBorder); + pge->SetDecalMode(olc::DecalMode::NORMAL); + } + + if (m_bTextEdit && pge->IsTextEntryEnabled()) + { + // Draw Cursor + int32_t i = 
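+			/* caret index into the string being edited; the text width measured up to
+			   this index positions the cursor rectangle */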
pge->TextEntryGetCursor(); + olc::vf2d vCursorPos = pge->GetTextSizeProp(sText.substr(0, i)); + pge->FillRectDecal(olc::vf2d(vPos.x + 2.0f + vCursorPos.x, (vPos.y + (vSize.y - 10.0f) * 0.5f)), { 2, 10 }, m_manager.colText); + } + + // Draw Text + olc::vf2d vText = pge->GetTextSizeProp(sText); + pge->DrawStringPropDecal(olc::vf2d(vPos.x + 2.0f, vPos.y + (vSize.y - vText.y) * 0.5f), sText, m_manager.colText); + } +#pragma endregion + +#pragma region Button + Button::Button(olc::QuickGUI::Manager& manager, const std::string& text, const olc::vf2d& pos, const olc::vf2d& size) + : BaseControl(manager) + { + vPos = pos; vSize = size; sText = text; + } + + void Button::Update(olc::PixelGameEngine* pge) + { + if (m_state == State::Disabled || !bVisible) + return; + + bPressed = false; + bReleased = false; + float fElapsedTime = pge->GetElapsedTime(); + + olc::vf2d vMouse = pge->GetMousePos(); + if (m_state != State::Click) + { + if (vMouse.x >= vPos.x && vMouse.x < vPos.x + vSize.x && + vMouse.y >= vPos.y && vMouse.y < vPos.y + vSize.y) + { + m_fTransition += fElapsedTime * m_manager.fHoverSpeedOn; + m_state = State::Hover; + + bPressed = pge->GetMouse(olc::Mouse::LEFT).bPressed; + if (bPressed) + { + m_state = State::Click; + } + + bHeld = pge->GetMouse(olc::Mouse::LEFT).bHeld; + } + else + { + m_fTransition -= fElapsedTime * m_manager.fHoverSpeedOff; + m_state = State::Normal; + } + } + else + { + bHeld = pge->GetMouse(olc::Mouse::LEFT).bHeld; + bReleased = pge->GetMouse(olc::Mouse::LEFT).bReleased; + if (bReleased) m_state = State::Normal; + } + + m_fTransition = std::clamp(m_fTransition, 0.0f, 1.0f); + } + + void Button::Draw(olc::PixelGameEngine* pge) + { + if (!bVisible) + return; + + switch (m_state) + { + case State::Disabled: + pge->FillRect(vPos, vSize, m_manager.colDisable); + break; + case State::Normal: + case State::Hover: + pge->FillRect(vPos, vSize, olc::PixelLerp(m_manager.colNormal, m_manager.colHover, m_fTransition)); + break; + case State::Click: + pge->FillRect(vPos, vSize, m_manager.colClick); + break; + } + + pge->DrawRect(vPos, vSize - olc::vf2d(1, 1), m_manager.colBorder); + olc::vf2d vText = pge->GetTextSizeProp(sText); + pge->DrawStringProp(vPos + (vSize - vText) * 0.5f, sText, m_manager.colText); + } + + void Button::DrawDecal(olc::PixelGameEngine* pge) + { + if (!bVisible) + return; + + switch (m_state) + { + case State::Disabled: + pge->FillRectDecal(vPos + olc::vf2d(1, 1), vSize - olc::vf2d(2, 2), m_manager.colDisable); + break; + case State::Normal: + case State::Hover: + pge->FillRectDecal(vPos + olc::vf2d(1, 1), vSize - olc::vf2d(2, 2), olc::PixelLerp(m_manager.colNormal, m_manager.colHover, m_fTransition)); + break; + case State::Click: + pge->FillRectDecal(vPos + olc::vf2d(1, 1), vSize - olc::vf2d(2, 2), m_manager.colClick); + break; + } + pge->SetDecalMode(olc::DecalMode::WIREFRAME); + pge->FillRectDecal(vPos + olc::vf2d(1, 1), vSize - olc::vf2d(2, 2), m_manager.colBorder); + pge->SetDecalMode(olc::DecalMode::NORMAL); + + olc::vf2d vText = pge->GetTextSizeProp(sText); + pge->DrawStringPropDecal(vPos + (vSize - vText) * 0.5f, sText, m_manager.colText); + } +#pragma endregion + + +#pragma region ImageButton + ImageButton::ImageButton(olc::QuickGUI::Manager& manager, const olc::Renderable& icon, const olc::vf2d& pos, const olc::vf2d& size) + : Button(manager, "", pos, size), pIcon(icon) + { + + } + + void ImageButton::Draw(olc::PixelGameEngine* pge) + { + Button::Draw(pge); + pge->DrawSprite(vPos + olc::vi2d(4, 4), pIcon.Sprite()); + } + + void 
ImageButton::DrawDecal(olc::PixelGameEngine* pge) + { + Button::DrawDecal(pge); + pge->DrawDecal(vPos + olc::vi2d(4, 4), pIcon.Decal()); + } +#pragma endregion + + +#pragma region ImageCheckBox + ImageCheckBox::ImageCheckBox(olc::QuickGUI::Manager& manager, const olc::Renderable& gfx, const bool check, const olc::vf2d& pos, const olc::vf2d& size) + : ImageButton(manager, gfx, pos, size) + { + bChecked = check; + } + + void ImageCheckBox::Update(olc::PixelGameEngine* pge) + { + if (m_state == State::Disabled || !bVisible) + return; + + ImageButton::Update(pge); + if (bPressed) bChecked = !bChecked; + } + + void ImageCheckBox::Draw(olc::PixelGameEngine* pge) + { + ImageButton::Draw(pge); + + if (bChecked) + pge->DrawRect(vPos + olc::vf2d(2, 2), vSize - olc::vi2d(5, 5), m_manager.colBorder); + } + + void ImageCheckBox::DrawDecal(olc::PixelGameEngine* pge) + { + if (!bVisible) + return; + + ImageButton::DrawDecal(pge); + + pge->SetDecalMode(olc::DecalMode::WIREFRAME); + pge->FillRectDecal(vPos + olc::vf2d(2, 2), vSize - olc::vf2d(4, 4), m_manager.colBorder); + pge->SetDecalMode(olc::DecalMode::NORMAL); + + olc::vf2d vText = pge->GetTextSizeProp(sText); + pge->DrawStringPropDecal(vPos + (vSize - vText) * 0.5f, sText, m_manager.colText); + } +#pragma endregion + + +#pragma region CheckBox + CheckBox::CheckBox(olc::QuickGUI::Manager& manager, const std::string& text, const bool check, const olc::vf2d& pos, const olc::vf2d& size) + : Button(manager, text, pos, size) + { + bChecked = check; + } + + void CheckBox::Update(olc::PixelGameEngine* pge) + { + if (m_state == State::Disabled || !bVisible) + return; + + Button::Update(pge); + if (bPressed) + bChecked = !bChecked; + } + + void CheckBox::Draw(olc::PixelGameEngine* pge) + { + if (!bVisible) + return; + + Button::Draw(pge); + + if (bChecked) + pge->DrawRect(vPos + olc::vf2d(2, 2), vSize - olc::vi2d(5, 5), m_manager.colBorder); + } + + void CheckBox::DrawDecal(olc::PixelGameEngine* pge) + { + if (!bVisible) + return; + + Button::DrawDecal(pge); + + if (bChecked) + { + pge->SetDecalMode(olc::DecalMode::WIREFRAME); + pge->FillRectDecal(vPos + olc::vf2d(2, 2), vSize - olc::vf2d(4, 4), m_manager.colBorder); + pge->SetDecalMode(olc::DecalMode::NORMAL); + } + + olc::vf2d vText = pge->GetTextSizeProp(sText); + pge->DrawStringPropDecal(vPos + (vSize - vText) * 0.5f, sText, m_manager.colText); + } +#pragma endregion + +#pragma region Slider + Slider::Slider(olc::QuickGUI::Manager& manager, const olc::vf2d& posmin, const olc::vf2d& posmax, const float valmin, const float valmax, const float value) + : BaseControl(manager) + { + vPosMin = posmin; vPosMax = posmax; fMin = valmin; fMax = valmax; fValue = value; + } + + void Slider::Update(olc::PixelGameEngine* pge) + { + if (m_state == State::Disabled || !bVisible) + return; + + float fElapsedTime = pge->GetElapsedTime(); + + olc::vf2d vMouse = pge->GetMousePos(); + bHeld = false; + if (m_state == State::Click) + { + olc::vf2d d = vPosMax - vPosMin; + float u = d.dot(vMouse - vPosMin) / d.mag2(); + fValue = u * (fMax - fMin) + fMin; + bHeld = true; + } + else + { + olc::vf2d vSliderPos = vPosMin + (vPosMax - vPosMin) * ((fValue - fMin) / (fMax - fMin)); + if ((vMouse - vSliderPos).mag2() <= int32_t(m_manager.fGrabRad) * int32_t(m_manager.fGrabRad)) + { + m_fTransition += fElapsedTime * m_manager.fHoverSpeedOn; + m_state = State::Hover; + if (pge->GetMouse(olc::Mouse::LEFT).bPressed) + { + m_state = State::Click; + bPressed = true; + } + } + else + m_state = State::Normal; + } + + if 
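+			/* releasing the left button anywhere ends the interaction, even if the
+			   cursor has moved off the grab handle */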
(pge->GetMouse(olc::Mouse::LEFT).bReleased) + { + m_state = State::Normal; + bReleased = true; + } + + if (m_state == State::Normal) + { + m_fTransition -= fElapsedTime * m_manager.fHoverSpeedOff; + m_state = State::Normal; + bHeld = false; + } + + fValue = std::clamp(fValue, fMin, fMax); + m_fTransition = std::clamp(m_fTransition, 0.0f, 1.0f); + } + + void Slider::Draw(olc::PixelGameEngine* pge) + { + if (!bVisible) + return; + + pge->DrawLine(vPosMin, vPosMax, m_manager.colBorder); + olc::vf2d vSliderPos = vPosMin + (vPosMax - vPosMin) * ((fValue - fMin) / (fMax - fMin)); + + switch (m_state) + { + case State::Disabled: + pge->FillCircle(vSliderPos, int32_t(m_manager.fGrabRad), m_manager.colDisable); + break; + case State::Normal: + case State::Hover: + pge->FillCircle(vSliderPos, int32_t(m_manager.fGrabRad), olc::PixelLerp(m_manager.colNormal, m_manager.colHover, m_fTransition)); + break; + case State::Click: + pge->FillCircle(vSliderPos, int32_t(m_manager.fGrabRad), m_manager.colClick); + break; + } + + + pge->DrawCircle(vSliderPos, int32_t(m_manager.fGrabRad), m_manager.colBorder); + } + + void Slider::DrawDecal(olc::PixelGameEngine* pge) + { + if (!bVisible) + return; + + pge->DrawLineDecal(vPosMin, vPosMax, m_manager.colBorder); + olc::vf2d vSliderPos = vPosMin + (vPosMax - vPosMin) * ((fValue - fMin) / (fMax - fMin)); + + switch (m_state) + { + case State::Disabled: + pge->FillRectDecal(vSliderPos - olc::vf2d(m_manager.fGrabRad, m_manager.fGrabRad), olc::vf2d(m_manager.fGrabRad, m_manager.fGrabRad) * 2.0f, m_manager.colDisable); + break; + case State::Normal: + case State::Hover: + pge->FillRectDecal(vSliderPos - olc::vf2d(m_manager.fGrabRad, m_manager.fGrabRad), olc::vf2d(m_manager.fGrabRad, m_manager.fGrabRad) * 2.0f, olc::PixelLerp(m_manager.colNormal, m_manager.colHover, m_fTransition)); + break; + case State::Click: + pge->FillRectDecal(vSliderPos - olc::vf2d(m_manager.fGrabRad, m_manager.fGrabRad), olc::vf2d(m_manager.fGrabRad, m_manager.fGrabRad) * 2.0f, m_manager.colClick); + break; + } + + pge->SetDecalMode(olc::DecalMode::WIREFRAME); + pge->FillRectDecal(vSliderPos - olc::vf2d(m_manager.fGrabRad, m_manager.fGrabRad), olc::vf2d(m_manager.fGrabRad, m_manager.fGrabRad) * 2.0f, m_manager.colBorder); + pge->SetDecalMode(olc::DecalMode::NORMAL); + } + + +#pragma endregion + +#pragma region ListBox + ListBox::ListBox(olc::QuickGUI::Manager& manager, std::vector& vList, const olc::vf2d& pos, const olc::vf2d& size) + : BaseControl(manager), m_vList(vList) + { + m_group.CopyThemeFrom(m_manager); + vPos = pos; + vSize = size; + m_pSlider = new Slider(m_group, { pos.x + size.x - m_manager.fGrabRad - 1, pos.y + m_manager.fGrabRad + 1 }, + { pos.x + size.x - m_manager.fGrabRad - 1, pos.y + size.y - m_manager.fGrabRad - 1 }, 0, float(m_vList.size()), 0); + } + + void ListBox::Update(olc::PixelGameEngine* pge) + { + if (m_state == State::Disabled || !bVisible) + return; + + + nPreviouslySelectedItem = nSelectedItem; + olc::vf2d vMouse = pge->GetMousePos() - vPos + olc::vi2d(2,0); + if (pge->GetMouse(olc::Mouse::LEFT).bPressed) + { + if (vMouse.x >= 0 && vMouse.x < vSize.x - (m_group.fGrabRad * 2) && vMouse.y >= 0 && vMouse.y < vSize.y) + { + + nSelectedItem = size_t(m_pSlider->fValue + vMouse.y / 10); + } + } + + nSelectedItem = std::clamp(nSelectedItem, size_t(0), m_vList.size()-1); + + bSelectionChanged = nSelectedItem != nPreviouslySelectedItem; + + + m_pSlider->fMax = float(m_vList.size()); + m_group.Update(pge); + } + + void ListBox::Draw(olc::PixelGameEngine* pge) + { + if 
(!bVisible) + return; + + if (bHasBackground) + { + pge->FillRect(vPos + olc::vf2d(1, 1), vSize - olc::vf2d(2, 2), m_manager.colNormal); + } + + if (bHasBorder) + pge->DrawRect(vPos, vSize - olc::vf2d(1, 1), m_manager.colBorder); + + + size_t idx0 = size_t(m_pSlider->fValue); + size_t idx1 = std::min(idx0 + size_t((vSize.y - 4) / 10), m_vList.size()); + + olc::vf2d vTextPos = vPos + olc::vf2d(2,2); + for (size_t idx = idx0; idx < idx1; idx++) + { + if (idx == nSelectedItem) + pge->FillRect(vTextPos - olc::vi2d(1,1), {int32_t(vSize.x - m_group.fGrabRad * 2), 10}, m_group.colHover); + pge->DrawStringProp(vTextPos, m_vList[idx]); + vTextPos.y += 10; + } + + m_group.Draw(pge); + } + + void ListBox::DrawDecal(olc::PixelGameEngine* pge) + { + if (!bVisible) + return; + + if (bHasBackground) + pge->FillRectDecal(vPos + olc::vf2d(1, 1), vSize - olc::vf2d(2, 2), m_manager.colNormal); + + size_t idx0 = size_t(m_pSlider->fValue); + size_t idx1 = std::min(idx0 + size_t((vSize.y - 4) / 10), m_vList.size()); + + olc::vf2d vTextPos = vPos + olc::vf2d(2, 2); + for (size_t idx = idx0; idx < idx1; idx++) + { + if (idx == nSelectedItem) + pge->FillRectDecal(vTextPos - olc::vi2d(1, 1), { vSize.x - m_group.fGrabRad * 2.0f, 10.0f }, m_group.colHover); + pge->DrawStringPropDecal(vTextPos, m_vList[idx]); + vTextPos.y += 10; + } + + if (bHasBorder) + { + pge->SetDecalMode(olc::DecalMode::WIREFRAME); + pge->FillRectDecal(vPos + olc::vf2d(1, 1), vSize - olc::vf2d(2, 2), m_manager.colBorder); + pge->SetDecalMode(olc::DecalMode::NORMAL); + } + + m_group.DrawDecal(pge); + } +#pragma endregion + + + +#pragma region Modal + ModalDialog::ModalDialog() : olc::PGEX(true) + { + + // Create File Open Dialog + olc::vi2d vScreenSize = pge->GetScreenSize(); + + m_listDirectory = new ListBox(m_manFileSelect, m_vDirectory, olc::vf2d(20, 20), olc::vf2d(300, 500)); + m_listFiles = new ListBox(m_manFileSelect, m_vFiles, olc::vf2d(330, 20), olc::vf2d(300, 500)); + + m_path = "/"; + for (auto const& dir_entry : std::filesystem::directory_iterator{ m_path }) + { + if(dir_entry.is_directory()) + m_vDirectory.push_back(dir_entry.path().filename().string()); + else + m_vFiles.push_back(dir_entry.path().filename().string()); + } + } + + void ModalDialog::ShowFileOpen(const std::string& sPath) + { + m_bShowDialog = true; + } + + bool ModalDialog::OnBeforeUserUpdate(float& fElapsedTime) + { + if(!m_bShowDialog) return false; + + m_manFileSelect.Update(this->pge); + + if (pge->GetKey(olc::Key::BACK).bPressed) + { + m_path = m_path.parent_path().string() + "/"; + //m_listDirectory->bSelectionChanged = true; + //m_listDirectory->nSelectedItem = 0; + } + + if (m_listDirectory->bSelectionChanged) + { + std::string sDirectory = m_vDirectory[m_listDirectory->nSelectedItem]; + /*if (sDirectory == "..") + m_path = m_path.parent_path().string() + "/"; + else + m_path += sDirectory+ "/";*/ + + + m_path += sDirectory + "/"; + // Reconstruct Lists + m_vDirectory.clear(); + + m_vFiles.clear(); + + + for (auto const& dir_entry : std::filesystem::directory_iterator{ m_path }) + { + if (dir_entry.is_directory() || dir_entry.is_other()) + m_vDirectory.push_back(dir_entry.path().filename().string()); + else + m_vFiles.push_back(dir_entry.path().filename().string()); + } + + //m_vDirectory.push_back(".."); + + //m_listFiles->nSelectedItem = 0; + //m_listDirectory->nSelectedItem = 0; + + } + + pge->DrawStringDecal({ 0,0 }, m_path.string()); + + + + + m_manFileSelect.DrawDecal(this->pge); + + + + if (pge->GetKey(olc::Key::ESCAPE).bPressed) + { + m_bShowDialog = 
false; + return false; + } + + return true; + } +#pragma endregion + +} +#endif // OLC_PGEX_QUICKGUI +#endif // OLC_PGEX_QUICKGUI_H \ No newline at end of file diff --git a/Example/olcPGEX_TTF.h b/Example/olcPGEX_TTF.h new file mode 100644 index 0000000..897106c --- /dev/null +++ b/Example/olcPGEX_TTF.h @@ -0,0 +1,509 @@ +#ifndef OLC_PGEX_TTF_H +#define OLC_PGEX_TTF_H + +#include "olcPixelGameEngine.h" + +#ifdef WIN32 +#include +#pragma comment(lib, "freetype.lib") +#else +#include +#endif + +#include +#include +#include +#include +#include FT_FREETYPE_H +#include FT_GLYPH_H + +namespace olc { + struct FontRect { + olc::vi2d offset; + olc::vi2d size; + }; + + class Font : public olc::PGEX { + public: + Font() = default; + + ~Font() { + if (fontFace != nullptr) { + FT_Done_Face(fontFace); + } + } + + Font(std::string path, int fontSize) : fontSize(fontSize) { + FT_Error error = FT_New_Face(library, path.c_str(), 0, &fontFace); + if (error) { + const char *errorString = FT_Error_String(error); + if (errorString == nullptr) { + std::cerr + << "An unknown error occured while loading the font! Error code: " + << error << "\n"; + } else { + std::cerr << errorString << "\n"; + } + } + + error = FT_Set_Pixel_Sizes(fontFace, 0, fontSize); + if (error) { + const char *errorString = FT_Error_String(error); + if (errorString == nullptr) { + std::cerr + << "An unknown error occured while loading the font!Error Code: " + << error << "\n"; + } else { + std::cerr << errorString << "\n"; + } + } + } + + Font(const Font &other) = delete; + Font(Font &&other) { + fontFace = other.fontFace; + fontSize = other.fontSize; + fallbacks = std::move(other.fallbacks); + other.fontFace = nullptr; + other.fallbacks.clear(); + } + + Font &operator=(const Font &other) = delete; + Font &operator=(Font &&other) { + fontFace = other.fontFace; + fontSize = other.fontSize; + fallbacks = std::move(other.fallbacks); + other.fontFace = nullptr; + other.fallbacks.clear(); + return *this; + } + + void DrawString(std::u32string string, int x, int y, + olc::Pixel color = olc::BLACK, float angle = 0.0f) { + FT_Matrix rotMat; + rotMat.xx = (FT_Fixed)(std::cos(angle) * 0x10000L); + rotMat.xy = (FT_Fixed)(-std::sin(angle) * 0x10000L); + rotMat.yx = (FT_Fixed)(std::sin(angle) * 0x10000L); + rotMat.yy = (FT_Fixed)(std::cos(angle) * 0x10000L); + + FT_Vector pen; + pen.x = x * 64; + pen.y = (pge->ScreenHeight() - y) * 64; + + olc::Pixel::Mode prevMode = pge->GetPixelMode(); + pge->SetPixelMode(olc::Pixel::ALPHA); + + Font *prevToUse = nullptr; + + for (size_t i = 0; i < string.size(); i++) { + char32_t chr = string[i]; + + + Font *toUse = this; + FT_UInt chrIndex = GetCharIndex(chr); + + if (chrIndex == 0) { + for (auto &font : fallbacks) { + FT_UInt fbChr = font.GetCharIndex(chr); + if (fbChr != 0) { + chrIndex = fbChr; + toUse = &font; + } + } + } + + if (prevToUse == toUse) { + FT_Vector kern; + FT_Get_Kerning(fontFace, string[i - 1], chr, + FT_KERNING_DEFAULT, &kern); + + pen.x += kern.x; + pen.y += kern.y; + } + + FT_Set_Transform(toUse->fontFace, &rotMat, &pen); + FT_Error error = FT_Load_Char(toUse->fontFace, chr, + FT_LOAD_RENDER | FT_LOAD_COLOR); + if (error) { + const char *errorString = FT_Error_String(error); + if (errorString == nullptr) { + std::cerr + << "An unknown error occured while rendering a glyph! 
Error code: " + << error << "\n"; + } else { + std::cerr << errorString << "\n"; + } + return; + } + + FT_Bitmap bmp = toUse->fontFace->glyph->bitmap; + FT_GlyphSlot slot = toUse->fontFace->glyph; + + + pen.x += toUse->fontFace->glyph->advance.x; + pen.y += toUse->fontFace->glyph->advance.y; + + if(chr=='\n'){ + int scaled_line_spacing = fontFace->size->metrics.height; + pen.y -=scaled_line_spacing; + pen.x=x*64; + } else { + DrawBitmap(slot->bitmap_left, + pge->ScreenHeight() - slot->bitmap_top, bmp, color); + } + + prevToUse = toUse; + } + + pge->SetPixelMode(prevMode); + } + + void DrawString(std::u32string string, olc::vi2d pos, + olc::Pixel color = olc::BLACK, float angle = 0.0f) { + DrawString(string, pos.x, pos.y, color, angle); + } + + FontRect GetStringBounds(std::u32string string, float angle = 0.0f) { + FT_Matrix rotMat; + rotMat.xx = (FT_Fixed)(std::cos(angle) * 0x10000L); + rotMat.xy = (FT_Fixed)(-std::sin(angle) * 0x10000L); + rotMat.yx = (FT_Fixed)(std::sin(angle) * 0x10000L); + rotMat.yy = (FT_Fixed)(std::cos(angle) * 0x10000L); + + FT_Vector pen; + pen.x = 0; + pen.y = 0; + + olc::FontRect rect; + constexpr int intMax = std::numeric_limits::max(); + constexpr int intMin = std::numeric_limits::min(); + + int minX = 0; + int minY = 0; + int maxX = 0; + int maxY = 0; + + Font *prevToUse = nullptr; + + for (size_t i = 0; i < string.size(); i++) { + char32_t chr = string[i]; + + Font *toUse = this; + FT_UInt chrIndex = GetCharIndex(chr); + + if (chrIndex == 0) { + for (auto &font : fallbacks) { + FT_UInt fbChr = font.GetCharIndex(chr); + if (fbChr != 0) { + chrIndex = fbChr; + toUse = &font; + } + } + } + + if (prevToUse == toUse) { + FT_Vector kern; + FT_Get_Kerning(fontFace, string[i - 1], chr, + FT_KERNING_DEFAULT, &kern); + + pen.x += kern.x; + pen.y += kern.y; + } + + FT_Set_Transform(toUse->fontFace, &rotMat, &pen); + FT_Error error = FT_Load_Char(toUse->fontFace, chr, + FT_LOAD_BITMAP_METRICS_ONLY); + if (error) { + const char *errorString = FT_Error_String(error); + if (errorString == nullptr) { + std::cerr + << "An unknown error occured while loading a glyph! 
Error code: " + << error << "\n"; + } else { + std::cerr << errorString << "\n"; + } + return olc::FontRect{{0, 0}, {0, 0}}; + } + + FT_GlyphSlot slot = toUse->fontFace->glyph; + FT_Glyph glyph; + FT_Get_Glyph(slot, &glyph); + + FT_BBox bbox; + FT_Glyph_Get_CBox(glyph, FT_GLYPH_BBOX_GRIDFIT, &bbox); + + if (bbox.xMin < minX) { + minX = bbox.xMin; + } + if (bbox.xMax > maxX) { + maxX = bbox.xMax; + } + if (bbox.yMin < minY) { + minY = bbox.yMin; + } + if (bbox.yMax > maxY) { + maxY = bbox.yMax; + } + + pen.x += slot->advance.x; + pen.y += slot->advance.y; + + if(chr=='\n'){ + int scaled_line_spacing = fontFace->size->metrics.height; + pen.y-=scaled_line_spacing; + pen.x=minX; + } + FT_Done_Glyph(glyph); + } + + minX /= 64; + minY /= 64; + maxX /= 64; + maxY /= 64; + return olc::FontRect{{minX, -maxY}, {maxX - minX, maxY - minY}}; + } + + olc::Sprite *RenderStringToSprite(std::u32string string, + olc::Pixel color) { + olc::FontRect rect = GetStringBounds(string); + olc::Sprite *sprite = new olc::Sprite{rect.size.x, rect.size.y}; + + for (int x = 0; x < rect.size.x; x++) { + for (int y = 0; y < rect.size.y; y++) { + sprite->SetPixel(x, y, olc::BLANK); + } + } + + FT_Vector pen; + pen.x = -rect.offset.x; + pen.y = (pge->ScreenHeight() + rect.offset.y) * 64; + + olc::Pixel::Mode prevMode = pge->GetPixelMode(); + pge->SetPixelMode(olc::Pixel::ALPHA); + + olc::Font *prevToUse = nullptr; + + for (size_t i = 0; i < string.size(); i++) { + char32_t chr = string[i]; + + Font *toUse = this; + FT_UInt chrIndex = GetCharIndex(chr); + + if (chrIndex == 0) { + for (auto &font : fallbacks) { + FT_UInt fbChr = font.GetCharIndex(chr); + if (fbChr != 0) { + chrIndex = fbChr; + toUse = &font; + } + } + } + + if (prevToUse == toUse) { + FT_Vector kern; + FT_Get_Kerning(fontFace, string[i - 1], chr, + FT_KERNING_DEFAULT, &kern); + + pen.x += kern.x; + pen.y += kern.y; + } + + FT_Set_Transform(toUse->fontFace, nullptr, &pen); + FT_Error error = FT_Load_Char(toUse->fontFace, chr, + FT_LOAD_RENDER | FT_LOAD_COLOR); + if (error) { + const char *errorString = FT_Error_String(error); + if (errorString == nullptr) { + std::cerr + << "An unknown error occured while rendering a glyph! 
Error code: " + << error << "\n"; + } else { + std::cerr << errorString << "\n"; + } + return nullptr; + } + + FT_Bitmap bmp = toUse->fontFace->glyph->bitmap; + FT_GlyphSlot slot = toUse->fontFace->glyph; + + pen.x += toUse->fontFace->glyph->advance.x; + pen.y += toUse->fontFace->glyph->advance.y; + + if(chr=='\n'){ + int scaled_line_spacing = fontFace->size->metrics.height; + pen.y -=scaled_line_spacing; + pen.x=rect.offset.x; + } else { + DrawBitmapTo(slot->bitmap_left, + pge->ScreenHeight() - slot->bitmap_top, bmp, color, + sprite); + } + } + + pge->SetPixelMode(prevMode); + + return sprite; + } + + olc::Decal *RenderStringToDecal(std::u32string string, + olc::Pixel color) { + Sprite *sprite = RenderStringToSprite(string, color); + olc::Decal *decal = new olc::Decal{sprite}; + return decal; + } + + olc::Renderable RenderStringToRenderable(std::u32string string, + olc::Pixel color) { + Sprite *sprite = RenderStringToSprite(string, color); + olc::Renderable renderable; + renderable.Create(sprite->width, sprite->height); + + for (int x = 0; x < sprite->width; x++) { + for (int y = 0; y < sprite->height; y++) { + renderable.Sprite()->SetPixel(x, y, sprite->GetPixel(x, y)); + } + } + + delete sprite; + + renderable.Decal()->Update(); + + return renderable; + } + + void AddFallbackFont(std::string path) { + fallbacks.emplace_back(path, fontSize); + } + + static bool init() { + FT_Error error = FT_Init_FreeType(&library); + + if (error) { + const char *errorString = FT_Error_String(error); + if (errorString == nullptr) { + std::cerr + << "An unknown error occured while loading the font library! " + "Error code: " + << error << "\n"; + } else { + std::cerr << errorString << "\n"; + } + + return false; + } + return true; + } + + private: + void DrawBitmap(int x, int y, FT_Bitmap bmp, olc::Pixel color) { + switch (bmp.pixel_mode) { + case FT_PIXEL_MODE_MONO: + for (size_t bx = 0; bx < bmp.width; bx++) { + for (size_t by = 0; by < bmp.rows; by++) { + int byteOffset = bx / 8; + char byte = bmp.buffer[by * bmp.pitch + byteOffset]; + bool val = (byte >> (7 - bx % 8)) & 1; + if (val) { + pge->Draw(x + bx, y + by, color); + } + } + } + break; + case FT_PIXEL_MODE_GRAY: + for (size_t bx = 0; bx < bmp.width; bx++) { + for (size_t by = 0; by < bmp.rows; by++) { + uint8_t byte = bmp.buffer[by * bmp.pitch + bx]; + if (byte == 0) { + continue; + } + color.a = byte; + pge->Draw(x + bx, y + by, color); + } + } + break; + case FT_PIXEL_MODE_GRAY2: break; + case FT_PIXEL_MODE_GRAY4: break; + case FT_PIXEL_MODE_LCD: break; + case FT_PIXEL_MODE_LCD_V: break; + case FT_PIXEL_MODE_BGRA: + for (size_t bx = 0; bx < bmp.width; bx++) { + for (size_t by = 0; by < bmp.rows; by++) { + olc::Pixel pixel{ + bmp.buffer[by * bmp.pitch + bx * 4 + 2], + bmp.buffer[by * bmp.pitch + bx * 4 + 1], + bmp.buffer[by * bmp.pitch + bx * 4 + 0], + bmp.buffer[by * bmp.pitch + bx * 4 + 3], + }; + pge->Draw(x + bx, y + by, pixel); + } + } + break; + } + } + void DrawBitmapTo(int x, int y, FT_Bitmap bmp, olc::Pixel color, + olc::Sprite *sprite) { + switch (bmp.pixel_mode) { + case FT_PIXEL_MODE_MONO: + for (size_t bx = 0; bx < bmp.width; bx++) { + for (size_t by = 0; by < bmp.rows; by++) { + int byteOffset = bx / 8; + char byte = bmp.buffer[by * bmp.pitch + byteOffset]; + bool val = (byte >> (7 - bx % 8)) & 1; + if (val) { + sprite->SetPixel(x + bx, y + by, color); + } + } + } + break; + case FT_PIXEL_MODE_GRAY: + for (size_t bx = 0; bx < bmp.width; bx++) { + for (size_t by = 0; by < bmp.rows; by++) { + uint8_t byte = bmp.buffer[by * bmp.pitch 
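+                        /* rows are 'pitch' bytes apart; FT_PIXEL_MODE_GRAY stores one byte per pixel */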
+ bx]; + if (byte == 0) { + continue; + } + color.a = byte; + sprite->SetPixel(x + bx, y + by, color); + } + } + break; + case FT_PIXEL_MODE_GRAY2: break; + case FT_PIXEL_MODE_GRAY4: break; + case FT_PIXEL_MODE_LCD: break; + case FT_PIXEL_MODE_LCD_V: break; + case FT_PIXEL_MODE_BGRA: + for (size_t bx = 0; bx < bmp.width; bx++) { + for (size_t by = 0; by < bmp.rows; by++) { + olc::Pixel pixel{ + bmp.buffer[by * bmp.pitch + bx * 4 + 2], + bmp.buffer[by * bmp.pitch + bx * 4 + 1], + bmp.buffer[by * bmp.pitch + bx * 4 + 0], + bmp.buffer[by * bmp.pitch + bx * 4 + 3], + }; + sprite->SetPixel(x + bx, y + by, pixel); + } + } + break; + } + } + + FT_UInt GetCharIndex(char32_t charCode) { + return FT_Get_Char_Index(fontFace, charCode); + } + + FT_Face fontFace = nullptr; + std::vector fallbacks; + int fontSize; + + static FT_Library library; + }; +} // namespace olc + +#ifdef OLC_PGEX_TTF +#undef OLC_PGEX_TTF + +FT_Library olc::Font::library; + +#endif + +#endif \ No newline at end of file diff --git a/Example/olcSoundWaveEngine.h b/Example/olcSoundWaveEngine.h new file mode 100644 index 0000000..823e12f --- /dev/null +++ b/Example/olcSoundWaveEngine.h @@ -0,0 +1,1971 @@ +/* + +-------------------------------------------------------------+ + | OneLoneCoder Sound Wave Engine v0.02 | + | "You wanted noise? Well is this loud enough?" - javidx9 | + +-------------------------------------------------------------+ + + What is this? + ~~~~~~~~~~~~~ + olc::SoundWaveEngine is a single file, cross platform audio + interface for lightweight applications that just need a bit of + easy audio manipulation. + + It's origins started in the olcNoiseMaker file that accompanied + javidx9's "Code-It-Yourself: Synthesizer" series. It was refactored + and absorbed into the "olcConsoleGameEngine.h" file, and then + refactored again into olcPGEX_Sound.h, that was an extension to + the awesome "olcPixelGameEngine.h" file. + + Alas, it went underused and began to rot, with many myths circulating + that "it doesnt work" and "it shouldn't be used". These untruths + made javidx9 feel sorry for the poor file, and he decided to breathe + some new life into it, in anticipation of new videos! + + License (OLC-3) + ~~~~~~~~~~~~~~~ + + Copyright 2018 - 2022 OneLoneCoder.com + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met : + + 1. Redistributions or derivations of source code must retain the above + copyright notice, this list of conditionsand the following disclaimer. + + 2. Redistributions or derivative works in binary form must reproduce + the above copyright notice.This list of conditions and the following + disclaimer must be reproduced in the documentation and /or other + materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + Links + ~~~~~ + YouTube: https://www.youtube.com/javidx9 + Discord: https://discord.gg/WhwHUMV + Twitter: https://www.twitter.com/javidx9 + Twitch: https://www.twitch.tv/javidx9 + GitHub: https://www.github.com/onelonecoder + Homepage: https://www.onelonecoder.com + Patreon: https://www.patreon.com/javidx9 + + Thanks + ~~~~~~ + Gorbit99, Dragoneye, Puol + + Authors + ~~~~~~~ + slavka, MaGetzUb, cstd, Moros1138 & javidx9 + + (c)OneLoneCoder 2019, 2020, 2021, 2022 +*/ + + +/* + Using & Installing On Microsoft Windows + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Microsoft Visual Studio + ~~~~~~~~~~~~~~~~~~~~~~~ + 1) Include the header file "olcSoundWaveEngine.h" from a .cpp file in your project. + 2) That's it! + + + Code::Blocks + ~~~~~~~~~~~~ + 1) Make sure your compiler toolchain is NOT the default one installed with Code::Blocks. That + one is old, out of date, and a general mess. Instead, use MSYS2 to install a recent and + decent GCC toolchain, then configure Code::Blocks to use it + + Guide for installing recent GCC for Windows: + https://www.msys2.org/ + Guide for configuring code::blocks: + https://solarianprogrammer.com/2019/11/05/install-gcc-windows/ + https://solarianprogrammer.com/2019/11/16/install-codeblocks-gcc-windows-build-c-cpp-fortran-programs/ + + 2) Include the header file "olcSoundWaveEngine.h" from a .cpp file in your project. + 3) Add these libraries to "Linker Options": user32 winmm + 4) Set this "Compiler Option": -std=c++17 +*/ + +/* + Using & Installing On Linux + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + GNU Compiler Collection (GCC) + ~~~~~~~~~~~~~~~~~~~~~~~ + 1) Include the header file "olcSoundWaveEngine.h" from a .cpp file in your project. + 2) Build with the following command: + + g++ olcSoundWaveEngineExample.cpp -o olcSoundWaveEngineExample -lpulse -lpulse-simple -std=c++17 + + 3) That's it! + +*/ + +/* + Using in multiple-file projects + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + If you intend to use olcSoundWaveEngine across multiple files, it's important to only have one + instance of the implementation. This is done using the compiler preprocessor definition: OLC_SOUNDWAVE + + This is defined typically before the header is included in teh translation unit you wish the implementation + to be associated with. 
To avoid things getting messy I recommend you create a file "olcSoundWaveEngine.cpp" + and that file includes ONLY the following code: + + #define OLC_SOUNDWAVE + #include "olcSoundWaveEngine.h" +*/ + +/* + 0.01: olcPGEX_Sound.h reworked + +Changed timekeeping to double, added accuracy fix - Thanks scripticuk + +Concept of audio drivers and interface + +All internal timing now double precision + +All internal sampling now single precsion + +Loading form WAV files + +LERPed sampling from all buffers + +Multi-channel audio support + 0.02: +Support multi-channel wave files + +Support for 24-bit wave files + +Wave files are now sample rate invariant + +Linux PulseAudio Updated + +Linux ALSA Updated + +WinMM Updated + +CMake Compatibility + =Fix wave format durations preventing playback + =Various bug fixes + + SIG Modifications: + - Add pause flag and pause timing flags to handle pausing sound playback. +*/ + +#pragma once +#ifndef OLC_SOUNDWAVE_H +#define OLC_SOUNDWAVE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Compiler/System Sensitivity +#if !defined(SOUNDWAVE_USING_WINMM) && !defined(SOUNDWAVE_USING_WASAPI) && \ + !defined(SOUNDWAVE_USING_XAUDIO) && !defined(SOUNDWAVE_USING_OPENAL) && \ + !defined(SOUNDWAVE_USING_ALSA) && !defined(SOUNDWAVE_USING_SDLMIXER) && \ + !defined(SOUNDWAVE_USING_PULSE) \ + +#if defined(_WIN32) +#define SOUNDWAVE_USING_WINMM +#endif +#if defined(__linux__) +#define SOUNDWAVE_USING_PULSE +#endif +#if defined(__APPLE__) +#define SOUNDWAVE_USING_SDLMIXER +#endif +#if defined(__EMSCRIPTEN__) +#define SOUNDWAVE_USING_SDLMIXER +#endif + +#endif + +namespace olc::sound +{ + namespace wave + { + // Physically represents a .WAV file, but the data is stored + // as normalised floating point values + template + class File + { + public: + File() = default; + + File(const size_t nChannels, const size_t nSampleSize, const size_t nSampleRate, const size_t nSamples) + { + m_nChannels = nChannels; + m_nSampleSize = nSampleSize; + m_nSamples = nSamples; + m_nSampleRate = nSampleRate; + m_dDuration = double(m_nSamples) / double(m_nSampleRate); + m_dDurationInSamples = double(m_nSamples); + + m_pRawData = std::make_unique(m_nSamples * m_nChannels); + } + + public: + T* data() const + { + return m_pRawData.get(); + } + + size_t samples() const + { + return m_nSamples; + } + + size_t channels() const + { + return m_nChannels; + } + + size_t samplesize() const + { + return m_nSampleSize; + } + + size_t samplerate() const + { + return m_nSampleRate; + } + + double duration() const + { + return m_dDuration; + } + + double durationInSamples() const + { + return m_dDurationInSamples; + } + + bool LoadFile(const std::string& sFilename) + { + std::ifstream ifs(sFilename, std::ios::binary); + if (!ifs.is_open()) + return false; + + struct WaveFormatHeader + { + uint16_t wFormatTag; /* format type */ + uint16_t nChannels; /* number of channels (i.e. mono, stereo...) 
*/ + uint32_t nSamplesPerSec; /* sample rate */ + uint32_t nAvgBytesPerSec; /* for buffer estimation */ + uint16_t nBlockAlign; /* block size of data */ + uint16_t wBitsPerSample; /* number of bits per sample of mono data */ + }; + + WaveFormatHeader header{ 0 }; + + m_pRawData.reset(); + + char dump[4]; + ifs.read(dump, sizeof(uint8_t) * 4); // Read "RIFF" + if (strncmp(dump, "RIFF", 4) != 0) return false; + + ifs.read(dump, sizeof(uint8_t) * 4); // Not Interested + ifs.read(dump, sizeof(uint8_t) * 4); // Read "WAVE" + if (strncmp(dump, "WAVE", 4) != 0) return false; + + // Read Wave description chunk + ifs.read(dump, sizeof(uint8_t) * 4); // Read "fmt " + ifs.read(dump, sizeof(uint8_t) * 4); // Not Interested + ifs.read((char*)&header, sizeof(WaveFormatHeader)); // Read Wave Format Structure chunk + + // Search for audio data chunk + int32_t nChunksize = 0; + ifs.read(dump, sizeof(uint8_t) * 4); // Read chunk header + ifs.read((char*)&nChunksize, sizeof(uint32_t)); // Read chunk size + + while (strncmp(dump, "data", 4) != 0) + { + // Not audio data, so just skip it + ifs.seekg(nChunksize, std::ios::cur); + ifs.read(dump, sizeof(uint8_t) * 4); // Read next chunk header + ifs.read((char*)&nChunksize, sizeof(uint32_t)); // Read next chunk size + } + + // Finally got to data, so read it all in and convert to float samples + m_nSampleSize = header.wBitsPerSample >> 3; + m_nSamples = nChunksize / (header.nChannels * m_nSampleSize); + m_nChannels = header.nChannels; + m_nSampleRate = header.nSamplesPerSec; + m_pRawData = std::make_unique(m_nSamples * m_nChannels); + m_dDuration = double(m_nSamples) / double(m_nSampleRate); + m_dDurationInSamples = double(m_nSamples); + + T* pSample = m_pRawData.get(); + + // Read in audio data and normalise + for (long i = 0; i < m_nSamples; i++) + { + for (int c = 0; c < m_nChannels; c++) + { + switch (m_nSampleSize) + { + case 1: + { + int8_t s = 0; + ifs.read((char*)&s, sizeof(int8_t)); + *pSample = T(s) / T(std::numeric_limits::max()); + } + break; + + case 2: + { + int16_t s = 0; + ifs.read((char*)&s, sizeof(int16_t)); + *pSample = T(s) / T(std::numeric_limits::max()); + } + break; + + case 3: // 24-bit + { + int32_t s = 0; + ifs.read((char*)&s, 3); + if (s & (1 << 23)) s |= 0xFF000000; + *pSample = T(s) / T(std::pow(2, 23) - 1); + } + break; + + case 4: + { + int32_t s = 0; + ifs.read((char*)&s, sizeof(int32_t)); + *pSample = T(s) / T(std::numeric_limits::max()); + } + break; + } + + pSample++; + } + } + return true; + } + + bool SaveFile(const std::string& sFilename) + { + return false; + } + + + protected: + std::unique_ptr m_pRawData; + size_t m_nSamples = 0; + size_t m_nChannels = 0; + size_t m_nSampleRate = 0; + size_t m_nSampleSize = 0; + double m_dDuration = 0.0; + double m_dDurationInSamples = 0.0; + }; + + template + class View + { + public: + View() = default; + + View(const T* pData, const size_t nSamples) + { + SetData(pData, nSamples); + } + + public: + void SetData(T const* pData, const size_t nSamples, const size_t nStride = 1, const size_t nOffset = 0) + { + m_pData = pData; + m_nSamples = nSamples; + m_nStride = nStride; + m_nOffset = nOffset; + } + + double GetSample(const double dSample) const + { + double d1 = std::floor(dSample); + size_t p1 = static_cast(d1); + size_t p2 = p1 + 1; + + double t = dSample - d1; + double a = GetValue(p1); + double b = GetValue(p2); + + return a + t * (b - a); // std::lerp in C++20 + } + + std::pair GetRange(const double dSample1, const double dSample2) const + { + if (dSample1 < 0 || dSample2 < 0) + 
return { 0,0 }; + + if (dSample1 > m_nSamples && dSample2 > m_nSamples) + return { 0,0 }; + + double dMin, dMax; + + double d = GetSample(dSample1); + dMin = dMax = d; + + size_t n1 = static_cast(std::ceil(dSample1)); + size_t n2 = static_cast(std::floor(dSample2)); + for (size_t n = n1; n < n2; n++) + { + d = GetValue(n); + dMin = std::min(dMin, d); + dMax = std::max(dMax, d); + } + + d = GetSample(dSample2); + dMin = std::min(dMin, d); + dMax = std::max(dMax, d); + + return { dMin, dMax }; + } + + T GetValue(const size_t nSample) const + { + if (nSample >= m_nSamples) + return 0; + else + return m_pData[m_nOffset + nSample * m_nStride]; + } + + private: + const T* m_pData = nullptr; + size_t m_nSamples = 0; + size_t m_nStride = 1; + size_t m_nOffset = 0; + }; + } + + template + class Wave_generic + { + public: + Wave_generic() = default; + Wave_generic(std::string sWavFile) { LoadAudioWaveform(sWavFile); } + Wave_generic(std::istream& sStream) { LoadAudioWaveform(sStream); } + Wave_generic(const char* pData, const size_t nBytes) { LoadAudioWaveform(pData, nBytes); } + + Wave_generic(const size_t nChannels, const size_t nSampleSize, const size_t nSampleRate, const size_t nSamples) + { + vChannelView.clear(); + file = wave::File(nChannels, nSampleSize, nSampleRate, nSamples); + vChannelView.resize(file.channels()); + for (uint32_t c = 0; c < file.channels(); c++) + vChannelView[c].SetData(file.data(), file.samples(), file.channels(), c); + } + + bool LoadAudioWaveform(std::string sWavFile) + { + vChannelView.clear(); + + if (file.LoadFile(sWavFile)) + { + // Setup views for each channel + vChannelView.resize(file.channels()); + for (uint32_t c = 0; c < file.channels(); c++) + vChannelView[c].SetData(file.data(), file.samples(), file.channels(), c); + + return true; + } + + return false; + } + + + + bool LoadAudioWaveform(std::istream& sStream) { return false; } + bool LoadAudioWaveform(const char* pData, const size_t nBytes) { return false; } + + std::vector> vChannelView; + wave::File file; + }; + + typedef Wave_generic Wave; + + struct WaveInstance + { + Wave* pWave = nullptr; + double dInstanceTime = 0.0; + double dDuration = 0.0; + double dSpeedModifier = 1.0; + bool bFinished = false; + bool bLoop = false; + bool bFlagForStop = false; + bool bPaused = false; + double dPauseTime = 0.0; + }; + + typedef std::list::iterator PlayingWave; + + namespace driver + { + class Base; + } + + // Container class for Basic Sound Manipulation + class WaveEngine + { + + public: + WaveEngine(); + virtual ~WaveEngine(); + + // Configure Audio Hardware + bool InitialiseAudio(uint32_t nSampleRate = 44100, uint32_t nChannels = 1, uint32_t nBlocks = 8, uint32_t nBlockSamples = 512); + + // Release Audio Hardware + bool DestroyAudio(); + + // Call to get the names of all the devices capable of audio output - DACs. An entry + // from the returned collection can be specified as the device to use in UseOutputDevice() + std::vector GetOutputDevices(); + + // Specify a device for audio output prior to calling InitialiseAudio() + void UseOutputDevice(const std::string& sDeviceOut); + + // Call to get the names of all the devices capable of audio input - ADCs. 
An entry + // from the returned collection can be specified as the device to use in UseInputDevice() + std::vector GetInputDevices(); + + // Specify a device for audio input prior to calling InitialiseAudio() + void UseInputDevice(const std::string& sDeviceOut); + + + void SetCallBack_NewSample(std::function func); + void SetCallBack_SynthFunction(std::function func); + void SetCallBack_FilterFunction(std::function func); + + public: + void SetOutputVolume(const float fVolume); + + + + PlayingWave PlayWaveform(Wave* pWave, bool bLoop = false, double dSpeed = 1.0); + void StopWaveform(const PlayingWave& w); + void StopAll(); + + private: + uint32_t FillOutputBuffer(std::vector& vBuffer, const uint32_t nBufferOffset, const uint32_t nRequiredSamples); + + private: + std::unique_ptr m_driver; + std::function m_funcNewSample; + std::function m_funcUserSynth; + std::function m_funcUserFilter; + + + private: + uint32_t m_nSampleRate = 44100; + uint32_t m_nChannels = 1; + uint32_t m_nBlocks = 8; + uint32_t m_nBlockSamples = 512; + double m_dSamplePerTime = 44100.0; + double m_dTimePerSample = 1.0 / 44100; + double m_dGlobalTime = 0.0; + float m_fOutputVolume = 1.0; + + std::string m_sInputDevice; + std::string m_sOutputDevice; + + private: + std::list m_listWaves; + + public: + uint32_t GetSampleRate() const; + uint32_t GetChannels() const; + uint32_t GetBlocks() const; + uint32_t GetBlockSampleCount() const; + double GetTimePerSample() const; + + + // Friends, for access to FillOutputBuffer from Drivers + friend class driver::Base; + + }; + + namespace driver + { + // DRIVER DEVELOPERS ONLY!!! + // + // This interface allows SoundWave to exchange data with OS audio systems. It + // is not intended of use by regular users. + class Base + { + public: + Base(WaveEngine* pHost); + virtual ~Base(); + + public: + // [IMPLEMENT] Opens a connection to the hardware device, returns true if success + virtual bool Open(const std::string& sOutputDevice, const std::string& sInputDevice); + // [IMPLEMENT] Starts a process that repeatedly requests audio, returns true if success + virtual bool Start(); + // [IMPLEMENT] Stops a process form requesting audio + virtual void Stop(); + // [IMPLEMENT] Closes any connections to hardware devices + virtual void Close(); + + virtual std::vector EnumerateOutputDevices(); + virtual std::vector EnumerateInputDevices(); + + protected: + // [IMPLEMENT IF REQUIRED] Called by driver to exchange data with SoundWave System. Your + // implementation will call this function providing a "DAC" buffer to be filled by + // SoundWave from a buffer of floats filled by the user. + void ProcessOutputBlock(std::vector& vFloatBuffer, std::vector& vDACBuffer); + + // [IMPLEMENT IF REQUIRED] Called by driver to exchange data with SoundWave System. 
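+		/* Illustrative sketch (not part of the original header): a platform driver derives
+		   from Base and pumps audio through these helpers. The class and buffer names below
+		   are hypothetical:
+
+		       class NullDriver : public olc::sound::driver::Base
+		       {
+		       public:
+		           NullDriver(olc::sound::WaveEngine* pHost) : Base(pHost) {}
+		       protected:
+		           bool Open(const std::string&, const std::string&) override { return true; }
+		           bool Start() override { return true; }  // would launch a feeder thread
+		           void Stop() override {}
+		           void Close() override {}
+		           // the feeder thread repeatedly calls GetFullOutputBlock(vMyFloats)
+		           // and forwards the mixed float samples to the OS audio API
+		       };
+		*/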
+ void GetFullOutputBlock(std::vector& vFloatBuffer); + + // Handle to SoundWave, to interrogate optons, and get user data + WaveEngine* m_pHost = nullptr; + }; + } + + + namespace synth + { + class Property + { + public: + double value = 0.0f; + + public: + Property() = default; + Property(double f); + + public: + Property& operator =(const double f); + }; + + + class Trigger + { + + }; + + + class Module + { + public: + virtual void Update(uint32_t nChannel, double dTime, double dTimeStep) = 0; + }; + + + class ModularSynth + { + public: + ModularSynth(); + + public: + bool AddModule(Module* pModule); + bool RemoveModule(Module* pModule); + bool AddPatch(Property* pInput, Property* pOutput); + bool RemovePatch(Property* pInput, Property* pOutput); + + + public: + void UpdatePatches(); + void Update(uint32_t nChannel, double dTime, double dTimeStep); + + protected: + std::vector m_vModules; + std::vector> m_vPatches; + }; + + + namespace modules + { + class Oscillator : public Module + { + public: + enum class Type + { + Sine, + Saw, + Square, + Triangle, + PWM, + Wave, + Noise, + }; + + public: + // Primary frequency of oscillation + Property frequency = 0.0f; + // Primary amplitude of output + Property amplitude = 1.0f; + // LFO input if required + Property lfo_input = 0.0f; + // Primary Output + Property output; + // Tweakable Parameter + Property parameter = 0.0; + + Type waveform = Type::Sine; + + Wave* pWave = nullptr; + + private: + double phase_acc = 0.0f; + double max_frequency = 20000.0; + uint32_t random_seed = 0xB00B1E5; + + double rndDouble(double min, double max); + uint32_t rnd(); + + + public: + virtual void Update(uint32_t nChannel, double dTime, double dTimeStep) override; + + }; + } + } + + +} + +#if defined(SOUNDWAVE_USING_WINMM) +#define _WIN32_LEAN_AND_MEAN +#include +#undef min +#undef max + +namespace olc::sound::driver +{ + class WinMM : public Base + { + public: + WinMM(WaveEngine* pHost); + ~WinMM(); + + protected: + bool Open(const std::string& sOutputDevice, const std::string& sInputDevice) override; + bool Start() override; + void Stop() override; + void Close() override; + + private: + void DriverLoop(); + void FreeAudioBlock(); + static void CALLBACK waveOutProc(HWAVEOUT hWaveOut, UINT uMsg, DWORD_PTR dwInstance, DWORD dwParam1, DWORD dwParam2); + HWAVEOUT m_hwDevice = nullptr; + std::thread m_thDriverLoop; + std::atomic m_bDriverLoopActive{ false }; + std::unique_ptr[]> m_pvBlockMemory; + std::unique_ptr m_pWaveHeaders; + std::atomic m_nBlockFree = 0; + std::condition_variable m_cvBlockNotZero; + std::mutex m_muxBlockNotZero; + uint32_t m_nBlockCurrent = 0; + }; +} +#endif // SOUNDWAVE_USING_WINMM + +#if defined(SOUNDWAVE_USING_SDLMIXER) + +#if defined(__EMSCRIPTEN__) +#include +#else +#include +#endif + +namespace olc::sound::driver +{ + class SDLMixer final : public Base + { + public: + explicit SDLMixer(WaveEngine* pHost); + ~SDLMixer() final; + + protected: + bool Open(const std::string& sOutputDevice, const std::string& sInputDevice) final; + bool Start() final; + void Stop() final; + void Close() final; + + private: + void FillChunkBuffer(const std::vector& userData) const; + + static void SDLMixerCallback(int channel); + + private: + bool m_keepRunning = false; + Uint16 m_haveFormat = AUDIO_F32SYS; + std::vector audioBuffer; + Mix_Chunk audioChunk; + + static SDLMixer* instance; + }; +} + +#endif // SOUNDWAVE_USING_SDLMIXER + +#if defined(SOUNDWAVE_USING_ALSA) +#include +#include +#include + +namespace olc::sound::driver +{ + // Not thread-safe + 
template + class RingBuffer + { + public: + RingBuffer() + { } + + void Resize(unsigned int bufnum = 0, unsigned int buflen = 0) + { + m_vBuffers.resize(bufnum); + for (auto& vBuffer : m_vBuffers) + vBuffer.resize(buflen); + } + + std::vector& GetFreeBuffer() + { + assert(!IsFull()); + + std::vector& result = m_vBuffers[m_nTail]; + m_nTail = Next(m_nTail); + return result; + } + + std::vector& GetFullBuffer() + { + assert(!IsEmpty()); + + std::vector& result = m_vBuffers[m_nHead]; + m_nHead = Next(m_nHead); + return result; + } + + bool IsEmpty() + { + return m_nHead == m_nTail; + } + + bool IsFull() + { + return m_nHead == Next(m_nTail); + } + + private: + unsigned int Next(unsigned int current) + { + return (current + 1) % m_vBuffers.size(); + } + + std::vector> m_vBuffers; + unsigned int m_nHead = 0; + unsigned int m_nTail = 0; + }; + + class ALSA : public Base + { + public: + ALSA(WaveEngine* pHost); + ~ALSA(); + + protected: + bool Open(const std::string& sOutputDevice, const std::string& sInputDevice) override; + bool Start() override; + void Stop() override; + void Close() override; + + private: + void DriverLoop(); + + snd_pcm_t* m_pPCM; + RingBuffer m_rBuffers; + std::atomic m_bDriverLoopActive{ false }; + std::thread m_thDriverLoop; + }; +} +#endif // SOUNDWAVE_USING_ALSA + +#if defined(SOUNDWAVE_USING_PULSE) +#include + +namespace olc::sound::driver +{ + class PulseAudio : public Base + { + public: + PulseAudio(WaveEngine* pHost); + ~PulseAudio(); + + protected: + bool Open(const std::string& sOutputDevice, const std::string& sInputDevice) override; + bool Start() override; + void Stop() override; + void Close() override; + + private: + void DriverLoop(); + + pa_simple* m_pPA; + std::atomic m_bDriverLoopActive{ false }; + std::thread m_thDriverLoop; + }; +} +#endif // SOUNDWAVE_USING_PULSE + +#ifdef OLC_SOUNDWAVE +#undef OLC_SOUNDWAVE + +namespace olc::sound +{ + WaveEngine::WaveEngine() + { + m_sInputDevice = "NONE"; + m_sOutputDevice = "DEFAULT"; + +#if defined(SOUNDWAVE_USING_WINMM) + m_driver = std::make_unique(this); +#endif + +#if defined(SOUNDWAVE_USING_WASAPI) + m_driver = std::make_unique(this); +#endif + +#if defined(SOUNDWAVE_USING_XAUDIO) + m_driver = std::make_unique(this); +#endif + +#if defined(SOUNDWAVE_USING_OPENAL) + m_driver = std::make_unique(this); +#endif + +#if defined(SOUNDWAVE_USING_ALSA) + m_driver = std::make_unique(this); +#endif + +#if defined(SOUNDWAVE_USING_SDLMIXER) + m_driver = std::make_unique(this); +#endif + +#if defined(SOUNDWAVE_USING_PULSE) + m_driver = std::make_unique(this); +#endif + } + + WaveEngine::~WaveEngine() + { + DestroyAudio(); + } + + std::vector WaveEngine::GetOutputDevices() + { + return { "XXX" }; + } + + + void WaveEngine::UseOutputDevice(const std::string& sDeviceOut) + { + m_sOutputDevice = sDeviceOut; + } + + std::vector WaveEngine::GetInputDevices() + { + return { "XXX" }; + } + + void WaveEngine::UseInputDevice(const std::string& sDeviceIn) + { + m_sInputDevice = sDeviceIn; + } + + bool WaveEngine::InitialiseAudio(uint32_t nSampleRate, uint32_t nChannels, uint32_t nBlocks, uint32_t nBlockSamples) + { + m_nSampleRate = nSampleRate; + m_nChannels = nChannels; + m_nBlocks = nBlocks; + m_nBlockSamples = nBlockSamples; + m_dSamplePerTime = double(nSampleRate); + m_dTimePerSample = 1.0 / double(nSampleRate); + m_driver->Open(m_sOutputDevice, m_sInputDevice); + m_driver->Start(); + return false; + } + + + bool WaveEngine::DestroyAudio() + { + StopAll(); + m_driver->Stop(); + m_driver->Close(); + return false; + } + + void 
WaveEngine::SetCallBack_NewSample(std::function func) + { + m_funcNewSample = func; + } + + void WaveEngine::SetCallBack_SynthFunction(std::function func) + { + m_funcUserSynth = func; + } + + void WaveEngine::SetCallBack_FilterFunction(std::function func) + { + m_funcUserFilter = func; + } + + PlayingWave WaveEngine::PlayWaveform(Wave* pWave, bool bLoop, double dSpeed) + { + WaveInstance wi; + wi.bLoop = bLoop; + wi.pWave = pWave; + wi.dSpeedModifier = dSpeed * double(pWave->file.samplerate()) / m_dSamplePerTime; + wi.dDuration = pWave->file.duration() / dSpeed; + wi.dInstanceTime = m_dGlobalTime; + m_listWaves.push_back(wi); + return std::prev(m_listWaves.end()); + } + + void WaveEngine::StopWaveform(const PlayingWave& w) + { + w->bFlagForStop = true; + } + + void WaveEngine::StopAll() + { + for (auto& wave : m_listWaves) + { + wave.bFlagForStop = true; + } + } + + void WaveEngine::SetOutputVolume(const float fVolume) + { + m_fOutputVolume = std::clamp(fVolume, 0.0f, 1.0f); + } + + uint32_t WaveEngine::FillOutputBuffer(std::vector& vBuffer, const uint32_t nBufferOffset, const uint32_t nRequiredSamples) + { + for (uint32_t nSample = 0; nSample < nRequiredSamples; nSample++) + { + double dSampleTime = m_dGlobalTime + nSample * m_dTimePerSample; + + if (m_funcNewSample) + m_funcNewSample(dSampleTime); + + for (uint32_t nChannel = 0; nChannel < m_nChannels; nChannel++) + { + // Construct the sample + float fSample = 0.0f; + + // 1) Sample any active waves + for (auto& wave : m_listWaves) + { + // Is wave instance flagged for stopping? + if (wave.bFlagForStop) + { + wave.bFinished = true; + } + else + { + // Calculate offset into wave instance + double dTimeOffset = dSampleTime - wave.dInstanceTime - wave.dPauseTime; + + // If offset is larger than wave then... 
+ if (dTimeOffset >= wave.dDuration) + { + if (wave.bLoop) + { + // ...if looping, reset the wave instance + wave.dInstanceTime = dSampleTime; + } + else + { + // ...if not looping, flag wave instance as dead + wave.bFinished = true; + } + } + else + { + if (!wave.bPaused) { + // OR, sample the waveform from the correct channel + fSample += float(wave.pWave->vChannelView[nChannel % wave.pWave->file.channels()].GetSample(dTimeOffset * m_dSamplePerTime * wave.dSpeedModifier)); + } + else { + wave.dPauseTime += m_dTimePerSample; + } + } + } + } + + // Remove waveform instances that have finished + m_listWaves.remove_if([](const WaveInstance& wi) {return wi.bFinished; }); + + + // 2) If user is synthesizing, request sample + if (m_funcUserSynth) + fSample += m_funcUserSynth(nChannel, dSampleTime); + + // 3) Apply global filters + + + // 4) If user is filtering, allow manipulation of output + if (m_funcUserFilter) + fSample = m_funcUserFilter(nChannel, dSampleTime, fSample); + + // Place sample in buffer + vBuffer[nBufferOffset + nSample * m_nChannels + nChannel] = fSample * m_fOutputVolume; + } + } + + // UPdate global time, accounting for error (thanks scripticuk) + m_dGlobalTime += nRequiredSamples * m_dTimePerSample; + return nRequiredSamples; + } + + + uint32_t WaveEngine::GetSampleRate() const + { + return m_nSampleRate; + } + + uint32_t WaveEngine::GetChannels() const + { + return m_nChannels; + } + + uint32_t WaveEngine::GetBlocks() const + { + return m_nBlocks; + } + + uint32_t WaveEngine::GetBlockSampleCount() const + { + return m_nBlockSamples; + } + + double WaveEngine::GetTimePerSample() const + { + return m_dTimePerSample; + } + + namespace driver + { + Base::Base(olc::sound::WaveEngine* pHost) : m_pHost(pHost) + {} + + Base::~Base() + {} + + bool Base::Open(const std::string& sOutputDevice, const std::string& sInputDevice) + { + return false; + } + + bool Base::Start() + { + return false; + } + + void Base::Stop() + { + } + + void Base::Close() + { + } + + std::vector Base::EnumerateOutputDevices() + { + return { "DEFAULT" }; + } + + std::vector Base::EnumerateInputDevices() + { + return { "NONE" }; + } + + void Base::ProcessOutputBlock(std::vector& vFloatBuffer, std::vector& vDACBuffer) + { + constexpr float fMaxSample = float(std::numeric_limits::max()); + constexpr float fMinSample = float(std::numeric_limits::min()); + + // So... why not use vFloatBuffer.size()? 
Well with this implementation + // we can, but i suspect there may be some platforms that request a + // specific number of samples per "loop" rather than this block architecture + uint32_t nSamplesToProcess = m_pHost->GetBlockSampleCount(); + uint32_t nSampleOffset = 0; + while (nSamplesToProcess > 0) + { + uint32_t nSamplesGathered = m_pHost->FillOutputBuffer(vFloatBuffer, nSampleOffset, nSamplesToProcess); + + // Vector is in float32 format, so convert to hardware required format + for (uint32_t n = 0; n < nSamplesGathered; n++) + { + for (uint32_t c = 0; c < m_pHost->GetChannels(); c++) + { + size_t nSampleID = nSampleOffset + (n * m_pHost->GetChannels() + c); + vDACBuffer[nSampleID] = short(std::clamp(vFloatBuffer[nSampleID] * fMaxSample, fMinSample, fMaxSample)); + } + } + + nSampleOffset += nSamplesGathered; + nSamplesToProcess -= nSamplesGathered; + } + } + + void Base::GetFullOutputBlock(std::vector& vFloatBuffer) + { + uint32_t nSamplesToProcess = m_pHost->GetBlockSampleCount(); + uint32_t nSampleOffset = 0; + while (nSamplesToProcess > 0) + { + uint32_t nSamplesGathered = m_pHost->FillOutputBuffer(vFloatBuffer, nSampleOffset, nSamplesToProcess); + + nSampleOffset += nSamplesGathered; + nSamplesToProcess -= nSamplesGathered; + } + } + } + + namespace synth + { + Property::Property(double f) + { + value = std::clamp(f, -1.0, 1.0); + } + + Property& Property::operator =(const double f) + { + value = std::clamp(f, -1.0, 1.0); + return *this; + } + + + ModularSynth::ModularSynth() + { + + } + + bool ModularSynth::AddModule(Module* pModule) + { + // Check if module already added + if (std::find(m_vModules.begin(), m_vModules.end(), pModule) == std::end(m_vModules)) + { + m_vModules.push_back(pModule); + return true; + } + + return false; + } + + bool ModularSynth::RemoveModule(Module* pModule) + { + if (std::find(m_vModules.begin(), m_vModules.end(), pModule) == std::end(m_vModules)) + { + m_vModules.erase(std::remove(m_vModules.begin(), m_vModules.end(), pModule), m_vModules.end()); + return true; + } + + return false; + } + + bool ModularSynth::AddPatch(Property* pInput, Property* pOutput) + { + // Does patch exist? 
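+			// (Patches are stored as (pInput, pOutput) pairs; UpdatePatches() copies
+			// pInput's value into pOutput each time it is called.)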
+ std::pair newPatch = std::pair(pInput, pOutput); + + if (std::find(m_vPatches.begin(), m_vPatches.end(), newPatch) == std::end(m_vPatches)) + { + // Patch doesnt exist, now check if either are null + if (pInput != nullptr && pOutput != nullptr) + { + m_vPatches.push_back(newPatch); + return true; + } + } + + return false; + } + + bool ModularSynth::RemovePatch(Property* pInput, Property* pOutput) + { + std::pair newPatch = std::pair(pInput, pOutput); + + if (std::find(m_vPatches.begin(), m_vPatches.end(), newPatch) == std::end(m_vPatches)) + { + m_vPatches.erase(std::remove(m_vPatches.begin(), m_vPatches.end(), newPatch), m_vPatches.end()); + return true; + } + + return false; + } + + void ModularSynth::UpdatePatches() + { + // Update patches + for (auto& patch : m_vPatches) + { + patch.second->value = patch.first->value; + } + } + + + void ModularSynth::Update(uint32_t nChannel, double dTime, double dTimeStep) + { + // Now update synth + for (auto& pModule : m_vModules) + { + pModule->Update(nChannel, dTime, dTimeStep); + } + } + + + namespace modules + { + void Oscillator::Update(uint32_t nChannel, double dTime, double dTimeStep) + { + // We use phase accumulation to combat change in parameter glitches + double w = frequency.value * max_frequency * dTimeStep; + phase_acc += w + lfo_input.value * frequency.value; + if (phase_acc >= 2.0) phase_acc -= 2.0; + + switch (waveform) + { + case Type::Sine: + output = amplitude.value * sin(phase_acc * 2.0 * 3.14159); + break; + + case Type::Saw: + output = amplitude.value * (phase_acc - 1.0) * 2.0; + break; + + case Type::Square: + output = amplitude.value * (phase_acc >= 1.0) ? 1.0 : -1.0; + break; + + case Type::Triangle: + output = amplitude.value * (phase_acc < 1.0) ? (phase_acc * 0.5) : (1.0 - phase_acc * 0.5); + break; + + case Type::PWM: + output = amplitude.value * (phase_acc >= (parameter.value + 1.0)) ? 
1.0 : -1.0; + break; + + case Type::Wave: + if (pWave != nullptr) + output = amplitude.value * pWave->vChannelView[nChannel].GetSample(phase_acc * 0.5 * pWave->file.durationInSamples()); + break; + + case Type::Noise: + output = amplitude.value * rndDouble(-1.0, 1.0); + break; + + } + } + + double Oscillator::rndDouble(double min, double max) + { + return ((double)rnd() / (double)(0x7FFFFFFF)) * (max - min) + min; + } + + uint32_t Oscillator::rnd() + { + random_seed += 0xe120fc15; + uint64_t tmp; + tmp = (uint64_t)random_seed * 0x4a39b70d; + uint32_t m1 = uint32_t(((tmp >> 32) ^ tmp) & 0xFFFFFFFF); + tmp = (uint64_t)m1 * 0x12fad5c9; + uint32_t m2 = uint32_t(((tmp >> 32) ^ tmp) & 0xFFFFFFFF); + return m2; + } + } + } +} + + + +#if defined(SOUNDWAVE_USING_WINMM) +// WinMM Driver Implementation +namespace olc::sound::driver +{ +#pragma comment(lib, "winmm.lib") + + WinMM::WinMM(WaveEngine* pHost) : Base(pHost) + { } + + WinMM::~WinMM() + { + Stop(); + Close(); + } + + bool WinMM::Open(const std::string& sOutputDevice, const std::string& sInputDevice) + { + // Device is available + WAVEFORMATEX waveFormat; + waveFormat.wFormatTag = WAVE_FORMAT_PCM; + waveFormat.nSamplesPerSec = m_pHost->GetSampleRate(); + waveFormat.wBitsPerSample = sizeof(short) * 8; + waveFormat.nChannels = m_pHost->GetChannels(); + waveFormat.nBlockAlign = (waveFormat.wBitsPerSample / 8) * waveFormat.nChannels; + waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign; + waveFormat.cbSize = 0; + + // Open Device if valid + if (waveOutOpen(&m_hwDevice, WAVE_MAPPER, &waveFormat, (DWORD_PTR)WinMM::waveOutProc, (DWORD_PTR)this, CALLBACK_FUNCTION) != S_OK) + return false; + + // Allocate array of wave header objects, one per block + m_pWaveHeaders = std::make_unique(m_pHost->GetBlocks()); + + // Allocate block memory - I dont like vector of vectors, so going with this mess instead + // My std::vector's content will change, but their size never will - they are basically array now + m_pvBlockMemory = std::make_unique[]>(m_pHost->GetBlocks()); + for (size_t i = 0; i < m_pHost->GetBlocks(); i++) + m_pvBlockMemory[i].resize(m_pHost->GetBlockSampleCount() * m_pHost->GetChannels(), 0); + + // Link headers to block memory - clever, so we only move headers about + // rather than memory... + for (unsigned int n = 0; n < m_pHost->GetBlocks(); n++) + { + m_pWaveHeaders[n].dwBufferLength = DWORD(m_pvBlockMemory[0].size() * sizeof(short)); + m_pWaveHeaders[n].lpData = (LPSTR)(m_pvBlockMemory[n].data()); + } + + // To begin with, all blocks are free + m_nBlockFree = m_pHost->GetBlocks(); + return true; + } + + bool WinMM::Start() + { + // Prepare driver thread for activity + m_bDriverLoopActive = true; + // and get it going! + m_thDriverLoop = std::thread(&WinMM::DriverLoop, this); + return true; + } + + void WinMM::Stop() + { + // Signal the driver loop to exit + m_bDriverLoopActive = false; + + // Wait for driver thread to exit gracefully + if (m_thDriverLoop.joinable()) + m_thDriverLoop.join(); + } + + void WinMM::Close() + { + waveOutClose(m_hwDevice); + } + + // Static Callback wrapper - specific instance is specified + void CALLBACK WinMM::waveOutProc(HWAVEOUT hWaveOut, UINT uMsg, DWORD_PTR dwInstance, DWORD dwParam1, DWORD dwParam2) + { + // All sorts of messages may be pinged here, but we're only interested + // in audio block is finished... 
+ if (uMsg != WOM_DONE) return; + + // ...which has happened so allow driver object to free resource + WinMM* driver = (WinMM*)dwInstance; + driver->FreeAudioBlock(); + } + + void WinMM::FreeAudioBlock() + { + // Audio subsystem is done with the block it was using, thus + // making it available again + m_nBlockFree++; + + // Signal to driver loop that a block is now available. It + // could have been suspended waiting for one + std::unique_lock lm(m_muxBlockNotZero); + m_cvBlockNotZero.notify_one(); + } + + void WinMM::DriverLoop() + { + // We will be using this vector to transfer to the host for filling, with + // user sound data (float32, -1.0 --> +1.0) + std::vector vFloatBuffer(m_pHost->GetBlockSampleCount() * m_pHost->GetChannels(), 0.0f); + + // While the system is active, start requesting audio data + while (m_bDriverLoopActive) + { + // Are there any blocks available to fill? ... + if (m_nBlockFree == 0) + { + // ...no, So wait until one is available + std::unique_lock lm(m_muxBlockNotZero); + while (m_nBlockFree == 0) // sometimes, Windows signals incorrectly + { + // This thread will suspend until this CV is signalled + // from FreeAudioBlock. + m_cvBlockNotZero.wait(lm); + } + } + + // ...yes, so use next one, by indicating one fewer + // block is available + m_nBlockFree--; + + // Prepare block for processing, by oddly, marking it as unprepared :P + if (m_pWaveHeaders[m_nBlockCurrent].dwFlags & WHDR_PREPARED) + { + waveOutUnprepareHeader(m_hwDevice, &m_pWaveHeaders[m_nBlockCurrent], sizeof(WAVEHDR)); + } + + // Give the userland the opportunity to fill the buffer. Note that the driver + // doesnt give a hoot about timing. Thats up to the SoundWave interface to + // maintain + + // Userland will populate a float buffer, that gets cleanly converted to + // a buffer of shorts for DAC + ProcessOutputBlock(vFloatBuffer, m_pvBlockMemory[m_nBlockCurrent]); + + // Send block to sound device + waveOutPrepareHeader(m_hwDevice, &m_pWaveHeaders[m_nBlockCurrent], sizeof(WAVEHDR)); + waveOutWrite(m_hwDevice, &m_pWaveHeaders[m_nBlockCurrent], sizeof(WAVEHDR)); + m_nBlockCurrent++; + m_nBlockCurrent %= m_pHost->GetBlocks(); + } + } +} // WinMM Driver Implementation +#endif +#if defined(SOUNDWAVE_USING_SDLMIXER) + +namespace olc::sound::driver +{ + + SDLMixer* SDLMixer::instance = nullptr; + + SDLMixer::SDLMixer(olc::sound::WaveEngine* pHost) + : Base(pHost) + { + instance = this; + } + + SDLMixer::~SDLMixer() + { + Stop(); + Close(); + } + + bool SDLMixer::Open(const std::string& sOutputDevice, const std::string& sInputDevice) + { + auto errc = Mix_OpenAudioDevice(static_cast(m_pHost->GetSampleRate()), + AUDIO_F32, + static_cast(m_pHost->GetChannels()), + static_cast(m_pHost->GetBlockSampleCount()), + sOutputDevice == "DEFAULT" ? nullptr : sOutputDevice.c_str(), + SDL_AUDIO_ALLOW_FORMAT_CHANGE); + + // Query the actual format of the audio device, as we have allowed it to be changed. 
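+		// (Mix_QuerySpec() returns 0 when the device cannot be queried, so either a
+		// failed open or a failed query aborts initialisation here.)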
+ if (errc || !Mix_QuerySpec(nullptr, &m_haveFormat, nullptr)) + { + std::cerr << "Failed to open audio device '" << sOutputDevice << "'" << std::endl; + return false; + } + + // Compute the Mix_Chunk buffer's size according to the format of the audio device + Uint32 bufferSize = 0; + switch (m_haveFormat) + { + case AUDIO_F32: + case AUDIO_S32: + bufferSize = m_pHost->GetBlockSampleCount() * m_pHost->GetChannels() * 4; + break; + case AUDIO_S16: + case AUDIO_U16: + bufferSize = m_pHost->GetBlockSampleCount() * m_pHost->GetChannels() * 2; + break; + case AUDIO_S8: + case AUDIO_U8: + bufferSize = m_pHost->GetBlockSampleCount() * m_pHost->GetChannels() * 1; + break; + default: + std::cerr << "Audio format of device '" << sOutputDevice << "' is not supported" << std::endl; + return false; + } + + // Allocate the buffer once. The size will never change after this + audioBuffer.resize(bufferSize); + audioChunk = { + 0, // 0, as the chunk does not own the array + audioBuffer.data(), // Pointer to data array + bufferSize, // Size in bytes + 128 // Volume; max by default as it's not controlled by the driver. + }; + + return true; + } + + template + void ConvertFloatTo(const std::vector& fromArr, Int* toArr) + { + static auto minVal = static_cast(std::numeric_limits::min()); + static auto maxVal = static_cast(std::numeric_limits::max()); + for (size_t i = 0; i != fromArr.size(); ++i) + { + toArr[i] = static_cast(std::clamp(fromArr[i] * maxVal, minVal, maxVal)); + } + } + + void SDLMixer::FillChunkBuffer(const std::vector& userData) const + { + // Since the audio device might have changed the format we need to provide, + // we convert the wave data from the user to that format. + switch (m_haveFormat) + { + case AUDIO_F32: + memcpy(audioChunk.abuf, userData.data(), audioChunk.alen); + break; + case AUDIO_S32: + ConvertFloatTo(userData, reinterpret_cast(audioChunk.abuf)); + break; + case AUDIO_S16: + ConvertFloatTo(userData, reinterpret_cast(audioChunk.abuf)); + break; + case AUDIO_U16: + ConvertFloatTo(userData, reinterpret_cast(audioChunk.abuf)); + break; + case AUDIO_S8: + ConvertFloatTo(userData, reinterpret_cast(audioChunk.abuf)); + break; + case AUDIO_U8: + ConvertFloatTo(userData, audioChunk.abuf); + break; + } + } + + void SDLMixer::SDLMixerCallback(int channel) + { + static std::vector userData(instance->m_pHost->GetBlockSampleCount() * instance->m_pHost->GetChannels()); + + if (channel != 0) + { + std::cerr << "Unexpected channel number" << std::endl; + } + + // Don't add another chunk if we should not keep running + if (!instance->m_keepRunning) + return; + + instance->GetFullOutputBlock(userData); + instance->FillChunkBuffer(userData); + + if (Mix_PlayChannel(0, &instance->audioChunk, 0) == -1) + { + std::cerr << "Error while playing Chunk" << std::endl; + } + } + + bool SDLMixer::Start() + { + m_keepRunning = true; + + // Kickoff the audio driver + SDLMixerCallback(0); + + // SDLMixer handles all other calls to reinsert user data + Mix_ChannelFinished(SDLMixerCallback); + return true; + } + + void SDLMixer::Stop() + { + m_keepRunning = false; + + // Stop might be called multiple times, so we check whether the device is already closed + if (Mix_QuerySpec(nullptr, nullptr, nullptr)) + { + for (int i = 0; i != m_pHost->GetChannels(); ++i) + { + if (Mix_Playing(i)) + Mix_HaltChannel(i); + } + } + } + + void SDLMixer::Close() + { + Mix_CloseAudio(); + } +} + +#endif // SOUNDWAVE_USING_SDLMIXER +#if defined(SOUNDWAVE_USING_ALSA) +// ALSA Driver Implementation +namespace olc::sound::driver +{ + 
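+	// The ALSA driver below queues audio through the RingBuffer declared earlier:
+	// the driver thread fills free blocks from the engine via GetFullOutputBlock(),
+	// waits on the PCM poll descriptors while the device is busy, and writes full
+	// blocks out with snd_pcm_writei() as space becomes available.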
ALSA::ALSA(WaveEngine* pHost) : Base(pHost) + { } + + ALSA::~ALSA() + { + Stop(); + Close(); + } + + bool ALSA::Open(const std::string& sOutputDevice, const std::string& sInputDevice) + { + // Open PCM stream + int rc = snd_pcm_open(&m_pPCM, "default", SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK); + + // Clear global cache. + // This won't affect users who don't want to create multiple instances of this driver, + // but it will prevent valgrind from whining about "possibly lost" memory. + // If the user's ALSA setup uses a PulseAudio plugin, then valgrind will still compain + // about some "still reachable" data used by that plugin. TODO? + snd_config_update_free_global(); + + if (rc < 0) + return false; + + // Prepare the parameter structure and set default parameters + snd_pcm_hw_params_t* params; + snd_pcm_hw_params_alloca(¶ms); + snd_pcm_hw_params_any(m_pPCM, params); + + // Set other parameters + snd_pcm_hw_params_set_access(m_pPCM, params, SND_PCM_ACCESS_RW_INTERLEAVED); + snd_pcm_hw_params_set_format(m_pPCM, params, SND_PCM_FORMAT_FLOAT); + snd_pcm_hw_params_set_rate(m_pPCM, params, m_pHost->GetSampleRate(), 0); + snd_pcm_hw_params_set_channels(m_pPCM, params, m_pHost->GetChannels()); + snd_pcm_hw_params_set_period_size(m_pPCM, params, m_pHost->GetBlockSampleCount(), 0); + snd_pcm_hw_params_set_periods(m_pPCM, params, m_pHost->GetBlocks(), 0); + + // Save these parameters + rc = snd_pcm_hw_params(m_pPCM, params); + if (rc < 0) + return false; + + return true; + } + + bool ALSA::Start() + { + // Unsure if really needed, helped prevent underrun on my setup + std::vector vSilence(m_pHost->GetBlockSampleCount() * m_pHost->GetChannels(), 0.0f); + snd_pcm_start(m_pPCM); + for (unsigned int i = 0; i < m_pHost->GetBlocks(); i++) + snd_pcm_writei(m_pPCM, vSilence.data(), m_pHost->GetBlockSampleCount()); + + m_rBuffers.Resize(m_pHost->GetBlocks(), m_pHost->GetBlockSampleCount() * m_pHost->GetChannels()); + + snd_pcm_start(m_pPCM); + m_bDriverLoopActive = true; + m_thDriverLoop = std::thread(&ALSA::DriverLoop, this); + + return true; + } + + void ALSA::Stop() + { + // Signal the driver loop to exit + m_bDriverLoopActive = false; + + // Wait for driver thread to exit gracefully + if (m_thDriverLoop.joinable()) + m_thDriverLoop.join(); + + if (m_pPCM != nullptr) + snd_pcm_drop(m_pPCM); + } + + void ALSA::Close() + { + if (m_pPCM != nullptr) + { + snd_pcm_close(m_pPCM); + m_pPCM = nullptr; + } + // Clear the global cache again for good measure + snd_config_update_free_global(); + } + + void ALSA::DriverLoop() + { + const uint32_t nFrames = m_pHost->GetBlockSampleCount(); + + int err; + std::vector vFDs; + + int nFDs = snd_pcm_poll_descriptors_count(m_pPCM); + if (nFDs < 0) + { + std::cerr << "snd_pcm_poll_descriptors_count returned " << nFDs << "\n"; + std::cerr << "disabling polling\n"; + nFDs = 0; + } + else + { + vFDs.resize(nFDs); + + err = snd_pcm_poll_descriptors(m_pPCM, vFDs.data(), vFDs.size()); + if (err < 0) + { + std::cerr << "snd_pcm_poll_descriptors returned " << err << "\n"; + std::cerr << "disabling polling\n"; + vFDs = {}; + } + } + + // While the system is active, start requesting audio data + while (m_bDriverLoopActive) + { + if (!m_rBuffers.IsFull()) + { + // Grab some audio data + auto& vFreeBuffer = m_rBuffers.GetFreeBuffer(); + GetFullOutputBlock(vFreeBuffer); + } + + // Wait a bit if our buffer is full + auto avail = snd_pcm_avail_update(m_pPCM); + while (m_rBuffers.IsFull() && avail < nFrames) + { + if (vFDs.size() == 0) break; + + err = poll(vFDs.data(), vFDs.size(), -1); + 
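+				// poll() with a -1 timeout blocks until the PCM descriptors signal
+				// activity, so this loop sleeps rather than spinning while the
+				// device buffer is full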
if (err < 0) + std::cerr << "poll returned " << err << "\n"; + + unsigned short revents; + err = snd_pcm_poll_descriptors_revents(m_pPCM, vFDs.data(), vFDs.size(), &revents); + if (err < 0) + std::cerr << "snd_pcm_poll_descriptors_revents returned " << err << "\n"; + + if (revents & POLLERR) + std::cerr << "POLLERR\n"; + + avail = snd_pcm_avail_update(m_pPCM); + } + + // Write whatever we can + while (!m_rBuffers.IsEmpty() && avail >= nFrames) + { + auto vFullBuffer = m_rBuffers.GetFullBuffer(); + uint32_t nWritten = 0; + + while (nWritten < nFrames) + { + auto err = snd_pcm_writei(m_pPCM, vFullBuffer.data() + nWritten, nFrames - nWritten); + if (err > 0) + nWritten += err; + else + { + std::cerr << "snd_pcm_writei returned " << err << "\n"; + break; + } + } + avail = snd_pcm_avail_update(m_pPCM); + } + } + } +} // ALSA Driver Implementation +#endif +#if defined(SOUNDWAVE_USING_PULSE) +// PULSE Driver Implementation +#include +#include + +namespace olc::sound::driver +{ + PulseAudio::PulseAudio(WaveEngine* pHost) : Base(pHost) + { } + + PulseAudio::~PulseAudio() + { + Stop(); + Close(); + } + + bool PulseAudio::Open(const std::string& sOutputDevice, const std::string& sInputDevice) + { + pa_sample_spec ss{ + PA_SAMPLE_FLOAT32, m_pHost->GetSampleRate(), (uint8_t)m_pHost->GetChannels() + }; + + m_pPA = pa_simple_new(NULL, "olcSoundWaveEngine", PA_STREAM_PLAYBACK, NULL, + "Output Stream", &ss, NULL, NULL, NULL); + + if (m_pPA == NULL) + return false; + + return true; + } + + bool PulseAudio::Start() + { + m_bDriverLoopActive = true; + m_thDriverLoop = std::thread(&PulseAudio::DriverLoop, this); + + return true; + } + + void PulseAudio::Stop() + { + // Signal the driver loop to exit + m_bDriverLoopActive = false; + + // Wait for driver thread to exit gracefully + if (m_thDriverLoop.joinable()) + m_thDriverLoop.join(); + } + + void PulseAudio::Close() + { + if (m_pPA != nullptr) + { + pa_simple_free(m_pPA); + m_pPA = nullptr; + } + } + + void PulseAudio::DriverLoop() + { + // We will be using this vector to transfer to the host for filling, with + // user sound data (float32, -1.0 --> +1.0) + std::vector vFloatBuffer(m_pHost->GetBlockSampleCount() * m_pHost->GetChannels(), 0.0f); + + // While the system is active, start requesting audio data + while (m_bDriverLoopActive) + { + // Grab audio data from user + GetFullOutputBlock(vFloatBuffer); + + // Fill PulseAudio data buffer + int error; + if (pa_simple_write(m_pPA, vFloatBuffer.data(), + vFloatBuffer.size() * sizeof(float), &error) < 0) + { + std::cerr << "Failed to feed data to PulseAudio: " << pa_strerror(error) << "\n"; + } + } + } +} // PulseAudio Driver Implementation +#endif + +#endif // OLC_SOUNDWAVE IMPLEMENTATION +#endif // OLC_SOUNDWAVE_H diff --git a/Example/olcSoundWaveEngine_pause.h b/Example/olcSoundWaveEngine_pause.h new file mode 100644 index 0000000..8cbe47e --- /dev/null +++ b/Example/olcSoundWaveEngine_pause.h @@ -0,0 +1,1983 @@ +/* + +-------------------------------------------------------------+ + | OneLoneCoder Sound Wave Engine v0.02 | + | "You wanted noise? Well is this loud enough?" - javidx9 | + +-------------------------------------------------------------+ + + What is this? + ~~~~~~~~~~~~~ + olc::SoundWaveEngine is a single file, cross platform audio + interface for lightweight applications that just need a bit of + easy audio manipulation. + + It's origins started in the olcNoiseMaker file that accompanied + javidx9's "Code-It-Yourself: Synthesizer" series. 
It was refactored + and absorbed into the "olcConsoleGameEngine.h" file, and then + refactored again into olcPGEX_Sound.h, that was an extension to + the awesome "olcPixelGameEngine.h" file. + + Alas, it went underused and began to rot, with many myths circulating + that "it doesnt work" and "it shouldn't be used". These untruths + made javidx9 feel sorry for the poor file, and he decided to breathe + some new life into it, in anticipation of new videos! + + License (OLC-3) + ~~~~~~~~~~~~~~~ + + Copyright 2018 - 2022 OneLoneCoder.com + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met : + + 1. Redistributions or derivations of source code must retain the above + copyright notice, this list of conditionsand the following disclaimer. + + 2. Redistributions or derivative works in binary form must reproduce + the above copyright notice.This list of conditions and the following + disclaimer must be reproduced in the documentation and /or other + materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + Links + ~~~~~ + YouTube: https://www.youtube.com/javidx9 + Discord: https://discord.gg/WhwHUMV + Twitter: https://www.twitter.com/javidx9 + Twitch: https://www.twitch.tv/javidx9 + GitHub: https://www.github.com/onelonecoder + Homepage: https://www.onelonecoder.com + Patreon: https://www.patreon.com/javidx9 + + Thanks + ~~~~~~ + Gorbit99, Dragoneye, Puol + + Authors + ~~~~~~~ + slavka, MaGetzUb, cstd, Moros1138 & javidx9 + + (c)OneLoneCoder 2019, 2020, 2021, 2022 +*/ + + +/* + Using & Installing On Microsoft Windows + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Microsoft Visual Studio + ~~~~~~~~~~~~~~~~~~~~~~~ + 1) Include the header file "olcSoundWaveEngine.h" from a .cpp file in your project. + 2) That's it! + + + Code::Blocks + ~~~~~~~~~~~~ + 1) Make sure your compiler toolchain is NOT the default one installed with Code::Blocks. That + one is old, out of date, and a general mess. Instead, use MSYS2 to install a recent and + decent GCC toolchain, then configure Code::Blocks to use it + + Guide for installing recent GCC for Windows: + https://www.msys2.org/ + Guide for configuring code::blocks: + https://solarianprogrammer.com/2019/11/05/install-gcc-windows/ + https://solarianprogrammer.com/2019/11/16/install-codeblocks-gcc-windows-build-c-cpp-fortran-programs/ + + 2) Include the header file "olcSoundWaveEngine.h" from a .cpp file in your project. 
+ 3) Add these libraries to "Linker Options": user32 winmm + 4) Set this "Compiler Option": -std=c++17 +*/ + +/* + Using & Installing On Linux + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + GNU Compiler Collection (GCC) + ~~~~~~~~~~~~~~~~~~~~~~~ + 1) Include the header file "olcSoundWaveEngine.h" from a .cpp file in your project. + 2) Build with the following command: + + g++ olcSoundWaveEngineExample.cpp -o olcSoundWaveEngineExample -lpulse -lpulse-simple -std=c++17 + + 3) That's it! + +*/ + +/* + Using in multiple-file projects + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + If you intend to use olcSoundWaveEngine across multiple files, it's important to only have one + instance of the implementation. This is done using the compiler preprocessor definition: OLC_SOUNDWAVE + + This is defined typically before the header is included in teh translation unit you wish the implementation + to be associated with. To avoid things getting messy I recommend you create a file "olcSoundWaveEngine.cpp" + and that file includes ONLY the following code: + + #define OLC_SOUNDWAVE + #include "olcSoundWaveEngine.h" +*/ + +/* + 0.01: olcPGEX_Sound.h reworked + +Changed timekeeping to double, added accuracy fix - Thanks scripticuk + +Concept of audio drivers and interface + +All internal timing now double precision + +All internal sampling now single precsion + +Loading form WAV files + +LERPed sampling from all buffers + +Multi-channel audio support + 0.02: +Support multi-channel wave files + +Support for 24-bit wave files + +Wave files are now sample rate invariant + +Linux PulseAudio Updated + +Linux ALSA Updated + +WinMM Updated + +CMake Compatibility + =Fix wave format durations preventing playback + =Various bug fixes +*/ + +#pragma once +#ifndef OLC_SOUNDWAVE_H +#define OLC_SOUNDWAVE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Compiler/System Sensitivity +#if !defined(SOUNDWAVE_USING_WINMM) && !defined(SOUNDWAVE_USING_WASAPI) && \ + !defined(SOUNDWAVE_USING_XAUDIO) && !defined(SOUNDWAVE_USING_OPENAL) && \ + !defined(SOUNDWAVE_USING_ALSA) && !defined(SOUNDWAVE_USING_SDLMIXER) && \ + !defined(SOUNDWAVE_USING_PULSE) \ + +#if defined(_WIN32) +#define SOUNDWAVE_USING_WINMM +#endif +#if defined(__linux__) +#define SOUNDWAVE_USING_PULSE +#endif +#if defined(__APPLE__) +#define SOUNDWAVE_USING_SDLMIXER +#endif +#if defined(__EMSCRIPTEN__) +#define SOUNDWAVE_USING_SDLMIXER +#endif + +#endif + +namespace olc::sound +{ + + namespace wave + { + // Physically represents a .WAV file, but the data is stored + // as normalised floating point values + template + class File + { + public: + File() = default; + + File(const size_t nChannels, const size_t nSampleSize, const size_t nSampleRate, const size_t nSamples) + { + m_nChannels = nChannels; + m_nSampleSize = nSampleSize; + m_nSamples = nSamples; + m_nSampleRate = nSampleRate; + m_dDuration = double(m_nSamples) / double(m_nSampleRate); + m_dDurationInSamples = double(m_nSamples); + + m_pRawData = std::make_unique(m_nSamples * m_nChannels); + } + + public: + T* data() const + { + return m_pRawData.get(); + } + + size_t samples() const + { + return m_nSamples; + } + + size_t channels() const + { + return m_nChannels; + } + + size_t samplesize() const + { + return m_nSampleSize; + } + + size_t samplerate() const + { + return m_nSampleRate; + } + + double duration() const + { + return m_dDuration; + } + + double durationInSamples() const + { + return m_dDurationInSamples; + } + + 
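+			// Parses a RIFF/WAVE file from disk, converting 8/16/24/32-bit integer PCM
+			// samples into normalised values in approximately the [-1, +1] range.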
bool LoadFile(const std::string& sFilename) + { + std::ifstream ifs(sFilename, std::ios::binary); + if (!ifs.is_open()) + return false; + + struct WaveFormatHeader + { + uint16_t wFormatTag; /* format type */ + uint16_t nChannels; /* number of channels (i.e. mono, stereo...) */ + uint32_t nSamplesPerSec; /* sample rate */ + uint32_t nAvgBytesPerSec; /* for buffer estimation */ + uint16_t nBlockAlign; /* block size of data */ + uint16_t wBitsPerSample; /* number of bits per sample of mono data */ + }; + + WaveFormatHeader header{ 0 }; + + m_pRawData.reset(); + + char dump[4]; + ifs.read(dump, sizeof(uint8_t) * 4); // Read "RIFF" + if (strncmp(dump, "RIFF", 4) != 0) return false; + + ifs.read(dump, sizeof(uint8_t) * 4); // Not Interested + ifs.read(dump, sizeof(uint8_t) * 4); // Read "WAVE" + if (strncmp(dump, "WAVE", 4) != 0) return false; + + // Read Wave description chunk + ifs.read(dump, sizeof(uint8_t) * 4); // Read "fmt " + ifs.read(dump, sizeof(uint8_t) * 4); // Not Interested + ifs.read((char*)&header, sizeof(WaveFormatHeader)); // Read Wave Format Structure chunk + + // Search for audio data chunk + int32_t nChunksize = 0; + ifs.read(dump, sizeof(uint8_t) * 4); // Read chunk header + ifs.read((char*)&nChunksize, sizeof(uint32_t)); // Read chunk size + + while (strncmp(dump, "data", 4) != 0) + { + // Not audio data, so just skip it + ifs.seekg(nChunksize, std::ios::cur); + ifs.read(dump, sizeof(uint8_t) * 4); // Read next chunk header + ifs.read((char*)&nChunksize, sizeof(uint32_t)); // Read next chunk size + } + + // Finally got to data, so read it all in and convert to float samples + m_nSampleSize = header.wBitsPerSample >> 3; + m_nSamples = nChunksize / (header.nChannels * m_nSampleSize); + m_nChannels = header.nChannels; + m_nSampleRate = header.nSamplesPerSec; + m_pRawData = std::make_unique(m_nSamples * m_nChannels); + m_dDuration = double(m_nSamples) / double(m_nSampleRate); + m_dDurationInSamples = double(m_nSamples); + + T* pSample = m_pRawData.get(); + + // Read in audio data and normalise + for (long i = 0; i < m_nSamples; i++) + { + for (int c = 0; c < m_nChannels; c++) + { + switch (m_nSampleSize) + { + case 1: + { + int8_t s = 0; + ifs.read((char*)&s, sizeof(int8_t)); + *pSample = T(s) / T(std::numeric_limits::max()); + } + break; + + case 2: + { + int16_t s = 0; + ifs.read((char*)&s, sizeof(int16_t)); + *pSample = T(s) / T(std::numeric_limits::max()); + } + break; + + case 3: // 24-bit + { + int32_t s = 0; + ifs.read((char*)&s, 3); + if (s & (1 << 23)) s |= 0xFF000000; + *pSample = T(s) / T(std::pow(2, 23) - 1); + } + break; + + case 4: + { + int32_t s = 0; + ifs.read((char*)&s, sizeof(int32_t)); + *pSample = T(s) / T(std::numeric_limits::max()); + } + break; + } + + pSample++; + } + } + return true; + } + + bool SaveFile(const std::string& sFilename) + { + return false; + } + + + protected: + std::unique_ptr m_pRawData; + size_t m_nSamples = 0; + size_t m_nChannels = 0; + size_t m_nSampleRate = 0; + size_t m_nSampleSize = 0; + double m_dDuration = 0.0; + double m_dDurationInSamples = 0.0; + }; + + template + class View + { + public: + View() = default; + + View(const T* pData, const size_t nSamples) + { + SetData(pData, nSamples); + } + + public: + void SetData(T const* pData, const size_t nSamples, const size_t nStride = 1, const size_t nOffset = 0) + { + m_pData = pData; + m_nSamples = nSamples; + m_nStride = nStride; + m_nOffset = nOffset; + } + + double GetSample(const double dSample) const + { + double d1 = std::floor(dSample); + size_t p1 = static_cast(d1); + 
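+				// p1/p2 are the neighbouring whole samples that bracket the requested
+				// fractional position; the result is linearly interpolated between them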
size_t p2 = p1 + 1; + + double t = dSample - d1; + double a = GetValue(p1); + double b = GetValue(p2); + + return a + t * (b - a); // std::lerp in C++20 + } + + std::pair GetRange(const double dSample1, const double dSample2) const + { + if (dSample1 < 0 || dSample2 < 0) + return { 0,0 }; + + if (dSample1 > m_nSamples && dSample2 > m_nSamples) + return { 0,0 }; + + double dMin, dMax; + + double d = GetSample(dSample1); + dMin = dMax = d; + + size_t n1 = static_cast(std::ceil(dSample1)); + size_t n2 = static_cast(std::floor(dSample2)); + for (size_t n = n1; n < n2; n++) + { + d = GetValue(n); + dMin = std::min(dMin, d); + dMax = std::max(dMax, d); + } + + d = GetSample(dSample2); + dMin = std::min(dMin, d); + dMax = std::max(dMax, d); + + return { dMin, dMax }; + } + + T GetValue(const size_t nSample) const + { + if (nSample >= m_nSamples) + return 0; + else + return m_pData[m_nOffset + nSample * m_nStride]; + } + + private: + const T* m_pData = nullptr; + size_t m_nSamples = 0; + size_t m_nStride = 1; + size_t m_nOffset = 0; + }; + } + + template + class Wave_generic + { + public: + Wave_generic() = default; + Wave_generic(std::string sWavFile) { LoadAudioWaveform(sWavFile); } + Wave_generic(std::istream& sStream) { LoadAudioWaveform(sStream); } + Wave_generic(const char* pData, const size_t nBytes) { LoadAudioWaveform(pData, nBytes); } + + Wave_generic(const size_t nChannels, const size_t nSampleSize, const size_t nSampleRate, const size_t nSamples) + { + vChannelView.clear(); + file = wave::File(nChannels, nSampleSize, nSampleRate, nSamples); + vChannelView.resize(file.channels()); + for (uint32_t c = 0; c < file.channels(); c++) + vChannelView[c].SetData(file.data(), file.samples(), file.channels(), c); + } + + bool LoadAudioWaveform(std::string sWavFile) + { + vChannelView.clear(); + + if (file.LoadFile(sWavFile)) + { + // Setup views for each channel + vChannelView.resize(file.channels()); + for (uint32_t c = 0; c < file.channels(); c++) + vChannelView[c].SetData(file.data(), file.samples(), file.channels(), c); + + return true; + } + + return false; + } + + + + bool LoadAudioWaveform(std::istream& sStream) { return false; } + bool LoadAudioWaveform(const char* pData, const size_t nBytes) { return false; } + + std::vector> vChannelView; + wave::File file; + }; + + typedef Wave_generic Wave; + + struct WaveInstance + { + Wave* pWave = nullptr; + double dInstanceTime = 0.0; + double dDuration = 0.0; + double dSpeedModifier = 1.0; + bool bFinished = false; + bool bLoop = false; + bool bFlagForStop = false; + bool bPaused = false; + }; + + typedef std::list::iterator PlayingWave; + + namespace driver + { + class Base; + } + + // Container class for Basic Sound Manipulation + class WaveEngine + { + + public: + WaveEngine(); + virtual ~WaveEngine(); + + // Configure Audio Hardware + bool InitialiseAudio(uint32_t nSampleRate = 44100, uint32_t nChannels = 1, uint32_t nBlocks = 8, uint32_t nBlockSamples = 512); + + // Release Audio Hardware + bool DestroyAudio(); + + // Call to get the names of all the devices capable of audio output - DACs. An entry + // from the returned collection can be specified as the device to use in UseOutputDevice() + std::vector GetOutputDevices(); + + // Specify a device for audio output prior to calling InitialiseAudio() + void UseOutputDevice(const std::string& sDeviceOut); + + // Call to get the names of all the devices capable of audio input - ADCs. 
An entry + // from the returned collection can be specified as the device to use in UseInputDevice() + std::vector GetInputDevices(); + + // Specify a device for audio input prior to calling InitialiseAudio() + void UseInputDevice(const std::string& sDeviceOut); + + + void SetCallBack_NewSample(std::function func); + void SetCallBack_SynthFunction(std::function func); + void SetCallBack_FilterFunction(std::function func); + + public: + void SetOutputVolume(const float fVolume); + + + + PlayingWave PlayWaveform(Wave* pWave, bool bLoop = false, double dSpeed = 1.0); + void PauseWaveform(const PlayingWave& w); + void ResumeWaveform(const PlayingWave& w); + void RewindWaveform(const PlayingWave& w); + void StopWaveform(const PlayingWave& w); + void StopAll(); + + private: + uint32_t FillOutputBuffer(std::vector& vBuffer, const uint32_t nBufferOffset, const uint32_t nRequiredSamples); + + private: + std::unique_ptr m_driver; + std::function m_funcNewSample; + std::function m_funcUserSynth; + std::function m_funcUserFilter; + + + private: + uint32_t m_nSampleRate = 44100; + uint32_t m_nChannels = 1; + uint32_t m_nBlocks = 8; + uint32_t m_nBlockSamples = 512; + double m_dSamplePerTime = 44100.0; + double m_dTimePerSample = 1.0 / 44100; + double m_dGlobalTime = 0.0; + float m_fOutputVolume = 1.0; + + std::string m_sInputDevice; + std::string m_sOutputDevice; + + private: + std::list m_listWaves; + + public: + uint32_t GetSampleRate() const; + uint32_t GetChannels() const; + uint32_t GetBlocks() const; + uint32_t GetBlockSampleCount() const; + double GetTimePerSample() const; + + + // Friends, for access to FillOutputBuffer from Drivers + friend class driver::Base; + + }; + + namespace driver + { + // DRIVER DEVELOPERS ONLY!!! + // + // This interface allows SoundWave to exchange data with OS audio systems. It + // is not intended of use by regular users. + class Base + { + public: + Base(WaveEngine* pHost); + virtual ~Base(); + + public: + // [IMPLEMENT] Opens a connection to the hardware device, returns true if success + virtual bool Open(const std::string& sOutputDevice, const std::string& sInputDevice); + // [IMPLEMENT] Starts a process that repeatedly requests audio, returns true if success + virtual bool Start(); + // [IMPLEMENT] Stops a process form requesting audio + virtual void Stop(); + // [IMPLEMENT] Closes any connections to hardware devices + virtual void Close(); + + virtual std::vector EnumerateOutputDevices(); + virtual std::vector EnumerateInputDevices(); + + protected: + // [IMPLEMENT IF REQUIRED] Called by driver to exchange data with SoundWave System. Your + // implementation will call this function providing a "DAC" buffer to be filled by + // SoundWave from a buffer of floats filled by the user. + void ProcessOutputBlock(std::vector& vFloatBuffer, std::vector& vDACBuffer); + + // [IMPLEMENT IF REQUIRED] Called by driver to exchange data with SoundWave System. 
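+			// This variant hands the engine's float32 samples to the driver as-is, for
+			// backends that accept floating point data directly (e.g. ALSA, PulseAudio).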
+ void GetFullOutputBlock(std::vector& vFloatBuffer); + + // Handle to SoundWave, to interrogate optons, and get user data + WaveEngine* m_pHost = nullptr; + }; + } + + + namespace synth + { + class Property + { + public: + double value = 0.0f; + + public: + Property() = default; + Property(double f); + + public: + Property& operator =(const double f); + }; + + + class Trigger + { + + }; + + + class Module + { + public: + virtual void Update(uint32_t nChannel, double dTime, double dTimeStep) = 0; + }; + + + class ModularSynth + { + public: + ModularSynth(); + + public: + bool AddModule(Module* pModule); + bool RemoveModule(Module* pModule); + bool AddPatch(Property* pInput, Property* pOutput); + bool RemovePatch(Property* pInput, Property* pOutput); + + + public: + void UpdatePatches(); + void Update(uint32_t nChannel, double dTime, double dTimeStep); + + protected: + std::vector m_vModules; + std::vector> m_vPatches; + }; + + + namespace modules + { + class Oscillator : public Module + { + public: + enum class Type + { + Sine, + Saw, + Square, + Triangle, + PWM, + Wave, + Noise, + }; + + public: + // Primary frequency of oscillation + Property frequency = 0.0f; + // Primary amplitude of output + Property amplitude = 1.0f; + // LFO input if required + Property lfo_input = 0.0f; + // Primary Output + Property output; + // Tweakable Parameter + Property parameter = 0.0; + + Type waveform = Type::Sine; + + Wave* pWave = nullptr; + + private: + double phase_acc = 0.0f; + double max_frequency = 20000.0; + uint32_t random_seed = 0xB00B1E5; + + double rndDouble(double min, double max); + uint32_t rnd(); + + + public: + virtual void Update(uint32_t nChannel, double dTime, double dTimeStep) override; + + }; + } + } + + +} + +#if defined(SOUNDWAVE_USING_WINMM) +#define _WIN32_LEAN_AND_MEAN +#include +#undef min +#undef max + +namespace olc::sound::driver +{ + class WinMM : public Base + { + public: + WinMM(WaveEngine* pHost); + ~WinMM(); + + protected: + bool Open(const std::string& sOutputDevice, const std::string& sInputDevice) override; + bool Start() override; + void Stop() override; + void Close() override; + + private: + void DriverLoop(); + void FreeAudioBlock(); + static void CALLBACK waveOutProc(HWAVEOUT hWaveOut, UINT uMsg, DWORD_PTR dwInstance, DWORD dwParam1, DWORD dwParam2); + HWAVEOUT m_hwDevice = nullptr; + std::thread m_thDriverLoop; + std::atomic m_bDriverLoopActive{ false }; + std::unique_ptr[]> m_pvBlockMemory; + std::unique_ptr m_pWaveHeaders; + std::atomic m_nBlockFree = 0; + std::condition_variable m_cvBlockNotZero; + std::mutex m_muxBlockNotZero; + uint32_t m_nBlockCurrent = 0; + }; +} +#endif // SOUNDWAVE_USING_WINMM + +#if defined(SOUNDWAVE_USING_SDLMIXER) + +#if defined(__EMSCRIPTEN__) +#include +#else +#include +#endif + +namespace olc::sound::driver +{ + class SDLMixer final : public Base + { + public: + explicit SDLMixer(WaveEngine* pHost); + ~SDLMixer() final; + + protected: + bool Open(const std::string& sOutputDevice, const std::string& sInputDevice) final; + bool Start() final; + void Stop() final; + void Close() final; + + private: + void FillChunkBuffer(const std::vector& userData) const; + + static void SDLMixerCallback(int channel); + + private: + bool m_keepRunning = false; + Uint16 m_haveFormat = AUDIO_F32SYS; + std::vector audioBuffer; + Mix_Chunk audioChunk; + + static SDLMixer* instance; + }; +} + +#endif // SOUNDWAVE_USING_SDLMIXER + +#if defined(SOUNDWAVE_USING_ALSA) +#include +#include +#include + +namespace olc::sound::driver +{ + // Not thread-safe + 
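+	// A fixed-capacity pool of audio blocks used by the ALSA driver: blocks are
+	// claimed and filled via GetFreeBuffer(), then drained to the device via
+	// GetFullBuffer(), with head/tail indices advancing modulo the pool size.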
template + class RingBuffer + { + public: + RingBuffer() + { } + + void Resize(unsigned int bufnum = 0, unsigned int buflen = 0) + { + m_vBuffers.resize(bufnum); + for (auto& vBuffer : m_vBuffers) + vBuffer.resize(buflen); + } + + std::vector& GetFreeBuffer() + { + assert(!IsFull()); + + std::vector& result = m_vBuffers[m_nTail]; + m_nTail = Next(m_nTail); + return result; + } + + std::vector& GetFullBuffer() + { + assert(!IsEmpty()); + + std::vector& result = m_vBuffers[m_nHead]; + m_nHead = Next(m_nHead); + return result; + } + + bool IsEmpty() + { + return m_nHead == m_nTail; + } + + bool IsFull() + { + return m_nHead == Next(m_nTail); + } + + private: + unsigned int Next(unsigned int current) + { + return (current + 1) % m_vBuffers.size(); + } + + std::vector> m_vBuffers; + unsigned int m_nHead = 0; + unsigned int m_nTail = 0; + }; + + class ALSA : public Base + { + public: + ALSA(WaveEngine* pHost); + ~ALSA(); + + protected: + bool Open(const std::string& sOutputDevice, const std::string& sInputDevice) override; + bool Start() override; + void Stop() override; + void Close() override; + + private: + void DriverLoop(); + + snd_pcm_t* m_pPCM; + RingBuffer m_rBuffers; + std::atomic m_bDriverLoopActive{ false }; + std::thread m_thDriverLoop; + }; +} +#endif // SOUNDWAVE_USING_ALSA + +#if defined(SOUNDWAVE_USING_PULSE) +#include + +namespace olc::sound::driver +{ + class PulseAudio : public Base + { + public: + PulseAudio(WaveEngine* pHost); + ~PulseAudio(); + + protected: + bool Open(const std::string& sOutputDevice, const std::string& sInputDevice) override; + bool Start() override; + void Stop() override; + void Close() override; + + private: + void DriverLoop(); + + pa_simple* m_pPA; + std::atomic m_bDriverLoopActive{ false }; + std::thread m_thDriverLoop; + }; +} +#endif // SOUNDWAVE_USING_PULSE + +#ifdef OLC_SOUNDWAVE +#undef OLC_SOUNDWAVE + +namespace olc::sound +{ + WaveEngine::WaveEngine() + { + m_sInputDevice = "NONE"; + m_sOutputDevice = "DEFAULT"; + +#if defined(SOUNDWAVE_USING_WINMM) + m_driver = std::make_unique(this); +#endif + +#if defined(SOUNDWAVE_USING_WASAPI) + m_driver = std::make_unique(this); +#endif + +#if defined(SOUNDWAVE_USING_XAUDIO) + m_driver = std::make_unique(this); +#endif + +#if defined(SOUNDWAVE_USING_OPENAL) + m_driver = std::make_unique(this); +#endif + +#if defined(SOUNDWAVE_USING_ALSA) + m_driver = std::make_unique(this); +#endif + +#if defined(SOUNDWAVE_USING_SDLMIXER) + m_driver = std::make_unique(this); +#endif + +#if defined(SOUNDWAVE_USING_PULSE) + m_driver = std::make_unique(this); +#endif + } + + WaveEngine::~WaveEngine() + { + DestroyAudio(); + } + + std::vector WaveEngine::GetOutputDevices() + { + return { "XXX" }; + } + + + void WaveEngine::UseOutputDevice(const std::string& sDeviceOut) + { + m_sOutputDevice = sDeviceOut; + } + + std::vector WaveEngine::GetInputDevices() + { + return { "XXX" }; + } + + void WaveEngine::UseInputDevice(const std::string& sDeviceIn) + { + m_sInputDevice = sDeviceIn; + } + + bool WaveEngine::InitialiseAudio(uint32_t nSampleRate, uint32_t nChannels, uint32_t nBlocks, uint32_t nBlockSamples) + { + m_nSampleRate = nSampleRate; + m_nChannels = nChannels; + m_nBlocks = nBlocks; + m_nBlockSamples = nBlockSamples; + m_dSamplePerTime = double(nSampleRate); + m_dTimePerSample = 1.0 / double(nSampleRate); + m_driver->Open(m_sOutputDevice, m_sInputDevice); + m_driver->Start(); + return false; + } + + + bool WaveEngine::DestroyAudio() + { + StopAll(); + m_driver->Stop(); + m_driver->Close(); + return false; + } + + void 
WaveEngine::SetCallBack_NewSample(std::function func) + { + m_funcNewSample = func; + } + + void WaveEngine::SetCallBack_SynthFunction(std::function func) + { + m_funcUserSynth = func; + } + + void WaveEngine::SetCallBack_FilterFunction(std::function func) + { + m_funcUserFilter = func; + } + + PlayingWave WaveEngine::PlayWaveform(Wave* pWave, bool bLoop, double dSpeed) + { + WaveInstance wi; + wi.bLoop = bLoop; + wi.pWave = pWave; + wi.dSpeedModifier = dSpeed * double(pWave->file.samplerate()) / m_dSamplePerTime; + wi.dDuration = pWave->file.duration() / dSpeed; + wi.dInstanceTime = m_dGlobalTime; + m_listWaves.push_back(wi); + return std::prev(m_listWaves.end()); + } + + void WaveEngine::PauseWaveform(const PlayingWave& w) { + w->bPaused = true; + } + + void WaveEngine::ResumeWaveform(const PlayingWave& w) { + w->bPaused = false; + } + + void WaveEngine::RewindWaveform(const PlayingWave& w) { + w->dInstanceTime = m_dGlobalTime; + } + + void WaveEngine::StopWaveform(const PlayingWave& w) + { + w->bFlagForStop = true; + } + + void WaveEngine::StopAll() + { + for (auto& wave : m_listWaves) + { + wave.bFlagForStop = true; + } + } + + void WaveEngine::SetOutputVolume(const float fVolume) + { + m_fOutputVolume = std::clamp(fVolume, 0.0f, 1.0f); + } + + uint32_t WaveEngine::FillOutputBuffer(std::vector& vBuffer, const uint32_t nBufferOffset, const uint32_t nRequiredSamples) + { + for (uint32_t nSample = 0; nSample < nRequiredSamples; nSample++) + { + double dSampleTime = m_dGlobalTime + nSample * m_dTimePerSample; + + if (m_funcNewSample) + m_funcNewSample(dSampleTime); + + for (uint32_t nChannel = 0; nChannel < m_nChannels; nChannel++) + { + // Construct the sample + float fSample = 0.0f; + + // 1) Sample any active waves + for (auto& wave : m_listWaves) + { + // Is wave instance flagged for stopping? + if (wave.bFlagForStop) + { + wave.bFinished = true; + } + else + { + // Calculate offset into wave instance + double dTimeOffset = dSampleTime - wave.dInstanceTime; + + // If offset is larger than wave then... 
+ if (dTimeOffset >= wave.dDuration) + { + if (wave.bLoop) + { + // ...if looping, reset the wave instance + wave.dInstanceTime = dSampleTime; + } + else + { + // ...if not looping, flag wave instance as dead + wave.bFinished = true; + } + } + else + { + // OR, sample the waveform from the correct channel + if (!wave.bPaused) { + fSample += float(wave.pWave->vChannelView[nChannel % wave.pWave->file.channels()].GetSample(dTimeOffset * m_dSamplePerTime * wave.dSpeedModifier)); + } + else { + wave.dInstanceTime += m_dTimePerSample; + } + } + } + } + + // Remove waveform instances that have finished + m_listWaves.remove_if([](const WaveInstance& wi) {return wi.bFinished; }); + + + // 2) If user is synthesizing, request sample + if (m_funcUserSynth) + fSample += m_funcUserSynth(nChannel, dSampleTime); + + // 3) Apply global filters + + + // 4) If user is filtering, allow manipulation of output + if (m_funcUserFilter) + fSample = m_funcUserFilter(nChannel, dSampleTime, fSample); + + // Place sample in buffer + vBuffer[nBufferOffset + nSample * m_nChannels + nChannel] = fSample * m_fOutputVolume; + } + } + + // UPdate global time, accounting for error (thanks scripticuk) + m_dGlobalTime += nRequiredSamples * m_dTimePerSample; + return nRequiredSamples; + } + + + uint32_t WaveEngine::GetSampleRate() const + { + return m_nSampleRate; + } + + uint32_t WaveEngine::GetChannels() const + { + return m_nChannels; + } + + uint32_t WaveEngine::GetBlocks() const + { + return m_nBlocks; + } + + uint32_t WaveEngine::GetBlockSampleCount() const + { + return m_nBlockSamples; + } + + double WaveEngine::GetTimePerSample() const + { + return m_dTimePerSample; + } + + namespace driver + { + Base::Base(olc::sound::WaveEngine* pHost) : m_pHost(pHost) + {} + + Base::~Base() + {} + + bool Base::Open(const std::string& sOutputDevice, const std::string& sInputDevice) + { + return false; + } + + bool Base::Start() + { + return false; + } + + void Base::Stop() + { + } + + void Base::Close() + { + } + + std::vector Base::EnumerateOutputDevices() + { + return { "DEFAULT" }; + } + + std::vector Base::EnumerateInputDevices() + { + return { "NONE" }; + } + + void Base::ProcessOutputBlock(std::vector& vFloatBuffer, std::vector& vDACBuffer) + { + constexpr float fMaxSample = float(std::numeric_limits::max()); + constexpr float fMinSample = float(std::numeric_limits::min()); + + // So... why not use vFloatBuffer.size()? 
Well with this implementation + // we can, but i suspect there may be some platforms that request a + // specific number of samples per "loop" rather than this block architecture + uint32_t nSamplesToProcess = m_pHost->GetBlockSampleCount(); + uint32_t nSampleOffset = 0; + while (nSamplesToProcess > 0) + { + uint32_t nSamplesGathered = m_pHost->FillOutputBuffer(vFloatBuffer, nSampleOffset, nSamplesToProcess); + + // Vector is in float32 format, so convert to hardware required format + for (uint32_t n = 0; n < nSamplesGathered; n++) + { + for (uint32_t c = 0; c < m_pHost->GetChannels(); c++) + { + size_t nSampleID = nSampleOffset + (n * m_pHost->GetChannels() + c); + vDACBuffer[nSampleID] = short(std::clamp(vFloatBuffer[nSampleID] * fMaxSample, fMinSample, fMaxSample)); + } + } + + nSampleOffset += nSamplesGathered; + nSamplesToProcess -= nSamplesGathered; + } + } + + void Base::GetFullOutputBlock(std::vector& vFloatBuffer) + { + uint32_t nSamplesToProcess = m_pHost->GetBlockSampleCount(); + uint32_t nSampleOffset = 0; + while (nSamplesToProcess > 0) + { + uint32_t nSamplesGathered = m_pHost->FillOutputBuffer(vFloatBuffer, nSampleOffset, nSamplesToProcess); + + nSampleOffset += nSamplesGathered; + nSamplesToProcess -= nSamplesGathered; + } + } + } + + namespace synth + { + Property::Property(double f) + { + value = std::clamp(f, -1.0, 1.0); + } + + Property& Property::operator =(const double f) + { + value = std::clamp(f, -1.0, 1.0); + return *this; + } + + + ModularSynth::ModularSynth() + { + + } + + bool ModularSynth::AddModule(Module* pModule) + { + // Check if module already added + if (std::find(m_vModules.begin(), m_vModules.end(), pModule) == std::end(m_vModules)) + { + m_vModules.push_back(pModule); + return true; + } + + return false; + } + + bool ModularSynth::RemoveModule(Module* pModule) + { + if (std::find(m_vModules.begin(), m_vModules.end(), pModule) == std::end(m_vModules)) + { + m_vModules.erase(std::remove(m_vModules.begin(), m_vModules.end(), pModule), m_vModules.end()); + return true; + } + + return false; + } + + bool ModularSynth::AddPatch(Property* pInput, Property* pOutput) + { + // Does patch exist? 
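+				// (A "patch" is just a pair of property pointers; UpdatePatches() copies the
+				// first property's value into the second on every update, so duplicate patches
+				// and null endpoints are rejected here before being stored.)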
+ std::pair newPatch = std::pair(pInput, pOutput); + + if (std::find(m_vPatches.begin(), m_vPatches.end(), newPatch) == std::end(m_vPatches)) + { + // Patch doesnt exist, now check if either are null + if (pInput != nullptr && pOutput != nullptr) + { + m_vPatches.push_back(newPatch); + return true; + } + } + + return false; + } + + bool ModularSynth::RemovePatch(Property* pInput, Property* pOutput) + { + std::pair newPatch = std::pair(pInput, pOutput); + + if (std::find(m_vPatches.begin(), m_vPatches.end(), newPatch) == std::end(m_vPatches)) + { + m_vPatches.erase(std::remove(m_vPatches.begin(), m_vPatches.end(), newPatch), m_vPatches.end()); + return true; + } + + return false; + } + + void ModularSynth::UpdatePatches() + { + // Update patches + for (auto& patch : m_vPatches) + { + patch.second->value = patch.first->value; + } + } + + + void ModularSynth::Update(uint32_t nChannel, double dTime, double dTimeStep) + { + // Now update synth + for (auto& pModule : m_vModules) + { + pModule->Update(nChannel, dTime, dTimeStep); + } + } + + + namespace modules + { + void Oscillator::Update(uint32_t nChannel, double dTime, double dTimeStep) + { + // We use phase accumulation to combat change in parameter glitches + double w = frequency.value * max_frequency * dTimeStep; + phase_acc += w + lfo_input.value * frequency.value; + if (phase_acc >= 2.0) phase_acc -= 2.0; + + switch (waveform) + { + case Type::Sine: + output = amplitude.value * sin(phase_acc * 2.0 * 3.14159); + break; + + case Type::Saw: + output = amplitude.value * (phase_acc - 1.0) * 2.0; + break; + + case Type::Square: + output = amplitude.value * (phase_acc >= 1.0) ? 1.0 : -1.0; + break; + + case Type::Triangle: + output = amplitude.value * (phase_acc < 1.0) ? (phase_acc * 0.5) : (1.0 - phase_acc * 0.5); + break; + + case Type::PWM: + output = amplitude.value * (phase_acc >= (parameter.value + 1.0)) ? 
1.0 : -1.0; + break; + + case Type::Wave: + if (pWave != nullptr) + output = amplitude.value * pWave->vChannelView[nChannel].GetSample(phase_acc * 0.5 * pWave->file.durationInSamples()); + break; + + case Type::Noise: + output = amplitude.value * rndDouble(-1.0, 1.0); + break; + + } + } + + double Oscillator::rndDouble(double min, double max) + { + return ((double)rnd() / (double)(0x7FFFFFFF)) * (max - min) + min; + } + + uint32_t Oscillator::rnd() + { + random_seed += 0xe120fc15; + uint64_t tmp; + tmp = (uint64_t)random_seed * 0x4a39b70d; + uint32_t m1 = uint32_t(((tmp >> 32) ^ tmp) & 0xFFFFFFFF); + tmp = (uint64_t)m1 * 0x12fad5c9; + uint32_t m2 = uint32_t(((tmp >> 32) ^ tmp) & 0xFFFFFFFF); + return m2; + } + } + } +} + + + +#if defined(SOUNDWAVE_USING_WINMM) +// WinMM Driver Implementation +namespace olc::sound::driver +{ +#pragma comment(lib, "winmm.lib") + + WinMM::WinMM(WaveEngine* pHost) : Base(pHost) + { } + + WinMM::~WinMM() + { + Stop(); + Close(); + } + + bool WinMM::Open(const std::string& sOutputDevice, const std::string& sInputDevice) + { + // Device is available + WAVEFORMATEX waveFormat; + waveFormat.wFormatTag = WAVE_FORMAT_PCM; + waveFormat.nSamplesPerSec = m_pHost->GetSampleRate(); + waveFormat.wBitsPerSample = sizeof(short) * 8; + waveFormat.nChannels = m_pHost->GetChannels(); + waveFormat.nBlockAlign = (waveFormat.wBitsPerSample / 8) * waveFormat.nChannels; + waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign; + waveFormat.cbSize = 0; + + // Open Device if valid + if (waveOutOpen(&m_hwDevice, WAVE_MAPPER, &waveFormat, (DWORD_PTR)WinMM::waveOutProc, (DWORD_PTR)this, CALLBACK_FUNCTION) != S_OK) + return false; + + // Allocate array of wave header objects, one per block + m_pWaveHeaders = std::make_unique(m_pHost->GetBlocks()); + + // Allocate block memory - I dont like vector of vectors, so going with this mess instead + // My std::vector's content will change, but their size never will - they are basically array now + m_pvBlockMemory = std::make_unique[]>(m_pHost->GetBlocks()); + for (size_t i = 0; i < m_pHost->GetBlocks(); i++) + m_pvBlockMemory[i].resize(m_pHost->GetBlockSampleCount() * m_pHost->GetChannels(), 0); + + // Link headers to block memory - clever, so we only move headers about + // rather than memory... + for (unsigned int n = 0; n < m_pHost->GetBlocks(); n++) + { + m_pWaveHeaders[n].dwBufferLength = DWORD(m_pvBlockMemory[0].size() * sizeof(short)); + m_pWaveHeaders[n].lpData = (LPSTR)(m_pvBlockMemory[n].data()); + } + + // To begin with, all blocks are free + m_nBlockFree = m_pHost->GetBlocks(); + return true; + } + + bool WinMM::Start() + { + // Prepare driver thread for activity + m_bDriverLoopActive = true; + // and get it going! + m_thDriverLoop = std::thread(&WinMM::DriverLoop, this); + return true; + } + + void WinMM::Stop() + { + // Signal the driver loop to exit + m_bDriverLoopActive = false; + + // Wait for driver thread to exit gracefully + if (m_thDriverLoop.joinable()) + m_thDriverLoop.join(); + } + + void WinMM::Close() + { + waveOutClose(m_hwDevice); + } + + // Static Callback wrapper - specific instance is specified + void CALLBACK WinMM::waveOutProc(HWAVEOUT hWaveOut, UINT uMsg, DWORD_PTR dwInstance, DWORD dwParam1, DWORD dwParam2) + { + // All sorts of messages may be pinged here, but we're only interested + // in audio block is finished... 
+ if (uMsg != WOM_DONE) return; + + // ...which has happened so allow driver object to free resource + WinMM* driver = (WinMM*)dwInstance; + driver->FreeAudioBlock(); + } + + void WinMM::FreeAudioBlock() + { + // Audio subsystem is done with the block it was using, thus + // making it available again + m_nBlockFree++; + + // Signal to driver loop that a block is now available. It + // could have been suspended waiting for one + std::unique_lock lm(m_muxBlockNotZero); + m_cvBlockNotZero.notify_one(); + } + + void WinMM::DriverLoop() + { + // We will be using this vector to transfer to the host for filling, with + // user sound data (float32, -1.0 --> +1.0) + std::vector vFloatBuffer(m_pHost->GetBlockSampleCount() * m_pHost->GetChannels(), 0.0f); + + // While the system is active, start requesting audio data + while (m_bDriverLoopActive) + { + // Are there any blocks available to fill? ... + if (m_nBlockFree == 0) + { + // ...no, So wait until one is available + std::unique_lock lm(m_muxBlockNotZero); + while (m_nBlockFree == 0) // sometimes, Windows signals incorrectly + { + // This thread will suspend until this CV is signalled + // from FreeAudioBlock. + m_cvBlockNotZero.wait(lm); + } + } + + // ...yes, so use next one, by indicating one fewer + // block is available + m_nBlockFree--; + + // Prepare block for processing, by oddly, marking it as unprepared :P + if (m_pWaveHeaders[m_nBlockCurrent].dwFlags & WHDR_PREPARED) + { + waveOutUnprepareHeader(m_hwDevice, &m_pWaveHeaders[m_nBlockCurrent], sizeof(WAVEHDR)); + } + + // Give the userland the opportunity to fill the buffer. Note that the driver + // doesnt give a hoot about timing. Thats up to the SoundWave interface to + // maintain + + // Userland will populate a float buffer, that gets cleanly converted to + // a buffer of shorts for DAC + ProcessOutputBlock(vFloatBuffer, m_pvBlockMemory[m_nBlockCurrent]); + + // Send block to sound device + waveOutPrepareHeader(m_hwDevice, &m_pWaveHeaders[m_nBlockCurrent], sizeof(WAVEHDR)); + waveOutWrite(m_hwDevice, &m_pWaveHeaders[m_nBlockCurrent], sizeof(WAVEHDR)); + m_nBlockCurrent++; + m_nBlockCurrent %= m_pHost->GetBlocks(); + } + } +} // WinMM Driver Implementation +#endif +#if defined(SOUNDWAVE_USING_SDLMIXER) + +namespace olc::sound::driver +{ + + SDLMixer* SDLMixer::instance = nullptr; + + SDLMixer::SDLMixer(olc::sound::WaveEngine* pHost) + : Base(pHost) + { + instance = this; + } + + SDLMixer::~SDLMixer() + { + Stop(); + Close(); + } + + bool SDLMixer::Open(const std::string& sOutputDevice, const std::string& sInputDevice) + { + auto errc = Mix_OpenAudioDevice(static_cast(m_pHost->GetSampleRate()), + AUDIO_F32, + static_cast(m_pHost->GetChannels()), + static_cast(m_pHost->GetBlockSampleCount()), + sOutputDevice == "DEFAULT" ? nullptr : sOutputDevice.c_str(), + SDL_AUDIO_ALLOW_FORMAT_CHANGE); + + // Query the actual format of the audio device, as we have allowed it to be changed. 
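+		// (SDL_AUDIO_ALLOW_FORMAT_CHANGE means the device may report a format other than
+		// the AUDIO_F32 we asked for; the obtained format is what determines the chunk
+		// buffer size below - 4 bytes per sample for 32-bit formats, 2 for 16-bit, 1 for 8-bit.)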
+ if (errc || !Mix_QuerySpec(nullptr, &m_haveFormat, nullptr)) + { + std::cerr << "Failed to open audio device '" << sOutputDevice << "'" << std::endl; + return false; + } + + // Compute the Mix_Chunk buffer's size according to the format of the audio device + Uint32 bufferSize = 0; + switch (m_haveFormat) + { + case AUDIO_F32: + case AUDIO_S32: + bufferSize = m_pHost->GetBlockSampleCount() * m_pHost->GetChannels() * 4; + break; + case AUDIO_S16: + case AUDIO_U16: + bufferSize = m_pHost->GetBlockSampleCount() * m_pHost->GetChannels() * 2; + break; + case AUDIO_S8: + case AUDIO_U8: + bufferSize = m_pHost->GetBlockSampleCount() * m_pHost->GetChannels() * 1; + break; + default: + std::cerr << "Audio format of device '" << sOutputDevice << "' is not supported" << std::endl; + return false; + } + + // Allocate the buffer once. The size will never change after this + audioBuffer.resize(bufferSize); + audioChunk = { + 0, // 0, as the chunk does not own the array + audioBuffer.data(), // Pointer to data array + bufferSize, // Size in bytes + 128 // Volume; max by default as it's not controlled by the driver. + }; + + return true; + } + + template + void ConvertFloatTo(const std::vector& fromArr, Int* toArr) + { + static auto minVal = static_cast(std::numeric_limits::min()); + static auto maxVal = static_cast(std::numeric_limits::max()); + for (size_t i = 0; i != fromArr.size(); ++i) + { + toArr[i] = static_cast(std::clamp(fromArr[i] * maxVal, minVal, maxVal)); + } + } + + void SDLMixer::FillChunkBuffer(const std::vector& userData) const + { + // Since the audio device might have changed the format we need to provide, + // we convert the wave data from the user to that format. + switch (m_haveFormat) + { + case AUDIO_F32: + memcpy(audioChunk.abuf, userData.data(), audioChunk.alen); + break; + case AUDIO_S32: + ConvertFloatTo(userData, reinterpret_cast(audioChunk.abuf)); + break; + case AUDIO_S16: + ConvertFloatTo(userData, reinterpret_cast(audioChunk.abuf)); + break; + case AUDIO_U16: + ConvertFloatTo(userData, reinterpret_cast(audioChunk.abuf)); + break; + case AUDIO_S8: + ConvertFloatTo(userData, reinterpret_cast(audioChunk.abuf)); + break; + case AUDIO_U8: + ConvertFloatTo(userData, audioChunk.abuf); + break; + } + } + + void SDLMixer::SDLMixerCallback(int channel) + { + static std::vector userData(instance->m_pHost->GetBlockSampleCount() * instance->m_pHost->GetChannels()); + + if (channel != 0) + { + std::cerr << "Unexpected channel number" << std::endl; + } + + // Don't add another chunk if we should not keep running + if (!instance->m_keepRunning) + return; + + instance->GetFullOutputBlock(userData); + instance->FillChunkBuffer(userData); + + if (Mix_PlayChannel(0, &instance->audioChunk, 0) == -1) + { + std::cerr << "Error while playing Chunk" << std::endl; + } + } + + bool SDLMixer::Start() + { + m_keepRunning = true; + + // Kickoff the audio driver + SDLMixerCallback(0); + + // SDLMixer handles all other calls to reinsert user data + Mix_ChannelFinished(SDLMixerCallback); + return true; + } + + void SDLMixer::Stop() + { + m_keepRunning = false; + + // Stop might be called multiple times, so we check whether the device is already closed + if (Mix_QuerySpec(nullptr, nullptr, nullptr)) + { + for (int i = 0; i != m_pHost->GetChannels(); ++i) + { + if (Mix_Playing(i)) + Mix_HaltChannel(i); + } + } + } + + void SDLMixer::Close() + { + Mix_CloseAudio(); + } +} + +#endif // SOUNDWAVE_USING_SDLMIXER +#if defined(SOUNDWAVE_USING_ALSA) +// ALSA Driver Implementation +namespace olc::sound::driver +{ + 
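+	// Every driver follows the same pattern: Open() configures the device, Start() kicks
+	// off delivery, and the engine is then drained one block at a time. A minimal blocking
+	// driver loop, sketched here purely for illustration (WritePlatformBlock() is a
+	// hypothetical stand-in for whatever call the platform API actually provides),
+	// looks like this:
+	//
+	//	void DriverLoop()
+	//	{
+	//		std::vector<float> vBlock(m_pHost->GetBlockSampleCount() * m_pHost->GetChannels(), 0.0f);
+	//		while (m_bDriverLoopActive)
+	//		{
+	//			GetFullOutputBlock(vBlock);    // Engine mixes waves, synth and filters into the block
+	//			WritePlatformBlock(vBlock);    // Hypothetical: hand the finished block to the device
+	//		}
+	//	}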
ALSA::ALSA(WaveEngine* pHost) : Base(pHost) + { } + + ALSA::~ALSA() + { + Stop(); + Close(); + } + + bool ALSA::Open(const std::string& sOutputDevice, const std::string& sInputDevice) + { + // Open PCM stream + int rc = snd_pcm_open(&m_pPCM, "default", SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK); + + // Clear global cache. + // This won't affect users who don't want to create multiple instances of this driver, + // but it will prevent valgrind from whining about "possibly lost" memory. + // If the user's ALSA setup uses a PulseAudio plugin, then valgrind will still compain + // about some "still reachable" data used by that plugin. TODO? + snd_config_update_free_global(); + + if (rc < 0) + return false; + + // Prepare the parameter structure and set default parameters + snd_pcm_hw_params_t* params; + snd_pcm_hw_params_alloca(¶ms); + snd_pcm_hw_params_any(m_pPCM, params); + + // Set other parameters + snd_pcm_hw_params_set_access(m_pPCM, params, SND_PCM_ACCESS_RW_INTERLEAVED); + snd_pcm_hw_params_set_format(m_pPCM, params, SND_PCM_FORMAT_FLOAT); + snd_pcm_hw_params_set_rate(m_pPCM, params, m_pHost->GetSampleRate(), 0); + snd_pcm_hw_params_set_channels(m_pPCM, params, m_pHost->GetChannels()); + snd_pcm_hw_params_set_period_size(m_pPCM, params, m_pHost->GetBlockSampleCount(), 0); + snd_pcm_hw_params_set_periods(m_pPCM, params, m_pHost->GetBlocks(), 0); + + // Save these parameters + rc = snd_pcm_hw_params(m_pPCM, params); + if (rc < 0) + return false; + + return true; + } + + bool ALSA::Start() + { + // Unsure if really needed, helped prevent underrun on my setup + std::vector vSilence(m_pHost->GetBlockSampleCount() * m_pHost->GetChannels(), 0.0f); + snd_pcm_start(m_pPCM); + for (unsigned int i = 0; i < m_pHost->GetBlocks(); i++) + snd_pcm_writei(m_pPCM, vSilence.data(), m_pHost->GetBlockSampleCount()); + + m_rBuffers.Resize(m_pHost->GetBlocks(), m_pHost->GetBlockSampleCount() * m_pHost->GetChannels()); + + snd_pcm_start(m_pPCM); + m_bDriverLoopActive = true; + m_thDriverLoop = std::thread(&ALSA::DriverLoop, this); + + return true; + } + + void ALSA::Stop() + { + // Signal the driver loop to exit + m_bDriverLoopActive = false; + + // Wait for driver thread to exit gracefully + if (m_thDriverLoop.joinable()) + m_thDriverLoop.join(); + + if (m_pPCM != nullptr) + snd_pcm_drop(m_pPCM); + } + + void ALSA::Close() + { + if (m_pPCM != nullptr) + { + snd_pcm_close(m_pPCM); + m_pPCM = nullptr; + } + // Clear the global cache again for good measure + snd_config_update_free_global(); + } + + void ALSA::DriverLoop() + { + const uint32_t nFrames = m_pHost->GetBlockSampleCount(); + + int err; + std::vector vFDs; + + int nFDs = snd_pcm_poll_descriptors_count(m_pPCM); + if (nFDs < 0) + { + std::cerr << "snd_pcm_poll_descriptors_count returned " << nFDs << "\n"; + std::cerr << "disabling polling\n"; + nFDs = 0; + } + else + { + vFDs.resize(nFDs); + + err = snd_pcm_poll_descriptors(m_pPCM, vFDs.data(), vFDs.size()); + if (err < 0) + { + std::cerr << "snd_pcm_poll_descriptors returned " << err << "\n"; + std::cerr << "disabling polling\n"; + vFDs = {}; + } + } + + // While the system is active, start requesting audio data + while (m_bDriverLoopActive) + { + if (!m_rBuffers.IsFull()) + { + // Grab some audio data + auto& vFreeBuffer = m_rBuffers.GetFreeBuffer(); + GetFullOutputBlock(vFreeBuffer); + } + + // Wait a bit if our buffer is full + auto avail = snd_pcm_avail_update(m_pPCM); + while (m_rBuffers.IsFull() && avail < nFrames) + { + if (vFDs.size() == 0) break; + + err = poll(vFDs.data(), vFDs.size(), -1); + 
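+				// poll() blocks here until ALSA signals that the descriptors are ready (or an
+				// error occurs); the raw events are then translated by
+				// snd_pcm_poll_descriptors_revents() below before being inspected.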
if (err < 0) + std::cerr << "poll returned " << err << "\n"; + + unsigned short revents; + err = snd_pcm_poll_descriptors_revents(m_pPCM, vFDs.data(), vFDs.size(), &revents); + if (err < 0) + std::cerr << "snd_pcm_poll_descriptors_revents returned " << err << "\n"; + + if (revents & POLLERR) + std::cerr << "POLLERR\n"; + + avail = snd_pcm_avail_update(m_pPCM); + } + + // Write whatever we can + while (!m_rBuffers.IsEmpty() && avail >= nFrames) + { + auto vFullBuffer = m_rBuffers.GetFullBuffer(); + uint32_t nWritten = 0; + + while (nWritten < nFrames) + { + auto err = snd_pcm_writei(m_pPCM, vFullBuffer.data() + nWritten, nFrames - nWritten); + if (err > 0) + nWritten += err; + else + { + std::cerr << "snd_pcm_writei returned " << err << "\n"; + break; + } + } + avail = snd_pcm_avail_update(m_pPCM); + } + } + } +} // ALSA Driver Implementation +#endif +#if defined(SOUNDWAVE_USING_PULSE) +// PULSE Driver Implementation +#include +#include + +namespace olc::sound::driver +{ + PulseAudio::PulseAudio(WaveEngine* pHost) : Base(pHost) + { } + + PulseAudio::~PulseAudio() + { + Stop(); + Close(); + } + + bool PulseAudio::Open(const std::string& sOutputDevice, const std::string& sInputDevice) + { + pa_sample_spec ss{ + PA_SAMPLE_FLOAT32, m_pHost->GetSampleRate(), (uint8_t)m_pHost->GetChannels() + }; + + m_pPA = pa_simple_new(NULL, "olcSoundWaveEngine", PA_STREAM_PLAYBACK, NULL, + "Output Stream", &ss, NULL, NULL, NULL); + + if (m_pPA == NULL) + return false; + + return true; + } + + bool PulseAudio::Start() + { + m_bDriverLoopActive = true; + m_thDriverLoop = std::thread(&PulseAudio::DriverLoop, this); + + return true; + } + + void PulseAudio::Stop() + { + // Signal the driver loop to exit + m_bDriverLoopActive = false; + + // Wait for driver thread to exit gracefully + if (m_thDriverLoop.joinable()) + m_thDriverLoop.join(); + } + + void PulseAudio::Close() + { + if (m_pPA != nullptr) + { + pa_simple_free(m_pPA); + m_pPA = nullptr; + } + } + + void PulseAudio::DriverLoop() + { + // We will be using this vector to transfer to the host for filling, with + // user sound data (float32, -1.0 --> +1.0) + std::vector vFloatBuffer(m_pHost->GetBlockSampleCount() * m_pHost->GetChannels(), 0.0f); + + // While the system is active, start requesting audio data + while (m_bDriverLoopActive) + { + // Grab audio data from user + GetFullOutputBlock(vFloatBuffer); + + // Fill PulseAudio data buffer + int error; + if (pa_simple_write(m_pPA, vFloatBuffer.data(), + vFloatBuffer.size() * sizeof(float), &error) < 0) + { + std::cerr << "Failed to feed data to PulseAudio: " << pa_strerror(error) << "\n"; + } + } + } +} // PulseAudio Driver Implementation +#endif + +#endif // OLC_SOUNDWAVE IMPLEMENTATION +#endif // OLC_SOUNDWAVE_H diff --git a/Example/olcUTIL_DataFile.h b/Example/olcUTIL_DataFile.h new file mode 100644 index 0000000..3e38880 --- /dev/null +++ b/Example/olcUTIL_DataFile.h @@ -0,0 +1,490 @@ +/* +OneLoneCoder - DataFile v1.00 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +An "easy to use" serialisation/deserialisation class that yields +human readable hierachical files. + +License (OLC-3) +~~~~~~~~~~~~~~~ + +Copyright 2018 - 2022 OneLoneCoder.com + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions or derivations of source code must retain the above +copyright notice, this list of conditions and the following disclaimer. + +2. 
Redistributions or derivative works in binary form must reproduce +the above copyright notice. This list of conditions and the following +disclaimer must be reproduced in the documentation and/or other +materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Links +~~~~~ +YouTube: https://www.youtube.com/javidx9 +Discord: https://discord.gg/WhwHUMV +Twitter: https://www.twitter.com/javidx9 +Twitch: https://www.twitch.tv/javidx9 +GitHub: https://www.github.com/onelonecoder +Homepage: https://www.onelonecoder.com + +Author +~~~~~~ +David Barr, aka javidx9, �OneLoneCoder 2019, 2020, 2021, 2022 + +*/ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +int operator ""_I(const char*key,std::size_t len); + +namespace olc::utils +{ + class datafile + { + public: + inline datafile() = default; + inline static bool DEBUG_ACCESS_OPTIONS=false; + + public: + // Sets the String Value of a Property (for a given index) + inline void SetString(const std::string& sString, const size_t nItem = 0) + { + if (nItem >= m_vContent.size()) + m_vContent.resize(nItem + 1); + + m_vContent[nItem] = sString; + } + + // Retrieves the String Value of a Property (for a given index) or "" + inline const std::string GetString(const size_t nItem = 0) const + { + if (nItem >= m_vContent.size()){ + std::cout<<"WARNING! Accesing out-of-bounds list item "< GetValues() const + { + return m_vContent; + } + + // Checks if a property exists - useful to avoid creating properties + // via reading them, though non-essential + inline bool HasProperty(const std::string& sName) const + { + return m_mapObjects.count(sName) > 0; + } + + // Access a datafile via a convenient name - "root.node.something.property" + inline datafile& GetProperty(const std::string& name) + { + size_t x = name.find_first_of('.'); + if (x != std::string::npos) + { + std::string sProperty = name.substr(0, x); + lastAccessedProperty=sProperty; + if (HasProperty(sProperty)) + return operator[](sProperty).GetProperty(name.substr(x + 1, name.size())); + else { + std::cout<<"WARNING! Could not read Property "< write = [&](const datafile& n, std::ofstream& file) + { + // Lambda creates string given indentation preferences + auto indent = [&](const std::string& sString, const size_t nCount) + { + std::string sOut; + for (size_t n = 0; n < nCount; n++) sOut += sString; + return sOut; + }; + + // Iterate through each property of this node + for (auto const& property : n.m_vecObjects) + { + // Does property contain any sub objects? 
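+				// (Leaf properties are emitted as "name = value[, value...]" on a single line;
+				// properties that own children are emitted as a named, brace-enclosed and
+				// indented block by recursing into this same lambda.)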
+ if (property.second.m_vecObjects.empty()) + { + // No, so it's an assigned field and should just be written. If the property + // is flagged as comment, it has no assignment potential. First write the + // property name + file << indent(sIndent, nIndentCount) << property.first << (property.second.m_bIsComment ? "" : " = "); + + // Second, write the property value (or values, seperated by provided + // separation charater + size_t nItems = property.second.GetValueCount(); + for (size_t i = 0; i < property.second.GetValueCount(); i++) + { + // If the Value being written, in string form, contains the separation + // character, then the value must be written inside quotation marks. Note, + // that if the Value is the last of a list of Values for a property, it is + // not suffixed with the separator + size_t x = property.second.GetString(i).find_first_of(sListSep); + if (x != std::string::npos) + { + // Value contains separator, so wrap in quotes + file << "\"" << property.second.GetString(i) << "\"" << ((nItems > 1) ? sSeperator : ""); + } + else + { + // Value does not contain separator, so just write out + file << property.second.GetString(i) << ((nItems > 1) ? sSeperator : ""); + } + nItems--; + } + + // Property written, move to next line + file << "\n"; + } + else + { + // Yes, property has properties of its own, so it's a node + // Force a new line and write out the node's name + file << "\n" << indent(sIndent, nIndentCount) << property.first << "\n"; + // Open braces, and update indentation + file << indent(sIndent, nIndentCount) << "{\n"; + nIndentCount++; + // Recursively write that node + write(property.second, file); + // Node written, so close braces + file << indent(sIndent, nIndentCount) << "}\n\n"; + } + } + + // We've finished writing out a node, regardless of state, our indentation + // must decrease, unless we're top level + if (nIndentCount > 0) nIndentCount--; + }; + + // Start Here! Open the file for writing + std::ofstream file(sFileName); + if (file.is_open()) + { + // Write the file starting form the supplied node + write(n, file); + return true; + } + return false; + } + + inline static bool Read(datafile& n, const std::string& sFileName, const char sListSep = ',') + { + // Open the file! + std::ifstream file(sFileName); + if (file.is_open()) + { + // These variables are outside of the read loop, as we will + // need to refer to previous iteration values in certain conditions + std::string sPropName = ""; + std::string sPropValue = ""; + + // The file is fundamentally structured as a stack, so we will read it + // in a such, but note the data structure in memory is not explicitly + // stored in a stack, but one is constructed implicitly via the nodes + // owning other nodes (aka a tree) + + // I dont want to accidentally create copies all over the place, nor do + // I want to use pointer syntax, so being a bit different and stupidly + // using std::reference_wrapper, so I can store references to datafile + // nodes in a std::container. + std::stack> stkPath; + stkPath.push(n); + + + // Read file line by line and process + while (!file.eof()) + { + // Read line + std::string line; + std::getline(file, line); + + // This little lambda removes whitespace from + // beginning and end of supplied string + auto trim = [](std::string& s) + { + s.erase(0, s.find_first_not_of(" \t\n\r\f\v")); + s.erase(s.find_last_not_of(" \t\n\r\f\v") + 1); + }; + + trim(line); + + // If line has content + if (!line.empty()) + { + // Test if its a comment... 
+ if (line[0] == '#') + { + // ...it is a comment, so ignore + datafile comment; + comment.m_bIsComment = true; + stkPath.top().get().m_vecObjects.push_back({ line, comment }); + } + else + { + // ...it is content, so parse. Firstly, find if the line + // contains an assignment. If it does then it's a property... + size_t x = line.find_first_of('='); + if (x != std::string::npos) + { + // ...so split up the property into a name, and its values! + + // Extract the property name, which is all characters up to + // first assignment, trim any whitespace from ends + sPropName = line.substr(0, x); + trim(sPropName); + + // Extract the property value, which is all characters after + // the first assignment operator, trim any whitespace from ends + sPropValue = line.substr(x + 1, line.size()); + trim(sPropValue); + + // The value may be in list form: a, b, c, d, e, f etc and some of those + // elements may exist in quotes a, b, c, "d, e", f. So we need to iterate + // character by character and break up the value + bool bInQuotes = false; + std::string sToken; + size_t nTokenCount = 0; + for (const auto c : sPropValue) + { + // Is character a quote... + if (c == '\"') + { + // ...yes, so toggle quote state + bInQuotes = !bInQuotes; + } + else + { + // ...no, so proceed creating token. If we are in quote state + // then just append characters until we exit quote state. + if (bInQuotes) + { + sToken.append(1, c); + } + else + { + // Is the character our seperator? If it is + if (c == sListSep) + { + // Clean up the token + trim(sToken); + // Add it to the vector of values for this property + stkPath.top().get()[sPropName].SetString(sToken, nTokenCount); + // Reset our token state + sToken.clear(); + nTokenCount++; + } + else + { + // It isnt, so just append to token + sToken.append(1, c); + } + } + } + } + + // Any residual characters at this point just make up the final token, + // so clean it up and add it to the vector of values + if (!sToken.empty()) + { + trim(sToken); + stkPath.top().get()[sPropName].SetString(sToken, nTokenCount); + } + } + else + { + // ...but if it doesnt, then it's something structural + if (line[0] == '{') + { + // Open brace, so push this node to stack, subsequent properties + // will belong to the new node + stkPath.push(stkPath.top().get()[sPropName]); + } + else + { + if (line[0] == '}') + { + // Close brace, so this node has been defined, pop it from the + // stack + stkPath.pop(); + } + else + { + // Line is a property with no assignment. Who knows whether this is useful, + // but we can simply add it as a valueless property... + sPropName = line; + // ...actually it is useful, as valuless properties are typically + // going to be the names of new datafile nodes on the next iteration + } + } + } + } + } + } + + // Close and exit! + file.close(); + return true; + } + + // File not found, so fail + return false; + } + + public: + inline datafile& operator[](const std::string& name) + { + // Check if this "node"'s map already contains an object with this name... + if (m_mapObjects.count(name) == 0) + { + // ...it did not! So create this object in the map. First get a vector id + // and link it with the name in the unordered_map + m_mapObjects[name] = m_vecObjects.size(); + // then creating the new, blank object in the vector of objects + m_vecObjects.push_back({ name, datafile() }); + } + + // ...it exists! so return the object, by getting its index from the map, and using that + // index to look up a vector element. 
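+			// For example, chained access builds the hierarchy on demand (names here are
+			// purely illustrative):
+			//
+			//	datafile df;
+			//	df["player"]["name"].SetString("Bob");
+			//	df["player"]["position"].SetString("10.0", 0);
+			//	df["player"]["position"].SetString("20.0", 1);
+			//	datafile::Write(df, "example.txt");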
+ return m_vecObjects[m_mapObjects[name]].second; + } + + private: + // The "list of strings" that make up a property value + std::vector m_vContent; + + // Linkage to create "ordered" unordered_map. We have a vector of + // "properties", and the index to a specific element is mapped. + std::vector> m_vecObjects; + std::unordered_map m_mapObjects; + + inline static std::string lastAccessedProperty=""; + + protected: + // Used to identify if a property is a comment or not, not user facing + bool m_bIsComment = false; + }; + + class datafilestringdata + { + std::reference_wrapperdata; + std::string key; + public: + inline datafilestringdata(datafile&dat,std::string key) + :data(dat),key(key){}; + std::string operator[](int index){return data.get().GetProperty(key).GetString(index);}; + }; + class datafileintdata + { + std::reference_wrapperdata; + std::string key; + public: + inline datafileintdata(datafile&dat,std::string key) + :data(dat),key(key){}; + int operator[](int index){ + return data.get().GetProperty(key).GetInt(index); + }; + }; + class datafilefloatdata + { + std::reference_wrapperdata; + std::string key; + public: + inline datafilefloatdata(datafile&dat,std::string key) + :data(dat),key(key){}; + float operator[](int index){return data.get().GetProperty(key).GetReal(index);}; + }; + class datafiledoubledata + { + std::reference_wrapperdata; + std::string key; + public: + inline datafiledoubledata(datafile&dat,std::string key) + :data(dat),key(key){}; + double operator[](int index){return data.get().GetProperty(key).GetReal(index);}; + }; +} \ No newline at end of file diff --git a/Example/olcUTIL_Geometry2D.h b/Example/olcUTIL_Geometry2D.h new file mode 100644 index 0000000..1d66f41 --- /dev/null +++ b/Example/olcUTIL_Geometry2D.h @@ -0,0 +1,1097 @@ +/* +OneLoneCoder - Geometry 2D v1.01 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +A collection of 2D Geometric primitives and functions to work with +and between them. + + +License (OLC-3) +~~~~~~~~~~~~~~~ + +Copyright 2018 - 2022 OneLoneCoder.com + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions or derivations of source code must retain the above +copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions or derivative works in binary form must reproduce +the above copyright notice. This list of conditions and the following +disclaimer must be reproduced in the documentation and/or other +materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +Links +~~~~~ +YouTube: https://www.youtube.com/javidx9 +Discord: https://discord.gg/WhwHUMV +Twitter: https://www.twitter.com/javidx9 +Twitch: https://www.twitch.tv/javidx9 +GitHub: https://www.github.com/onelonecoder +Homepage: https://www.onelonecoder.com + +Author +~~~~~~ +David Barr, aka javidx9, �OneLoneCoder 2019, 2020, 2021, 2022 + +Changes: +v1.01: +Made constants inline ++Header guards (lol... sigh...) + +*/ + +#pragma once +#include "olcPixelGameEngine.h" + +namespace olc::utils::geom2d +{ + // Lemon Meringue + inline const double pi = 3.141592653589793238462643383279502884; + + // Floating point error margin + inline const double epsilon = 0.001; + + //https://stackoverflow.com/questions/1903954/is-there-a-standard-sign-function-signum-sgn-in-c-c + template + constexpr int sgn(T val) { return (T(0) < val) - (val < T(0)); } + + // Defines a line segment + template + struct line + { + olc::v2d_generic start; + olc::v2d_generic end; + + inline line(const olc::v2d_generic& s = { T(0), T(0) }, + const olc::v2d_generic& e = { T(0), T(0) }) + : start(s), end(e) + { } + + + // Get length of line + inline constexpr T length() + { + return (end - start).mag(); + } + + // Get length of line^2 + inline constexpr T length2() + { + return (end - start).mag2(); + } + + inline constexpr olc::v2d_generic vector() const + { + return (end - start); + } + + // Given a real distance, get point along line + inline constexpr olc::v2d_generic rpoint(const T& distance) const + { + return start + (end - start).norm() * distance; + } + + // Given a unit distance, get point along line + inline constexpr olc::v2d_generic upoint(const T& distance) const + { + return start + (end - start) * distance; + } + + // Return which side of the line does a point lie + inline constexpr int32_t side(const olc::v2d_generic& point) const + { + double d = (end - start).cross(point - start); + if (d < 0) + return -1; + else + if (d > 0) + return 1; + else + return 0; + } + }; + + template + struct ray + { + olc::v2d_generic origin; + olc::v2d_generic direction; + }; + + template + struct rect + { + olc::v2d_generic pos; + olc::v2d_generic size; + + inline rect(const olc::v2d_generic& p = { T(0), T(0) }, + const olc::v2d_generic& s = { T(1), T(1) }) + : pos(p), size(s) + { } + + inline olc::v2d_generic middle() const + { + return pos + (size * double(0.5)); + } + + // Get line segment from top side of rectangle + inline line top() const + { + return { pos, {pos.x + size.x, pos.y } }; + } + + // Get line segment from bottom side of rectangle + inline line bottom() const + { + return { {pos.x, pos.y + size.y}, pos + size }; + } + + // Get line segment from left side of rectangle + inline line left() const + { + return { pos, {pos.x, pos.y + size.y} }; + } + + // Get line segment from right side of rectangle + inline line right() const + { + return { {pos.x + size.x, pos.y }, pos + size }; + } + + // Get a line from an indexed side, starting top, going clockwise + inline line side(const size_t i) const + { + if (i & (0b11 == 0)) return top(); + if (i & (0b11 == 1)) return right(); + if (i & (0b11 == 2)) return bottom(); + if (i & (0b11 == 3)) return left(); + } + + // Get area of rectangle + inline constexpr T area() const + { + return size.x * size.y; + } + + // Get perimeter of rectangle + inline constexpr T perimeter() const + { + return T(2) * (size.x + size.y); + } + }; + + template + struct ellipse + { + olc::v2d_generic pos; + olc::v2d_generic radius; + + inline ellipse(const olc::v2d_generic& p = { T(0), T(0) }, 
const olc::v2d_generic r = {T(1),T(1)}) + : pos(p), radius(r) + { } + + // Get area of ellipse + inline constexpr T area() const + { + return T(pi) * radius.x * radius.y; + } + + // Get perimeter of an ellipse + inline constexpr T perimeter() const + { + return T(2.0 * pi) * sqrt((radius.x*radius.x+radius.y*radius.y)/(2*1.0)); + } + + // Get circumference of ellipse. Which is the same as a permieter of one. + inline constexpr T circumference() const + { + return perimeter(); + } + }; + + template + struct circle + { + olc::v2d_generic pos; + T radius = T(0); + + inline circle(const olc::v2d_generic& p = { T(0), T(0) }, const T r = T(0)) + : pos(p), radius(r) + { } + + // Get area of circle + inline constexpr T area() const + { + return T(pi) * radius * radius; + } + + // Get circumference of circle + inline constexpr T perimeter() const + { + return T(2.0 * pi) * radius; + } + + // Get circumference of circle + inline constexpr T circumference() const + { + return perimeter(); + } + }; + + + template + struct triangle + { + std::array, 3> pos; + + inline triangle( + const olc::v2d_generic& p0 = { T(0), T(0) }, + const olc::v2d_generic& p1 = { T(0), T(0) }, + const olc::v2d_generic& p2 = { T(0), T(0) }) + : pos{ p0,p1,p2 } + { } + + // Get a line from an indexed side, starting top, going clockwise + inline line side(const size_t i) const + { + return line(pos[i % 3], pos[(i + 1) % 3]); + } + + // Get area of triangle + inline constexpr T area() const + { + return double(0.5) * std::abs( + (pos[0].x * (pos[1].y - pos[2].y)) + + (pos[1].x * (pos[2].y - pos[0].y)) + + (pos[2].x * (pos[0].y - pos[1].y))); + } + + // Get perimeter of triangle + inline constexpr T perimeter() const + { + return line(pos[0], pos[1]).length() + + line(pos[1], pos[2]).length() + + line(pos[2], pos[0]).length(); + } + }; + + + template + struct polygon + { + std::vector> vPoints; + }; + + + // ========================================================================================================================= + // Closest(shape, point) =================================================================================================== + + // Returns closest point to point + template + inline olc::v2d_generic closest(const olc::v2d_generic& p1, const olc::v2d_generic& p2) + { + return p1; + } + + // Returns closest point on line to point + template + inline olc::v2d_generic closest(const line& l, const olc::v2d_generic& p) + { + auto d = l.vector(); + double u = std::clamp(double(d.dot(p - l.start)) / d.mag2(), 0.0, 1.0); + return l.start + u * d; + } + + // Returns closest point on circle to point + template + inline olc::v2d_generic closest(const circle& c, const olc::v2d_generic& p) + { + return c.pos + olc::vd2d(p - c.pos).norm() * c.radius; + } + + // Returns closest point on rectangle to point + template + inline olc::v2d_generic closest(const rect& r, const olc::v2d_generic& p) + { + // This could be a "constrain" function hmmmm + // TODO: Not quite what i wanted, should restrain to boundary + return olc::v2d_generic{ std::clamp(p.x, r.pos.x, r.pos.x + r.size.x), std::clamp(p.y, r.pos.y, r.pos.y + r.size.y) }; + + } + + // Returns closest point on triangle to point + template + inline olc::v2d_generic closest(const triangle& t, const olc::v2d_generic& p) + { + olc::utils::geom2d::line l{t.pos[0], t.pos[1]}; + auto p0 = closest(l, p); + auto d0 = (p0 - p).mag2(); + + l.end = t.pos[2]; + auto p1 = closest(l, p); + auto d1 = (p1 - p).mag2(); + + l.start = t.pos[1]; + auto p2 = closest(l, p); + auto d2 = (p2 - 
p).mag2(); + + if((d0 <= d1) && (d0 <= d2)) { + return p0; + } else if((d1 <= d0) && (d1 <= d2)) { + return p1; + } else { + return p2; + } + } + + + + + + + + + + + + // ================================================================================================================ + // POINT ========================================================================================================== + + // Checks if point contains point + template + inline constexpr bool contains(const olc::v2d_generic& p1, const olc::v2d_generic& p2) + { + return (p1 - p2).mag2() < epsilon; + } + + // Checks if line contains point + template + inline constexpr bool contains(const line& l, const olc::v2d_generic& p) + { + double d = ((p.x - l.start.x) * (l.end.y - l.start.y) - (p.y - l.start.y) * (l.end.x - l.start.x)); + if (std::abs(d) < epsilon) + { + // point is on line + double u = l.vector().dot(p - l.start) / l.vector().mag2(); + return (u >= double(0.0) && u <= double(1.0)); + } + + return false; + } + + // Checks if rectangle contains point + template + inline constexpr bool contains(const rect& r, const olc::v2d_generic& p) + { + return !(p.x < r.pos.x || p.y < r.pos.y || + p.x > (r.pos.x + r.size.x) || p.y > (r.pos.y + r.size.y)); + } + + // Checks if ellipse contains a point + template + inline constexpr bool contains(const ellipse& c, const olc::v2d_generic& p) + { + return std::pow(p.x-c.pos.x,2)/(c.radius.x*c.radius.x)+std::pow(p.y-c.pos.y,2)/(c.radius.y*c.radius.y)<1; + } + + // Checks if circle contains a point + template + inline constexpr bool contains(const circle& c, const olc::v2d_generic& p) + { + return (c.pos - p).mag2() < (c.radius * c.radius); + } + + // Checks if triangle contains a point + template + inline constexpr bool contains(const triangle& t, const olc::v2d_generic& p) + { + // http://jsfiddle.net/PerroAZUL/zdaY8/1/ + T2 A = T2(0.5) * (-t.pos[1].y * t.pos[2].x + t.pos[0].y * (-t.pos[1].x + t.pos[2].x) + t.pos[0].x * (t.pos[1].y - t.pos[2].y) + t.pos[1].x * t.pos[2].y); + T2 sign = A < T2(0) ? 
T2(-1) : T2(1); + T2 s = (t.pos[0].y * t.pos[2].x - t.pos[0].x * t.pos[2].y + (t.pos[2].y - t.pos[0].y) * p.x + (t.pos[0].x - t.pos[2].x) * p.y) * sign; + T2 v = (t.pos[0].x * t.pos[1].y - t.pos[0].y * t.pos[1].x + (t.pos[0].y - t.pos[1].y) * p.x + (t.pos[1].x - t.pos[0].x) * p.y) * sign; + return s > T2(0) && v > T2(0) && (s + v) < T2(2) * A * sign; + } + + + + + // Check if point overlaps with point (analagous to contains()) + template + inline constexpr bool overlaps(const olc::v2d_generic& p1, const olc::v2d_generic& p2) + { + return contains(p1, p2); + } + + // Checks if line segment overlaps with point + template + inline constexpr bool overlaps(const line& l, const olc::v2d_generic& p) + { + return contains(l, p); + } + + // Checks if rectangle overlaps with point + template + inline constexpr bool overlaps(const rect& r, const olc::v2d_generic& p) + { + return contains(r, p); + } + + // Checks if circle overlaps with point + template + inline constexpr bool overlaps(const circle& c, const olc::v2d_generic& p) + { + return contains(c, p); + } + + // Checks if triangle overlaps with point + template + inline constexpr bool overlaps(const triangle& t, const olc::v2d_generic& p) + { + return contains(t, p); + } + + + + + // Get intersection points where point intersects with point + template + inline std::vector> intersects(const olc::v2d_generic& p1, const olc::v2d_generic& p2) + { + if (contains(p1, p2)) + return { p1 }; + else + return {}; + } + + // Get intersection points where line segment intersects with point + template + inline std::vector> intersects(const line& l, const olc::v2d_generic& p) + { + if (contains(l, p)) + return { p }; + else + return {}; + } + + // Get intersection points where rectangle intersects with point + template + inline std::vector> intersects(const rect& r, const olc::v2d_generic& p) + { + std::vector> vPoints; + if (contains(r.top(), p)) vPoints.push_back(p); + if (contains(r.bottom(), p)) vPoints.push_back(p); + if (contains(r.left(), p)) vPoints.push_back(p); + if (contains(r.right(), p)) vPoints.push_back(p); + return vPoints; + } + + // Get intersection points where circle intersects with point + template + inline std::vector> intersects(const circle& c, const olc::v2d_generic& p) + { + if (std::abs((p - c.pos).mag2() - (c.radius * c.radius)) <= epsilon) + return { p }; + else + return {}; + } + + // Get intersection points where triangle intersects with point + template + inline std::vector> intersects(const triangle& r, const olc::v2d_generic& p) + { + // TODO: + return {}; + } + + + + + + + + + + + + + // ================================================================================================================ + // LINE =========================================================================================================== + + // Check if point contains line segment + template + inline constexpr bool contains(const olc::v2d_generic& p, const line& l) + { + return false; // It can't! 
+ } + + // Check if line segment contains line segment + template + inline constexpr bool contains(const line& l1, const line& l2) + { + // TODO: Check if segments are colinear, and l1 exists within bounds of l2 + return false; + } + + // Check if rectangle contains line segment + template + inline constexpr bool contains(const rect& r, const line& l) + { + return contains(r, l.start) && contains(r, l.end); + } + + // Check if circle contains line segment + template + inline constexpr bool contains(const circle& c1, const line& l) + { + return contains(c1, l.start) && contains(c1, l.end); + } + + // Check if triangle contains line segment + template + inline constexpr bool contains(const triangle& t, const line& l) + { + return contains(t, l.start) && contains(t, l.end); + } + + + + + // Check if point overlaps line segment + template + inline constexpr bool overlaps(const olc::v2d_generic& p, const line& l) + { + return contains(l, p); + } + + // Check if line segment overlaps line segment + template + inline constexpr bool overlaps(const line& l1, const line& l2) + { + float uA = ((l2.end.x-l2.start.x)*(l1.start.y-l2.start.y) - (l2.end.y-l2.start.y)*(l1.start.x-l2.start.x)) / ((l2.end.y-l2.start.y)*(l1.end.x-l1.start.x) - (l2.end.x-l2.start.x)*(l1.end.y-l1.start.y)); + float uB = ((l1.end.x-l1.start.x)*(l1.start.y-l2.start.y) - (l1.end.y-l1.start.y)*(l1.start.x-l2.start.x)) / ((l2.end.y-l2.start.y)*(l1.end.x-l1.start.x) - (l2.end.x-l2.start.x)*(l1.end.y-l1.start.y)); + return uA >= 0 && uA <= 1 && uB >= 0 && uB <= 1; + } + + // Check if rectangle overlaps line segment + template + inline constexpr bool overlaps(const rect& r, const line& l) + { + return overlaps(r.left(),l)|| + overlaps(r.top(),l)|| + overlaps(r.bottom(),l)|| + overlaps(r.right(),l); + } + + // Check if circle overlaps line segment + template + inline constexpr bool overlaps(const circle& c, const line& l) + { + // TODO: + return false; + } + + // Check if triangle overlaps line segment + template + inline constexpr bool overlaps(const triangle& t, const line& l) + { + return overlaps(t, l.start) || overlaps(t, l.end); + + // TODO: This method is no good, it cant detect lines whose start and end + // points are outside the triangle + } + + + + + // Get intersection points where point intersects with line segment + template + inline std::vector> intersects(const olc::v2d_generic& p, const line& l) + { + // TODO: + return {}; + } + + // Get intersection points where line segment intersects with line segment + template + inline std::vector> intersects(const line& l1, const line& l2) + { + float uA = ((l2.end.x-l2.start.x)*(l1.start.y-l2.start.y) - (l2.end.y-l2.start.y)*(l1.start.x-l2.start.x)) / ((l2.end.y-l2.start.y)*(l1.end.x-l1.start.x) - (l2.end.x-l2.start.x)*(l1.end.y-l1.start.y)); + float uB = ((l1.end.x-l1.start.x)*(l1.start.y-l2.start.y) - (l1.end.y-l1.start.y)*(l1.start.x-l2.start.x)) / ((l2.end.y-l2.start.y)*(l1.end.x-l1.start.x) - (l2.end.x-l2.start.x)*(l1.end.y-l1.start.y)); + + if (uA >= 0 && uA <= 1 && uB >= 0 && uB <= 1) { + float intersectionX = l1.start.x + (uA * (l1.end.x-l1.start.x)); + float intersectionY = l1.start.y + (uA * (l1.end.y-l1.start.y)); + return {{intersectionX,intersectionY}}; + } + return {}; + } + + // Get intersection points where rectangle intersects with line segment + template + inline std::vector> intersects(const rect& r, const line& l) + { + std::vector>intersections; + std::vector>result=intersects(r.left(),l); + if(result.size()>0)intersections.push_back(result[0]); + 
result=intersects(r.right(),l); + if(result.size()>0)intersections.push_back(result[0]); + result=intersects(r.top(),l); + if(result.size()>0)intersections.push_back(result[0]); + result=intersects(r.bottom(),l); + if(result.size()>0)intersections.push_back(result[0]); + return intersections; + } + + // Get intersection points where circle intersects with line segment + template + inline std::vector> intersects(const circle& c, const line& l) + { + // TODO: + return {}; + } + + // Get intersection points where triangle intersects with line segment + template + inline std::vector> intersects(const triangle& t, const line& l) + { + // TODO: + return {}; + } + + + + + + + + + + + + + // ================================================================================================================ + // RECTANGLE ====================================================================================================== + + // Check if point contains rectangle + template + inline constexpr bool contains(const olc::v2d_generic& p, const rect& r) + { + return false; // It can't! + } + + // Check if line segment contains rectangle + template + inline constexpr bool contains(const line& l, const rect& r) + { + return false; // It can't + } + + // Check if rectangle contains rectangle + template + inline constexpr bool contains(const rect& r1, const rect& r2) + { + return (r2.pos.x >= r1.pos.x) && (r2.pos.x + r2.size.x < r1.pos.x + r1.size.x) && + (r2.pos.y >= r1.pos.y) && (r2.pos.y + r2.size.y < r1.pos.y + r1.size.y); + } + + // Check if circle contains rectangle + template + inline constexpr bool contains(const circle& c, const rect& r) + { + return contains(c, r.pos) + && contains(c, olc::v2d_generic{ r.pos.x + r.size.x, r.pos.y }) + && contains(c, olc::v2d_generic{ r.pos.x, r.pos.y + r.size.y }) + && contains(c, r.pos + r.size); + } + + // Check if triangle contains rectangle + template + inline constexpr bool contains(const triangle& t, const rect& r) + { + return contains(t, r.pos) + && contains(t, r.pos + r.size) + && contains(t, olc::v2d_generic{ r.pos.x + r.size.x,r.pos.y }) + && contains(t, olc::v2d_generic{ r.pos.x, r.pos.y + r.size.y }); + } + + + + + // Check if point overlaps rectangle + template + inline constexpr bool overlaps(const olc::v2d_generic& p, const rect& r) + { + return overlaps(r, p); + } + + // Check if line segment overlaps rectangle + template + inline constexpr bool overlaps(const line& l, const rect& r) + { + return overlaps(r, l); + } + + // Check if rectangle overlaps rectangle + template + inline constexpr bool overlaps(const rect& r1, const rect& r2) + { + return (r1.pos.x < r2.pos.x + r2.size.x && r1.pos.x + r1.size.x >= r2.pos.x && + r1.pos.y < r2.pos.y + r2.size.y && r1.pos.y + r1.size.y >= r2.pos.y); + } + + // Check if circle overlaps rectangle + template + inline constexpr bool overlaps(const circle& c, const rect& r) + { + // Inspired by this (very clever btw) + // https://stackoverflow.com/questions/45370692/circle-rectangle-collision-response + // But modified to work :P + T2 overlap = (olc::v2d_generic{ std::clamp(c.pos.x, r.pos.x, r.pos.x + r.size.x), std::clamp(c.pos.y, r.pos.y, r.pos.y + r.size.y) } - c.pos).mag2(); + if (std::isnan(overlap)) overlap = T2(0); + return (overlap - (c.radius * c.radius)) < T2(0); + } + + // Check if triangle overlaps rectangle + template + inline constexpr bool overlaps(const triangle& t, const rect& r) + { + return contains(t, r.pos) + || contains(t, r.pos + r.size) + || contains(t, olc::v2d_generic{ r.pos.x + r.size.x, 
r.pos.y }) + || contains(t, olc::v2d_generic{ r.pos.x, r.pos.y + r.size.y }); + + // TODO: This method is no good, consider rectangle with all vertices + // outside of triangle, but edges still crossing + } + + + + + // Get intersection points where point intersects with rectangle + template + inline std::vector> intersects(const olc::v2d_generic& p, const rect& r) + { + return intersects(r, p); + } + + // Get intersection points where line segment intersects with rectangle + template + inline std::vector> intersects(const line& l, const rect& r) + { + return intersects(r,l); + } + + // Get intersection points where rectangle intersects with rectangle + template + inline std::vector> intersects(const rect& r1, const rect& r2) + { + // TODO: + return {}; + } + + // Get intersection points where circle intersects with rectangle + template + inline std::vector> intersects(const circle& c, const rect& r) + { + // TODO: + return {}; + } + + // Get intersection points where triangle intersects with rectangle + template + inline std::vector> intersects(const triangle& t, const rect& r) + { + // TODO: + return {}; + } + + + + + + + + + + + + + + // ================================================================================================================ + // CIRCLE ========================================================================================================= + + // Check if point contains circle + template + inline constexpr bool contains(const olc::v2d_generic& p, const circle& c) + { + return false; // It can't! + } + + // Check if line segment contains circle + template + inline constexpr bool contains(const line& l, const circle& c) + { + return false; // It can't! + } + + // Check if rectangle contains circle + template + inline constexpr bool contains(const rect& r, const circle& c) + { + // TODO: + return false; + } + + // Check if circle contains circle + template + inline constexpr bool contains(const circle& c1, const circle& c2) + { + return (c1.pos - c2.pos).mag2() <= (c1.radius - c2.radius) * (c1.radius - c2.radius); + } + + // Check if triangle contains circle + template + inline constexpr bool contains(const triangle& t, const circle& c) + { + // TODO: + return false; + } + + + + + // Check if point overlaps circle + template + inline constexpr bool overlaps(const olc::v2d_generic& p, const circle& c) + { + return overlaps(c, p); + } + + // Check if line segment overlaps circle + template + inline constexpr bool overlaps(const line& l, const circle& c) + { + return overlaps(c, l); + } + + // Check if rectangle overlaps circle + template + inline constexpr bool overlaps(const rect& r, const circle& c) + { + return overlaps(c, r); + } + + // Check if circle overlaps circle + template + inline constexpr bool overlaps(const circle& c1, const circle& c2) + { + return (c1.pos - c2.pos).mag2() <= (c1.radius + c2.radius) * (c1.radius + c2.radius); + } + + // Check if triangle overlaps circle + template + inline constexpr bool overlaps(const triangle& t, const circle& c) + { + // TODO: + return false; + } + + + + + // Get intersection points where point intersects with circle + template + inline std::vector> intersects(const olc::v2d_generic& p, const circle& c) + { + // TODO: + return {}; + } + + // Get intersection points where line segment intersects with circle + template + inline std::vector> intersects(const line& l, const circle& c) + { + // TODO: + return {}; + } + + // Get intersection points where rectangle intersects with circle + template + inline std::vector> 
intersects(const rect& r, const circle& c) + { + // TODO: + return {}; + } + + // Get intersection points where circle intersects with circle + template + inline std::vector> intersects(const circle& c1, const circle& c2) + { + // TODO: + return {}; + } + + // Get intersection points where triangle intersects with circle + template + inline std::vector> intersects(const triangle& t, const circle& c) + { + // TODO: + return {}; + } + + + + + + + + + + + + + + // ================================================================================================================ + // TRIANGLE ======================================================================================================= + + // Check if point contains triangle + template + inline constexpr bool contains(const olc::v2d_generic& p, const triangle& t) + { + return false; // It can't! + } + + // Check if line segment contains triangle + template + inline constexpr bool contains(const line& l, const triangle& t) + { + return false; // It can't + } + + // Check if rectangle contains triangle + template + inline constexpr bool contains(const rect& r, const triangle& t) + { + // TODO: + return false; + } + + // Check if circle contains triangle + template + inline constexpr bool contains(const circle& c, const triangle& t) + { + // TODO: + return false; + } + + // Check if triangle contains triangle + template + inline constexpr bool contains(const triangle& t1, const triangle& t2) + { + // TODO: + return false; + } + + + + + // Check if point overlaps triangle + template + inline constexpr bool overlaps(const olc::v2d_generic& p, const triangle& t) + { + return overlaps(t, p); + } + + // Check if line segment overlaps triangle + template + inline constexpr bool overlaps(const line& l, const triangle& t) + { + return overlaps(t, l); + } + + // Check if rectangle overlaps triangle + template + inline constexpr bool overlaps(const rect& r, const triangle& t) + { + return overlaps(t, r); + } + + // Check if circle overlaps triangle + template + inline constexpr bool overlaps(const circle& c, const triangle& t) + { + return overlaps(t, c); + } + + // Check if triangle overlaps triangle + template + inline constexpr bool overlaps(const triangle& t1, const triangle& t2) + { + // TODO: + return false; + } + + + + + // Get intersection points where point intersects with triangle + template + inline std::vector> intersects(const olc::v2d_generic& p, const triangle& t) + { + // TODO: + return {}; + } + + // Get intersection points where line segment intersects with triangle + template + inline std::vector> intersects(const line& l, const triangle& t) + { + // TODO: + return {}; + } + + // Get intersection points where rectangle intersects with triangle + template + inline std::vector> intersects(const rect& r, const triangle& t) + { + // TODO: + return {}; + } + + // Get intersection points where circle intersects with triangle + template + inline std::vector> intersects(const circle& c, const triangle& t) + { + // TODO: + return {}; + } + + // Get intersection points where triangle intersects with triangle + template + inline std::vector> intersects(const triangle& t1, const triangle& t2) + { + // TODO: + return {}; + } + +} + +using namespace olc::utils; \ No newline at end of file diff --git a/Example/pack b/Example/pack new file mode 100644 index 0000000..fbaf0fc Binary files /dev/null and b/Example/pack differ diff --git a/Example/res/tiles.png b/Example/res/tiles.png new file mode 100644 index 0000000..3af9b4b Binary files /dev/null 
and b/Example/res/tiles.png differ diff --git a/Example/safemap.h b/Example/safemap.h new file mode 100644 index 0000000..593b4f3 --- /dev/null +++ b/Example/safemap.h @@ -0,0 +1,15 @@ +#pragma once +#include "olcPixelGameEngine.h" + +template +class safemap{ + static bool initializeCompleted; + std::mapmap; +public: + O&operator[](T key){ + if(initializeCompleted&&map.count(key)==0){ + std::cout<<"WARNING! Trying to get non-existent key "< +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#pragma endregion + +#define PGE_VER 223 + +// O------------------------------------------------------------------------------O +// | COMPILER CONFIGURATION ODDITIES | +// O------------------------------------------------------------------------------O +#pragma region compiler_config +#define USE_EXPERIMENTAL_FS +#if defined(_WIN32) + #if _MSC_VER >= 1920 && _MSVC_LANG >= 201703L + #undef USE_EXPERIMENTAL_FS + #endif +#endif +#if defined(__linux__) || defined(__MINGW32__) || defined(__EMSCRIPTEN__) || defined(__FreeBSD__) || defined(__APPLE__) + #if __cplusplus >= 201703L + #undef USE_EXPERIMENTAL_FS + #endif +#endif + +#if !defined(OLC_KEYBOARD_UK) + #define OLC_KEYBOARD_UK +#endif + + +#if defined(USE_EXPERIMENTAL_FS) || defined(FORCE_EXPERIMENTAL_FS) + // C++14 + #define _SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING + #include + namespace _gfs = std::experimental::filesystem::v1; +#else + // C++17 + #include + namespace _gfs = std::filesystem; +#endif + +#if defined(UNICODE) || defined(_UNICODE) + #define olcT(s) L##s +#else + #define olcT(s) s +#endif + +#define UNUSED(x) (void)(x) + +// O------------------------------------------------------------------------------O +// | PLATFORM SELECTION CODE, Thanks slavka! 
| +// O------------------------------------------------------------------------------O + +#if defined(OLC_PGE_HEADLESS) + #define OLC_PLATFORM_HEADLESS + #define OLC_GFX_HEADLESS + #if !defined(OLC_IMAGE_STB) && !defined(OLC_IMAGE_GDI) && !defined(OLC_IMAGE_LIBPNG) + #define OLC_IMAGE_HEADLESS + #endif +#endif + +// Platform +#if !defined(OLC_PLATFORM_WINAPI) && !defined(OLC_PLATFORM_X11) && !defined(OLC_PLATFORM_GLUT) && !defined(OLC_PLATFORM_EMSCRIPTEN) && !defined(OLC_PLATFORM_HEADLESS) + #if !defined(OLC_PLATFORM_CUSTOM_EX) + #if defined(_WIN32) + #define OLC_PLATFORM_WINAPI + #endif + #if defined(__linux__) || defined(__FreeBSD__) + #define OLC_PLATFORM_X11 + #endif + #if defined(__APPLE__) + #define GL_SILENCE_DEPRECATION + #define OLC_PLATFORM_GLUT + #endif + #if defined(__EMSCRIPTEN__) + #define OLC_PLATFORM_EMSCRIPTEN + #endif + #endif +#endif + +// Start Situation +#if defined(OLC_PLATFORM_GLUT) || defined(OLC_PLATFORM_EMSCRIPTEN) + #define PGE_USE_CUSTOM_START +#endif + + + +// Renderer +#if !defined(OLC_GFX_OPENGL10) && !defined(OLC_GFX_OPENGL33) && !defined(OLC_GFX_DIRECTX10) && !defined(OLC_GFX_HEADLESS) + #if !defined(OLC_GFX_CUSTOM_EX) + #if defined(OLC_PLATFORM_EMSCRIPTEN) + #define OLC_GFX_OPENGL33 + #else + #define OLC_GFX_OPENGL10 + #endif + #endif +#endif + +// Image loader +#if !defined(OLC_IMAGE_STB) && !defined(OLC_IMAGE_GDI) && !defined(OLC_IMAGE_LIBPNG) && !defined(OLC_IMAGE_HEADLESS) + #if !defined(OLC_IMAGE_CUSTOM_EX) + #if defined(_WIN32) + #define OLC_IMAGE_GDI + #endif + #if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__EMSCRIPTEN__) + #define OLC_IMAGE_LIBPNG + #endif + #endif +#endif + + +// O------------------------------------------------------------------------------O +// | PLATFORM-SPECIFIC DEPENDENCIES | +// O------------------------------------------------------------------------------O +#if !defined(OLC_PGE_HEADLESS) +#if defined(OLC_PLATFORM_WINAPI) + #define _WINSOCKAPI_ // Thanks Cornchipss + #if !defined(VC_EXTRALEAN) + #define VC_EXTRALEAN + #endif + #if !defined(NOMINMAX) + #define NOMINMAX + #endif + + // In Code::Blocks + #if !defined(_WIN32_WINNT) + #ifdef HAVE_MSMF + #define _WIN32_WINNT 0x0600 // Windows Vista + #else + #define _WIN32_WINNT 0x0500 // Windows 2000 + #endif + #endif + + #include + #undef _WINSOCKAPI_ +#endif + +#if defined(OLC_PLATFORM_X11) + namespace X11 + { + #include + #include + } +#endif + +#if defined(OLC_PLATFORM_GLUT) + #if defined(__linux__) + #include + #include + #endif + #if defined(__APPLE__) + #include + #include + #include + #endif +#endif +#endif + +#if defined(OLC_PGE_HEADLESS) +#if defined max +#undef max +#endif +#if defined min +#undef min +#endif +#endif +#pragma endregion + +// O------------------------------------------------------------------------------O +// | olcPixelGameEngine INTERFACE DECLARATION | +// O------------------------------------------------------------------------------O +#pragma region pge_declaration +namespace olc +{ + class PixelGameEngine; + class Sprite; + + // Pixel Game Engine Advanced Configuration + constexpr uint8_t nMouseButtons = 5; + constexpr uint8_t nDefaultAlpha = 0xFF; + constexpr uint32_t nDefaultPixel = (nDefaultAlpha << 24); + constexpr uint8_t nTabSizeInSpaces = 4; + constexpr size_t OLC_MAX_VERTS = 128; + enum rcode { FAIL = 0, OK = 1, NO_FILE = -1 }; + + // O------------------------------------------------------------------------------O + // | olc::Pixel - Represents a 32-Bit RGBA colour | + // 
O------------------------------------------------------------------------------O + struct Pixel + { + union + { + uint32_t n = nDefaultPixel; + struct { uint8_t r; uint8_t g; uint8_t b; uint8_t a; }; + }; + + enum Mode { NORMAL, MASK, ALPHA, CUSTOM }; + + Pixel(); + Pixel(uint8_t red, uint8_t green, uint8_t blue, uint8_t alpha = nDefaultAlpha); + Pixel(uint32_t p); + Pixel& operator = (const Pixel& v) = default; + bool operator ==(const Pixel& p) const; + bool operator !=(const Pixel& p) const; + Pixel operator * (const float i) const; + Pixel operator / (const float i) const; + Pixel& operator *=(const float i); + Pixel& operator /=(const float i); + Pixel operator + (const Pixel& p) const; + Pixel operator - (const Pixel& p) const; + Pixel& operator +=(const Pixel& p); + Pixel& operator -=(const Pixel& p); + Pixel operator * (const Pixel& p) const; + Pixel& operator *=(const Pixel& p); + Pixel inv() const; + }; + + Pixel PixelF(float red, float green, float blue, float alpha = 1.0f); + Pixel PixelLerp(const olc::Pixel& p1, const olc::Pixel& p2, float t); + + + // O------------------------------------------------------------------------------O + // | USEFUL CONSTANTS | + // O------------------------------------------------------------------------------O + static const Pixel + GREY(192, 192, 192), DARK_GREY(128, 128, 128), VERY_DARK_GREY(64, 64, 64), + RED(255, 0, 0), DARK_RED(128, 0, 0), VERY_DARK_RED(64, 0, 0), + YELLOW(255, 255, 0), DARK_YELLOW(128, 128, 0), VERY_DARK_YELLOW(64, 64, 0), + GREEN(0, 255, 0), DARK_GREEN(0, 128, 0), VERY_DARK_GREEN(0, 64, 0), + CYAN(0, 255, 255), DARK_CYAN(0, 128, 128), VERY_DARK_CYAN(0, 64, 64), + BLUE(0, 0, 255), DARK_BLUE(0, 0, 128), VERY_DARK_BLUE(0, 0, 64), + MAGENTA(255, 0, 255), DARK_MAGENTA(128, 0, 128), VERY_DARK_MAGENTA(64, 0, 64), + WHITE(255, 255, 255), BLACK(0, 0, 0), BLANK(0, 0, 0, 0); + + // Thanks to scripticuk and others for updating the key maps + // NOTE: The GLUT platform will need updating, open to contributions ;) + enum Key + { + NONE, + A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z, + K0, K1, K2, K3, K4, K5, K6, K7, K8, K9, + F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, + UP, DOWN, LEFT, RIGHT, + SPACE, TAB, SHIFT, CTRL, INS, DEL, HOME, END, PGUP, PGDN, + BACK, ESCAPE, RETURN, ENTER, PAUSE, SCROLL, + NP0, NP1, NP2, NP3, NP4, NP5, NP6, NP7, NP8, NP9, + NP_MUL, NP_DIV, NP_ADD, NP_SUB, NP_DECIMAL, PERIOD, + EQUALS, COMMA, MINUS, + OEM_1, OEM_2, OEM_3, OEM_4, OEM_5, OEM_6, OEM_7, OEM_8, + CAPS_LOCK, ENUM_END + }; + + namespace Mouse + { + static constexpr int32_t LEFT = 0; + static constexpr int32_t RIGHT = 1; + static constexpr int32_t MIDDLE = 2; + }; + + // O------------------------------------------------------------------------------O + // | olc::HWButton - Represents the state of a hardware button (mouse/key/joy) | + // O------------------------------------------------------------------------------O + struct HWButton + { + bool bPressed = false; // Set once during the frame the event occurs + bool bReleased = false; // Set once during the frame the event occurs + bool bHeld = false; // Set true for all frames between pressed and released events + }; + + + + + // O------------------------------------------------------------------------------O + // | olc::vX2d - A generic 2D vector type | + // O------------------------------------------------------------------------------O +#if !defined(OLC_IGNORE_VEC2D) + template + struct v2d_generic + { + T x = 0; + T y = 0; + v2d_generic() : x(0), y(0) {} + 
v2d_generic(T _x, T _y) : x(_x), y(_y) {} + v2d_generic(const v2d_generic& v) : x(v.x), y(v.y) {} + v2d_generic& operator=(const v2d_generic& v) = default; + T mag() const { return T(std::sqrt(x * x + y * y)); } + T mag2() const { return x * x + y * y; } + v2d_generic norm() const { T r = 1 / mag(); return v2d_generic(x * r, y * r); } + v2d_generic perp() const { return v2d_generic(-y, x); } + v2d_generic floor() const { return v2d_generic(std::floor(x), std::floor(y)); } + v2d_generic ceil() const { return v2d_generic(std::ceil(x), std::ceil(y)); } + v2d_generic max(const v2d_generic& v) const { return v2d_generic(std::max(x, v.x), std::max(y, v.y)); } + v2d_generic min(const v2d_generic& v) const { return v2d_generic(std::min(x, v.x), std::min(y, v.y)); } + v2d_generic cart() { return { std::cos(y) * x, std::sin(y) * x }; } + v2d_generic polar() { return { mag(), std::atan2(y, x) }; } + v2d_generic clamp(const v2d_generic& v1, const v2d_generic& v2) const { return this->max(v1).min(v2); } + v2d_generic lerp(const v2d_generic& v1, const double t) { return this->operator*(T(1.0 - t)) + (v1 * T(t)); } + T dot(const v2d_generic& rhs) const { return this->x * rhs.x + this->y * rhs.y; } + T cross(const v2d_generic& rhs) const { return this->x * rhs.y - this->y * rhs.x; } + v2d_generic operator + (const v2d_generic& rhs) const { return v2d_generic(this->x + rhs.x, this->y + rhs.y); } + v2d_generic operator - (const v2d_generic& rhs) const { return v2d_generic(this->x - rhs.x, this->y - rhs.y); } + v2d_generic operator * (const T& rhs) const { return v2d_generic(this->x * rhs, this->y * rhs); } + v2d_generic operator * (const v2d_generic& rhs) const { return v2d_generic(this->x * rhs.x, this->y * rhs.y); } + v2d_generic operator / (const T& rhs) const { return v2d_generic(this->x / rhs, this->y / rhs); } + v2d_generic operator / (const v2d_generic& rhs) const { return v2d_generic(this->x / rhs.x, this->y / rhs.y); } + v2d_generic& operator += (const v2d_generic& rhs) { this->x += rhs.x; this->y += rhs.y; return *this; } + v2d_generic& operator -= (const v2d_generic& rhs) { this->x -= rhs.x; this->y -= rhs.y; return *this; } + v2d_generic& operator *= (const T& rhs) { this->x *= rhs; this->y *= rhs; return *this; } + v2d_generic& operator /= (const T& rhs) { this->x /= rhs; this->y /= rhs; return *this; } + v2d_generic& operator *= (const v2d_generic& rhs) { this->x *= rhs.x; this->y *= rhs.y; return *this; } + v2d_generic& operator /= (const v2d_generic& rhs) { this->x /= rhs.x; this->y /= rhs.y; return *this; } + v2d_generic operator + () const { return { +x, +y }; } + v2d_generic operator - () const { return { -x, -y }; } + bool operator == (const v2d_generic& rhs) const { return (this->x == rhs.x && this->y == rhs.y); } + bool operator != (const v2d_generic& rhs) const { return (this->x != rhs.x || this->y != rhs.y); } + const std::string str() const { return std::string("(") + std::to_string(this->x) + "," + std::to_string(this->y) + ")"; } + friend std::ostream& operator << (std::ostream& os, const v2d_generic& rhs) { os << rhs.str(); return os; } + operator v2d_generic() const { return { static_cast(this->x), static_cast(this->y) }; } + operator v2d_generic() const { return { static_cast(this->x), static_cast(this->y) }; } + operator v2d_generic() const { return { static_cast(this->x), static_cast(this->y) }; } + }; + + // Note: joshinils has some good suggestions here, but they are complicated to implement at this moment, + // however they will appear in a future version of PGE + template 
inline v2d_generic operator * (const float& lhs, const v2d_generic& rhs) + { return v2d_generic((T)(lhs * (float)rhs.x), (T)(lhs * (float)rhs.y)); } + template inline v2d_generic operator * (const double& lhs, const v2d_generic& rhs) + { return v2d_generic((T)(lhs * (double)rhs.x), (T)(lhs * (double)rhs.y)); } + template inline v2d_generic operator * (const int& lhs, const v2d_generic& rhs) + { return v2d_generic((T)(lhs * (int)rhs.x), (T)(lhs * (int)rhs.y)); } + template inline v2d_generic operator / (const float& lhs, const v2d_generic& rhs) + { return v2d_generic((T)(lhs / (float)rhs.x), (T)(lhs / (float)rhs.y)); } + template inline v2d_generic operator / (const double& lhs, const v2d_generic& rhs) + { return v2d_generic((T)(lhs / (double)rhs.x), (T)(lhs / (double)rhs.y)); } + template inline v2d_generic operator / (const int& lhs, const v2d_generic& rhs) + { return v2d_generic((T)(lhs / (int)rhs.x), (T)(lhs / (int)rhs.y)); } + + // To stop dandistine crying... + template inline bool operator < (const v2d_generic& lhs, const v2d_generic& rhs) + { return lhs.y < rhs.y || (lhs.y == rhs.y && lhs.x < rhs.x); } + template inline bool operator > (const v2d_generic& lhs, const v2d_generic& rhs) + { return lhs.y > rhs.y || (lhs.y == rhs.y && lhs.x > rhs.x); } + + typedef v2d_generic vi2d; + typedef v2d_generic vu2d; + typedef v2d_generic vf2d; + typedef v2d_generic vd2d; +#endif + + + + + + + // O------------------------------------------------------------------------------O + // | olc::ResourcePack - A virtual scrambled filesystem to pack your assets into | + // O------------------------------------------------------------------------------O + struct ResourceBuffer : public std::streambuf + { + ResourceBuffer(std::ifstream& ifs, uint32_t offset, uint32_t size); + std::vector vMemory; + }; + + class ResourcePack : public std::streambuf + { + public: + ResourcePack(); + ~ResourcePack(); + bool AddFile(const std::string& sFile); + bool LoadPack(const std::string& sFile, const std::string& sKey); + bool SavePack(const std::string& sFile, const std::string& sKey); + ResourceBuffer GetFileBuffer(const std::string& sFile); + bool Loaded(); + private: + struct sResourceFile { uint32_t nSize; uint32_t nOffset; }; + std::map mapFiles; + std::ifstream baseFile; + std::vector scramble(const std::vector& data, const std::string& key); + std::string makeposix(const std::string& path); + }; + + + class ImageLoader + { + public: + ImageLoader() = default; + virtual ~ImageLoader() = default; + virtual olc::rcode LoadImageResource(olc::Sprite* spr, const std::string& sImageFile, olc::ResourcePack* pack) = 0; + virtual olc::rcode SaveImageResource(olc::Sprite* spr, const std::string& sImageFile) = 0; + }; + + + // O------------------------------------------------------------------------------O + // | olc::Sprite - An image represented by a 2D array of olc::Pixel | + // O------------------------------------------------------------------------------O + class Sprite + { + public: + Sprite(); + Sprite(const std::string& sImageFile, olc::ResourcePack* pack = nullptr); + Sprite(int32_t w, int32_t h); + Sprite(const olc::Sprite&) = delete; + ~Sprite(); + + public: + olc::rcode LoadFromFile(const std::string& sImageFile, olc::ResourcePack* pack = nullptr); + + public: + int32_t width = 0; + int32_t height = 0; + enum Mode { NORMAL, PERIODIC, CLAMP }; + enum Flip { NONE = 0, HORIZ = 1, VERT = 2 }; + + public: + void SetSampleMode(olc::Sprite::Mode mode = olc::Sprite::Mode::NORMAL); + Pixel GetPixel(int32_t x, int32_t y) 
const; + bool SetPixel(int32_t x, int32_t y, Pixel p); + Pixel GetPixel(const olc::vi2d& a) const; + bool SetPixel(const olc::vi2d& a, Pixel p); + Pixel Sample(float x, float y) const; + Pixel Sample(const olc::vf2d& uv) const; + Pixel SampleBL(float u, float v) const; + Pixel SampleBL(const olc::vf2d& uv) const; + Pixel* GetData(); + olc::Sprite* Duplicate(); + olc::Sprite* Duplicate(const olc::vi2d& vPos, const olc::vi2d& vSize); + olc::vi2d Size() const; + std::vector pColData; + Mode modeSample = Mode::NORMAL; + + static std::unique_ptr loader; + }; + + // O------------------------------------------------------------------------------O + // | olc::Decal - A GPU resident storage of an olc::Sprite | + // O------------------------------------------------------------------------------O + class Decal + { + public: + Decal(olc::Sprite* spr, bool filter = false, bool clamp = true); + Decal(const uint32_t nExistingTextureResource, olc::Sprite* spr); + virtual ~Decal(); + void Update(); + void UpdateSprite(); + + public: // But dont touch + int32_t id = -1; + olc::Sprite* sprite = nullptr; + olc::vf2d vUVScale = { 1.0f, 1.0f }; + }; + + enum class DecalMode + { + NORMAL, + ADDITIVE, + MULTIPLICATIVE, + STENCIL, + ILLUMINATE, + WIREFRAME, + MODEL3D, + }; + + enum class DecalStructure + { + LINE, + FAN, + STRIP, + LIST + }; + + // O------------------------------------------------------------------------------O + // | olc::Renderable - Convenience class to keep a sprite and decal together | + // O------------------------------------------------------------------------------O + class Renderable + { + public: + Renderable() = default; + Renderable(Renderable&& r) : pSprite(std::move(r.pSprite)), pDecal(std::move(r.pDecal)) {} + Renderable(const Renderable&) = delete; + olc::rcode Load(const std::string& sFile, ResourcePack* pack = nullptr, bool filter = false, bool clamp = true); + void Create(uint32_t width, uint32_t height, bool filter = false, bool clamp = true); + olc::Decal* Decal() const; + olc::Sprite* Sprite() const; + + private: + std::unique_ptr pSprite = nullptr; + std::unique_ptr pDecal = nullptr; + }; + + + // O------------------------------------------------------------------------------O + // | Auxilliary components internal to engine | + // O------------------------------------------------------------------------------O + + struct DecalInstance + { + olc::Decal* decal = nullptr; + std::vector pos; + std::vector uv; + std::vector w; + std::vector tint; + olc::DecalMode mode = olc::DecalMode::NORMAL; + olc::DecalStructure structure = olc::DecalStructure::FAN; + uint32_t points = 0; + }; + + struct LayerDesc + { + olc::vf2d vOffset = { 0, 0 }; + olc::vf2d vScale = { 1, 1 }; + bool bShow = false; + bool bUpdate = false; + olc::Renderable pDrawTarget; + uint32_t nResID = 0; + std::vector vecDecalInstance; + olc::Pixel tint = olc::WHITE; + std::function funcHook = nullptr; + }; + + class Renderer + { + public: + virtual ~Renderer() = default; + virtual void PrepareDevice() = 0; + virtual olc::rcode CreateDevice(std::vector params, bool bFullScreen, bool bVSYNC) = 0; + virtual olc::rcode DestroyDevice() = 0; + virtual void DisplayFrame() = 0; + virtual void PrepareDrawing() = 0; + virtual void SetDecalMode(const olc::DecalMode& mode) = 0; + virtual void DrawLayerQuad(const olc::vf2d& offset, const olc::vf2d& scale, const olc::Pixel tint) = 0; + virtual void DrawDecal(const olc::DecalInstance& decal) = 0; + virtual uint32_t CreateTexture(const uint32_t width, const uint32_t height, const 
bool filtered = false, const bool clamp = true) = 0; + virtual void UpdateTexture(uint32_t id, olc::Sprite* spr) = 0; + virtual void ReadTexture(uint32_t id, olc::Sprite* spr) = 0; + virtual uint32_t DeleteTexture(const uint32_t id) = 0; + virtual void ApplyTexture(uint32_t id) = 0; + virtual void UpdateViewport(const olc::vi2d& pos, const olc::vi2d& size) = 0; + virtual void ClearBuffer(olc::Pixel p, bool bDepth) = 0; + static olc::PixelGameEngine* ptrPGE; + }; + + class Platform + { + public: + virtual ~Platform() = default; + virtual olc::rcode ApplicationStartUp() = 0; + virtual olc::rcode ApplicationCleanUp() = 0; + virtual olc::rcode ThreadStartUp() = 0; + virtual olc::rcode ThreadCleanUp() = 0; + virtual olc::rcode CreateGraphics(bool bFullScreen, bool bEnableVSYNC, const olc::vi2d& vViewPos, const olc::vi2d& vViewSize) = 0; + virtual olc::rcode CreateWindowPane(const olc::vi2d& vWindowPos, olc::vi2d& vWindowSize, bool bFullScreen) = 0; + virtual olc::rcode SetWindowTitle(const std::string& s) = 0; + virtual olc::rcode StartSystemEventLoop() = 0; + virtual olc::rcode HandleSystemEvent() = 0; + static olc::PixelGameEngine* ptrPGE; + }; + + class PGEX; + + // The Static Twins (plus one) + static std::unique_ptr renderer; + static std::unique_ptr platform; + static std::map mapKeys; + + // O------------------------------------------------------------------------------O + // | olc::PixelGameEngine - The main BASE class for your application | + // O------------------------------------------------------------------------------O + class PixelGameEngine + { + public: + PixelGameEngine(); + virtual ~PixelGameEngine(); + public: + olc::rcode Construct(int32_t screen_w, int32_t screen_h, int32_t pixel_w, int32_t pixel_h, + bool full_screen = false, bool vsync = false, bool cohesion = false); + olc::rcode Start(); + + public: // User Override Interfaces + // Called once on application startup, use to load your resources + virtual bool OnUserCreate(); + // Called every frame, and provides you with a time per frame value + virtual bool OnUserUpdate(float fElapsedTime); + // Called once on application termination, so you can be one clean coder + virtual bool OnUserDestroy(); + + // Called when a text entry is confirmed with "enter" key + virtual void OnTextEntryComplete(const std::string& sText); + // Called when a console command is executed + virtual bool OnConsoleCommand(const std::string& sCommand); + + + public: // Hardware Interfaces + // Returns true if window is currently in focus + bool IsFocused() const; + // Get the state of a specific keyboard button + HWButton GetKey(Key k) const; + // Get the state of a specific mouse button + HWButton GetMouse(uint32_t b) const; + // Get Mouse X coordinate in "pixel" space + int32_t GetMouseX() const; + // Get Mouse Y coordinate in "pixel" space + int32_t GetMouseY() const; + // Get Mouse Wheel Delta + int32_t GetMouseWheel() const; + // Get the mouse in window space + const olc::vi2d& GetWindowMouse() const; + // Gets the mouse as a vector to keep Tarriest happy + const olc::vi2d& GetMousePos() const; + + static const std::map& GetKeyMap() { return mapKeys; } + + public: // Utility + // Returns the width of the screen in "pixels" + int32_t ScreenWidth() const; + // Returns the height of the screen in "pixels" + int32_t ScreenHeight() const; + // Returns the width of the currently selected drawing target in "pixels" + int32_t GetDrawTargetWidth() const; + // Returns the height of the currently selected drawing target in "pixels" + int32_t 
GetDrawTargetHeight() const; + // Returns the currently active draw target + olc::Sprite* GetDrawTarget() const; + // Resize the primary screen sprite + void SetScreenSize(int w, int h); + // Specify which Sprite should be the target of drawing functions, use nullptr + // to specify the primary screen + void SetDrawTarget(Sprite* target); + // Gets the current Frames Per Second + uint32_t GetFPS() const; + // Gets last update of elapsed time + float GetElapsedTime() const; + // Returns whether the mouse cursor exists inside the window or outside of it. + const bool IsMouseInsideWindow() const; + // Gets Actual Window pos + const olc::vi2d& GetWindowPos() const; + // Gets Actual Window size + const olc::vi2d& GetWindowSize() const; + // Gets pixel scale + const olc::vi2d& GetPixelSize() const; + // Gets actual pixel scale + const olc::vi2d& GetScreenPixelSize() const; + // Gets "screen" size + const olc::vi2d& GetScreenSize() const; + // Gets any files dropped this frame + const std::vector& GetDroppedFiles() const; + const olc::vi2d& GetDroppedFilesPoint() const; + + public: // CONFIGURATION ROUTINES + // Layer targeting functions + void SetDrawTarget(uint8_t layer, bool bDirty = true); + void EnableLayer(uint8_t layer, bool b); + void SetLayerOffset(uint8_t layer, const olc::vf2d& offset); + void SetLayerOffset(uint8_t layer, float x, float y); + void SetLayerScale(uint8_t layer, const olc::vf2d& scale); + void SetLayerScale(uint8_t layer, float x, float y); + void SetLayerTint(uint8_t layer, const olc::Pixel& tint); + void SetLayerCustomRenderFunction(uint8_t layer, std::function f); + + std::vector& GetLayers(); + uint32_t CreateLayer(); + + // Change the pixel mode for different optimisations + // olc::Pixel::NORMAL = No transparency + // olc::Pixel::MASK = Transparent if alpha is < 255 + // olc::Pixel::ALPHA = Full transparency + void SetPixelMode(Pixel::Mode m); + Pixel::Mode GetPixelMode(); + // Use a custom blend function + void SetPixelMode(std::function pixelMode); + // Change the blend factor from between 0.0f to 1.0f; + void SetPixelBlend(float fBlend); + + + + public: // DRAWING ROUTINES + // Draws a single Pixel + virtual bool Draw(int32_t x, int32_t y, Pixel p = olc::WHITE); + bool Draw(const olc::vi2d& pos, Pixel p = olc::WHITE); + // Draws a line from (x1,y1) to (x2,y2) + void DrawLine(int32_t x1, int32_t y1, int32_t x2, int32_t y2, Pixel p = olc::WHITE, uint32_t pattern = 0xFFFFFFFF); + void DrawLine(const olc::vi2d& pos1, const olc::vi2d& pos2, Pixel p = olc::WHITE, uint32_t pattern = 0xFFFFFFFF); + // Draws a circle located at (x,y) with radius + void DrawCircle(int32_t x, int32_t y, int32_t radius, Pixel p = olc::WHITE, uint8_t mask = 0xFF); + void DrawCircle(const olc::vi2d& pos, int32_t radius, Pixel p = olc::WHITE, uint8_t mask = 0xFF); + // Fills a circle located at (x,y) with radius + void FillCircle(int32_t x, int32_t y, int32_t radius, Pixel p = olc::WHITE); + void FillCircle(const olc::vi2d& pos, int32_t radius, Pixel p = olc::WHITE); + // Draws a rectangle at (x,y) to (x+w,y+h) + void DrawRect(int32_t x, int32_t y, int32_t w, int32_t h, Pixel p = olc::WHITE); + void DrawRect(const olc::vi2d& pos, const olc::vi2d& size, Pixel p = olc::WHITE); + // Fills a rectangle at (x,y) to (x+w,y+h) + void FillRect(int32_t x, int32_t y, int32_t w, int32_t h, Pixel p = olc::WHITE); + void FillRect(const olc::vi2d& pos, const olc::vi2d& size, Pixel p = olc::WHITE); + // Draws a triangle between points (x1,y1), (x2,y2) and (x3,y3) + void DrawTriangle(int32_t x1, int32_t y1, 
int32_t x2, int32_t y2, int32_t x3, int32_t y3, Pixel p = olc::WHITE); + void DrawTriangle(const olc::vi2d& pos1, const olc::vi2d& pos2, const olc::vi2d& pos3, Pixel p = olc::WHITE); + // Flat fills a triangle between points (x1,y1), (x2,y2) and (x3,y3) + void FillTriangle(int32_t x1, int32_t y1, int32_t x2, int32_t y2, int32_t x3, int32_t y3, Pixel p = olc::WHITE); + void FillTriangle(const olc::vi2d& pos1, const olc::vi2d& pos2, const olc::vi2d& pos3, Pixel p = olc::WHITE); + // Fill a textured and coloured triangle + void FillTexturedTriangle(const std::vector& vPoints, std::vector vTex, std::vector vColour, olc::Sprite* sprTex); + void FillTexturedPolygon(const std::vector& vPoints, const std::vector& vTex, const std::vector& vColour, olc::Sprite* sprTex, olc::DecalStructure structure = olc::DecalStructure::LIST); + // Draws an entire sprite at location (x,y) + void DrawSprite(int32_t x, int32_t y, Sprite* sprite, uint32_t scale = 1, uint8_t flip = olc::Sprite::NONE); + void DrawSprite(const olc::vi2d& pos, Sprite* sprite, uint32_t scale = 1, uint8_t flip = olc::Sprite::NONE); + // Draws an area of a sprite at location (x,y), where the + // selected area is (ox,oy) to (ox+w,oy+h) + void DrawPartialSprite(int32_t x, int32_t y, Sprite* sprite, int32_t ox, int32_t oy, int32_t w, int32_t h, uint32_t scale = 1, uint8_t flip = olc::Sprite::NONE); + void DrawPartialSprite(const olc::vi2d& pos, Sprite* sprite, const olc::vi2d& sourcepos, const olc::vi2d& size, uint32_t scale = 1, uint8_t flip = olc::Sprite::NONE); + // Draws a single line of text - traditional monospaced + void DrawString(int32_t x, int32_t y, const std::string& sText, Pixel col = olc::WHITE, uint32_t scale = 1); + void DrawString(const olc::vi2d& pos, const std::string& sText, Pixel col = olc::WHITE, uint32_t scale = 1); + olc::vi2d GetTextSize(const std::string& s); + // Draws a single line of text - non-monospaced + void DrawStringProp(int32_t x, int32_t y, const std::string& sText, Pixel col = olc::WHITE, uint32_t scale = 1); + void DrawStringProp(const olc::vi2d& pos, const std::string& sText, Pixel col = olc::WHITE, uint32_t scale = 1); + olc::vi2d GetTextSizeProp(const std::string& s); + + // Decal Quad functions + void SetDecalMode(const olc::DecalMode& mode); + void SetDecalStructure(const olc::DecalStructure& structure); + // Draws a whole decal, with optional scale and tinting + void DrawDecal(const olc::vf2d& pos, olc::Decal* decal, const olc::vf2d& scale = { 1.0f,1.0f }, const olc::Pixel& tint = olc::WHITE); + // Draws a region of a decal, with optional scale and tinting + void DrawPartialDecal(const olc::vf2d& pos, olc::Decal* decal, const olc::vf2d& source_pos, const olc::vf2d& source_size, const olc::vf2d& scale = { 1.0f,1.0f }, const olc::Pixel& tint = olc::WHITE); + void DrawPartialDecal(const olc::vf2d& pos, const olc::vf2d& size, olc::Decal* decal, const olc::vf2d& source_pos, const olc::vf2d& source_size, const olc::Pixel& tint = olc::WHITE); + // Draws fully user controlled 4 vertices, pos(pixels), uv(pixels), colours + void DrawExplicitDecal(olc::Decal* decal, const olc::vf2d* pos, const olc::vf2d* uv, const olc::Pixel* col, uint32_t elements = 4); + // Draws a decal with 4 arbitrary points, warping the texture to look "correct" + void DrawWarpedDecal(olc::Decal* decal, const olc::vf2d(&pos)[4], const olc::Pixel& tint = olc::WHITE); + void DrawWarpedDecal(olc::Decal* decal, const olc::vf2d* pos, const olc::Pixel& tint = olc::WHITE); + void DrawWarpedDecal(olc::Decal* decal, const std::array& pos, const 
olc::Pixel& tint = olc::WHITE); + // As above, but you can specify a region of a decal source sprite + void DrawPartialWarpedDecal(olc::Decal* decal, const olc::vf2d(&pos)[4], const olc::vf2d& source_pos, const olc::vf2d& source_size, const olc::Pixel& tint = olc::WHITE); + void DrawPartialWarpedDecal(olc::Decal* decal, const olc::vf2d* pos, const olc::vf2d& source_pos, const olc::vf2d& source_size, const olc::Pixel& tint = olc::WHITE); + void DrawPartialWarpedDecal(olc::Decal* decal, const std::array& pos, const olc::vf2d& source_pos, const olc::vf2d& source_size, const olc::Pixel& tint = olc::WHITE); + // Draws a decal rotated to specified angle, wit point of rotation offset + void DrawRotatedDecal(const olc::vf2d& pos, olc::Decal* decal, const float fAngle, const olc::vf2d& center = { 0.0f, 0.0f }, const olc::vf2d& scale = { 1.0f,1.0f }, const olc::Pixel& tint = olc::WHITE); + void DrawPartialRotatedDecal(const olc::vf2d& pos, olc::Decal* decal, const float fAngle, const olc::vf2d& center, const olc::vf2d& source_pos, const olc::vf2d& source_size, const olc::vf2d& scale = { 1.0f, 1.0f }, const olc::Pixel& tint = olc::WHITE); + // Draws a multiline string as a decal, with tiniting and scaling + void DrawStringDecal(const olc::vf2d& pos, const std::string& sText, const Pixel col = olc::WHITE, const olc::vf2d& scale = { 1.0f, 1.0f }); + void DrawStringPropDecal(const olc::vf2d& pos, const std::string& sText, const Pixel col = olc::WHITE, const olc::vf2d& scale = { 1.0f, 1.0f }); + // Draws a single shaded filled rectangle as a decal + void DrawRectDecal(const olc::vf2d& pos, const olc::vf2d& size, const olc::Pixel col = olc::WHITE); + void FillRectDecal(const olc::vf2d& pos, const olc::vf2d& size, const olc::Pixel col = olc::WHITE); + // Draws a corner shaded rectangle as a decal + void GradientFillRectDecal(const olc::vf2d& pos, const olc::vf2d& size, const olc::Pixel colTL, const olc::Pixel colBL, const olc::Pixel colBR, const olc::Pixel colTR); + // Draws an arbitrary convex textured polygon using GPU + void DrawPolygonDecal(olc::Decal* decal, const std::vector& pos, const std::vector& uv, const olc::Pixel tint = olc::WHITE); + void DrawPolygonDecal(olc::Decal* decal, const std::vector& pos, const std::vector& depth, const std::vector& uv, const olc::Pixel tint = olc::WHITE); + void DrawPolygonDecal(olc::Decal* decal, const std::vector& pos, const std::vector& uv, const std::vector& tint); + void DrawPolygonDecal(olc::Decal* decal, const std::vector& pos, const std::vector& uv, const std::vector& colours, const olc::Pixel tint); + // Draws a line in Decal Space + void DrawLineDecal(const olc::vf2d& pos1, const olc::vf2d& pos2, Pixel p = olc::WHITE); + void DrawRotatedStringDecal(const olc::vf2d& pos, const std::string& sText, const float fAngle, const olc::vf2d& center = { 0.0f, 0.0f }, const olc::Pixel col = olc::WHITE, const olc::vf2d& scale = { 1.0f, 1.0f }); + void DrawRotatedStringPropDecal(const olc::vf2d& pos, const std::string& sText, const float fAngle, const olc::vf2d& center = { 0.0f, 0.0f }, const olc::Pixel col = olc::WHITE, const olc::vf2d& scale = { 1.0f, 1.0f }); + // Clears entire draw target to Pixel + void Clear(Pixel p); + // Clears the rendering back buffer + void ClearBuffer(Pixel p, bool bDepth = true); + // Returns the font image + olc::Sprite* GetFontSprite(); + + // Clip a line segment to visible area + bool ClipLineToScreen(olc::vi2d& in_p1, olc::vi2d& in_p2); + + // Dont allow PGE to mark layers as dirty, so pixel graphics don't update + void 
EnablePixelTransfer(const bool bEnable = true); + + // Command Console Routines + void ConsoleShow(const olc::Key &keyExit, bool bSuspendTime = true); + bool IsConsoleShowing() const; + void ConsoleClear(); + std::stringstream& ConsoleOut(); + void ConsoleCaptureStdOut(const bool bCapture); + + // Text Entry Routines + void TextEntryEnable(const bool bEnable, const std::string& sText = ""); + std::string TextEntryGetString() const; + int32_t TextEntryGetCursor() const; + bool IsTextEntryEnabled() const; + + + + private: + void UpdateTextEntry(); + void UpdateConsole(); + + public: + + // Experimental Lightweight 3D Routines ================ +#ifdef OLC_ENABLE_EXPERIMENTAL + // Set Manual View Matrix + void LW3D_View(const std::array& m); + // Set Manual World Matrix + void LW3D_World(const std::array& m); + // Set Manual Projection Matrix + void LW3D_Projection(const std::array& m); + + // Draws a vector of vertices, interprted as individual triangles + void LW3D_DrawTriangles(olc::Decal* decal, const std::vector>& pos, const std::vector& tex, const std::vector& col); + + void LW3D_ModelTranslate(const float x, const float y, const float z); + + // Camera convenience functions + void LW3D_SetCameraAtTarget(const float fEyeX, const float fEyeY, const float fEyeZ, + const float fTargetX, const float fTargetY, const float fTargetZ, + const float fUpX = 0.0f, const float fUpY = 1.0f, const float fUpZ = 0.0f); + void LW3D_SetCameraAlongDirection(const float fEyeX, const float fEyeY, const float fEyeZ, + const float fDirX, const float fDirY, const float fDirZ, + const float fUpX = 0.0f, const float fUpY = 1.0f, const float fUpZ = 0.0f); + + // 3D Rendering Flags + void LW3D_EnableDepthTest(const bool bEnableDepth); + void LW3D_EnableBackfaceCulling(const bool bEnableCull); +#endif + public: // Branding + std::string sAppName; + + private: // Inner mysterious workings + olc::Sprite* pDrawTarget = nullptr; + Pixel::Mode nPixelMode = Pixel::NORMAL; + float fBlendFactor = 1.0f; + olc::vi2d vScreenSize = { 256, 240 }; + olc::vf2d vInvScreenSize = { 1.0f / 256.0f, 1.0f / 240.0f }; + olc::vi2d vPixelSize = { 4, 4 }; + olc::vi2d vScreenPixelSize = { 4, 4 }; + olc::vi2d vMousePos = { 0, 0 }; + int32_t nMouseWheelDelta = 0; + olc::vi2d vMousePosCache = { 0, 0 }; + olc::vi2d vMouseWindowPos = { 0, 0 }; + int32_t nMouseWheelDeltaCache = 0; + olc::vi2d vWindowPos = { 0, 0 }; + olc::vi2d vWindowSize = { 0, 0 }; + olc::vi2d vViewPos = { 0, 0 }; + olc::vi2d vViewSize = { 0,0 }; + bool bFullScreen = false; + olc::vf2d vPixel = { 1.0f, 1.0f }; + bool bHasInputFocus = false; + bool bHasMouseFocus = false; + bool bEnableVSYNC = false; + float fFrameTimer = 1.0f; + float fLastElapsed = 0.0f; + int nFrameCount = 0; + bool bSuspendTextureTransfer = false; + Renderable fontRenderable; + std::vector vLayers; + uint8_t nTargetLayer = 0; + uint32_t nLastFPS = 0; + bool bPixelCohesion = false; + DecalMode nDecalMode = DecalMode::NORMAL; + DecalStructure nDecalStructure = DecalStructure::FAN; + std::function funcPixelMode; + std::chrono::time_point m_tp1, m_tp2; + std::vector vFontSpacing; + std::vector vDroppedFiles; + std::vector vDroppedFilesCache; + olc::vi2d vDroppedFilesPoint; + olc::vi2d vDroppedFilesPointCache; + + // Command Console Specific + bool bConsoleShow = false; + bool bConsoleSuspendTime = false; + olc::Key keyConsoleExit = olc::Key::F1; + std::stringstream ssConsoleOutput; + std::streambuf* sbufOldCout = nullptr; + olc::vi2d vConsoleSize; + olc::vi2d vConsoleCursor = { 0,0 }; + olc::vf2d 
vConsoleCharacterScale = { 1.0f, 2.0f }; + std::vector sConsoleLines; + std::list sCommandHistory; + std::list::iterator sCommandHistoryIt; + + // Text Entry Specific + bool bTextEntryEnable = false; + std::string sTextEntryString = ""; + int32_t nTextEntryCursor = 0; + std::vector> vKeyboardMap; + + + + // State of keyboard + bool pKeyNewState[256] = { 0 }; + bool pKeyOldState[256] = { 0 }; + HWButton pKeyboardState[256] = { 0 }; + + // State of mouse + bool pMouseNewState[nMouseButtons] = { 0 }; + bool pMouseOldState[nMouseButtons] = { 0 }; + HWButton pMouseState[nMouseButtons] = { 0 }; + + // The main engine thread + void EngineThread(); + + + // If anything sets this flag to false, the engine + // "should" shut down gracefully + static std::atomic bAtomActive; + + public: + // "Break In" Functions + void olc_UpdateMouse(int32_t x, int32_t y); + void olc_UpdateMouseWheel(int32_t delta); + void olc_UpdateWindowPos(int32_t x, int32_t y); + void olc_UpdateWindowSize(int32_t x, int32_t y); + void olc_UpdateViewport(); + void olc_ConstructFontSheet(); + void olc_CoreUpdate(); + void olc_PrepareEngine(); + void olc_UpdateMouseState(int32_t button, bool state); + void olc_UpdateKeyState(int32_t key, bool state); + void olc_UpdateMouseFocus(bool state); + void olc_UpdateKeyFocus(bool state); + void olc_Terminate(); + void olc_DropFiles(int32_t x, int32_t y, const std::vector& vFiles); + void olc_Reanimate(); + bool olc_IsRunning(); + + // At the very end of this file, chooses which + // components to compile + virtual void olc_ConfigureSystem(); + + // NOTE: Items Here are to be deprecated, I have left them in for now + // in case you are using them, but they will be removed. + // olc::vf2d vSubPixelOffset = { 0.0f, 0.0f }; + + public: // PGEX Stuff + friend class PGEX; + void pgex_Register(olc::PGEX* pgex); + + private: + std::vector vExtensions; + }; + + + + // O------------------------------------------------------------------------------O + // | PGE EXTENSION BASE CLASS - Permits access to PGE functions from extension | + // O------------------------------------------------------------------------------O + class PGEX + { + friend class olc::PixelGameEngine; + public: + PGEX(bool bHook = false); + + protected: + virtual void OnBeforeUserCreate(); + virtual void OnAfterUserCreate(); + virtual bool OnBeforeUserUpdate(float &fElapsedTime); + virtual void OnAfterUserUpdate(float fElapsedTime); + + protected: + static PixelGameEngine* pge; + }; +} + +#pragma endregion + + +#pragma region opengl33_iface +// In order to facilitate more advanced graphics features, some PGEX +// will rely on shaders. Instead of having each PGEX responsible for +// managing this, for convenience, this interface exists. 
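// A minimal sketch (not part of the original header) of how one of the GL 3.3
// entry points declared below is typically resolved at runtime on the WinAPI
// platform: a function type such as olc::locCreateShader_t is paired with
// wglGetProcAddress and the returned pointer is cast to that type. The variable
// names here are placeholders for illustration only; the engine's renderer
// implementations perform their own per-platform loading.
//
//   olc::locCreateShader_t* pCreateShader =
//       reinterpret_cast<olc::locCreateShader_t*>(wglGetProcAddress("glCreateShader"));
//   if (pCreateShader != nullptr)
//       GLuint shader = pCreateShader(0x8B31 /* GL_VERTEX_SHADER */);
//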
+ +#if defined(OLC_GFX_OPENGL33) + + #if defined(OLC_PLATFORM_WINAPI) + #include + #define CALLSTYLE __stdcall + #endif + + #if defined(__linux__) || defined(__FreeBSD__) + #include + #endif + + #if defined(OLC_PLATFORM_X11) + namespace X11 { + #include + } + #define CALLSTYLE + #endif + + #if defined(__APPLE__) + #define GL_SILENCE_DEPRECATION + #include + #include + #include + #endif + + #if defined(OLC_PLATFORM_EMSCRIPTEN) + #include + #include + #define GL_GLEXT_PROTOTYPES + #include + #include + #define CALLSTYLE + #define GL_CLAMP GL_CLAMP_TO_EDGE + #endif + +namespace olc +{ + typedef char GLchar; + typedef ptrdiff_t GLsizeiptr; + + typedef GLuint CALLSTYLE locCreateShader_t(GLenum type); + typedef GLuint CALLSTYLE locCreateProgram_t(void); + typedef void CALLSTYLE locDeleteShader_t(GLuint shader); + typedef void CALLSTYLE locCompileShader_t(GLuint shader); + typedef void CALLSTYLE locLinkProgram_t(GLuint program); + typedef void CALLSTYLE locDeleteProgram_t(GLuint program); + typedef void CALLSTYLE locAttachShader_t(GLuint program, GLuint shader); + typedef void CALLSTYLE locBindBuffer_t(GLenum target, GLuint buffer); + typedef void CALLSTYLE locBufferData_t(GLenum target, GLsizeiptr size, const void* data, GLenum usage); + typedef void CALLSTYLE locGenBuffers_t(GLsizei n, GLuint* buffers); + typedef void CALLSTYLE locVertexAttribPointer_t(GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void* pointer); + typedef void CALLSTYLE locEnableVertexAttribArray_t(GLuint index); + typedef void CALLSTYLE locUseProgram_t(GLuint program); + typedef void CALLSTYLE locBindVertexArray_t(GLuint array); + typedef void CALLSTYLE locGenVertexArrays_t(GLsizei n, GLuint* arrays); + typedef void CALLSTYLE locGetShaderInfoLog_t(GLuint shader, GLsizei bufSize, GLsizei* length, GLchar* infoLog); + typedef GLint CALLSTYLE locGetUniformLocation_t(GLuint program, const GLchar* name); + typedef void CALLSTYLE locUniform1f_t(GLint location, GLfloat v0); + typedef void CALLSTYLE locUniform1i_t(GLint location, GLint v0); + typedef void CALLSTYLE locUniform2fv_t(GLint location, GLsizei count, const GLfloat* value); + typedef void CALLSTYLE locActiveTexture_t(GLenum texture); + typedef void CALLSTYLE locGenFrameBuffers_t(GLsizei n, GLuint* ids); + typedef void CALLSTYLE locBindFrameBuffer_t(GLenum target, GLuint fb); + typedef GLenum CALLSTYLE locCheckFrameBufferStatus_t(GLenum target); + typedef void CALLSTYLE locDeleteFrameBuffers_t(GLsizei n, const GLuint* fbs); + typedef void CALLSTYLE locFrameBufferTexture2D_t(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); + typedef void CALLSTYLE locDrawBuffers_t(GLsizei n, const GLenum* bufs); + typedef void CALLSTYLE locBlendFuncSeparate_t(GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); + +#if defined(OLC_PLATFORM_WINAPI) + typedef void __stdcall locSwapInterval_t(GLsizei n); +#endif + +#if defined(OLC_PLATFORM_X11) + typedef int(locSwapInterval_t)(X11::Display* dpy, X11::GLXDrawable drawable, int interval); +#endif + +#if defined(OLC_PLATFORM_EMSCRIPTEN) + typedef void CALLSTYLE locShaderSource_t(GLuint shader, GLsizei count, const GLchar* const* string, const GLint* length); + typedef EGLBoolean(locSwapInterval_t)(EGLDisplay display, EGLint interval); +#else + typedef void CALLSTYLE locShaderSource_t(GLuint shader, GLsizei count, const GLchar** string, const GLint* length); +#endif + +} // olc namespace +#endif // OpenGL33 Definitions +#pragma endregion + + +#endif // OLC_PGE_DEF + + 
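// A minimal, self-contained usage sketch of the interface declared above. It is
// not part of this header; it shows the typical single-header pattern in which
// exactly one translation unit defines OLC_PGE_APPLICATION before including
// olcPixelGameEngine.h. The class name "Demo" and the window parameters are
// placeholders chosen for illustration.
//
// #define OLC_PGE_APPLICATION
// #include "olcPixelGameEngine.h"
//
// class Demo : public olc::PixelGameEngine
// {
// public:
//     Demo() { sAppName = "Demo"; }
//
//     bool OnUserCreate() override
//     { return true; }                          // load resources once at startup
//
//     bool OnUserUpdate(float fElapsedTime) override
//     {
//         Clear(olc::BLACK);                    // wipe the current draw target
//         DrawString(8, 8, "Hello, PGE");       // monospaced text in pixel space
//         return true;                          // returning false ends the application
//     }
// };
//
// int main()
// {
//     Demo demo;
//     if (demo.Construct(256, 240, 4, 4))       // 256x240 logical pixels, scaled 4x
//         demo.Start();
//     return 0;
// }
//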
+// O------------------------------------------------------------------------------O +// | START OF OLC_PGE_APPLICATION | +// O------------------------------------------------------------------------------O +#ifdef OLC_PGE_APPLICATION +#undef OLC_PGE_APPLICATION + +// O------------------------------------------------------------------------------O +// | olcPixelGameEngine INTERFACE IMPLEMENTATION (CORE) | +// | Note: The core implementation is platform independent | +// O------------------------------------------------------------------------------O +#pragma region pge_implementation +namespace olc +{ + // O------------------------------------------------------------------------------O + // | olc::Pixel IMPLEMENTATION | + // O------------------------------------------------------------------------------O + Pixel::Pixel() + { r = 0; g = 0; b = 0; a = nDefaultAlpha; } + + Pixel::Pixel(uint8_t red, uint8_t green, uint8_t blue, uint8_t alpha) + { n = red | (green << 8) | (blue << 16) | (alpha << 24); } // Thanks jarekpelczar + + Pixel::Pixel(uint32_t p) + { n = p; } + + bool Pixel::operator==(const Pixel& p) const + { return n == p.n; } + + bool Pixel::operator!=(const Pixel& p) const + { return n != p.n; } + + Pixel Pixel::operator * (const float i) const + { + float fR = std::min(255.0f, std::max(0.0f, float(r) * i)); + float fG = std::min(255.0f, std::max(0.0f, float(g) * i)); + float fB = std::min(255.0f, std::max(0.0f, float(b) * i)); + return Pixel(uint8_t(fR), uint8_t(fG), uint8_t(fB), a); + } + + Pixel Pixel::operator / (const float i) const + { + float fR = std::min(255.0f, std::max(0.0f, float(r) / i)); + float fG = std::min(255.0f, std::max(0.0f, float(g) / i)); + float fB = std::min(255.0f, std::max(0.0f, float(b) / i)); + return Pixel(uint8_t(fR), uint8_t(fG), uint8_t(fB), a); + } + + Pixel& Pixel::operator *=(const float i) + { + this->r = uint8_t(std::min(255.0f, std::max(0.0f, float(r) * i))); + this->g = uint8_t(std::min(255.0f, std::max(0.0f, float(g) * i))); + this->b = uint8_t(std::min(255.0f, std::max(0.0f, float(b) * i))); + return *this; + } + + Pixel& Pixel::operator /=(const float i) + { + this->r = uint8_t(std::min(255.0f, std::max(0.0f, float(r) / i))); + this->g = uint8_t(std::min(255.0f, std::max(0.0f, float(g) / i))); + this->b = uint8_t(std::min(255.0f, std::max(0.0f, float(b) / i))); + return *this; + } + + Pixel Pixel::operator + (const Pixel& p) const + { + uint8_t nR = uint8_t(std::min(255, std::max(0, int(r) + int(p.r)))); + uint8_t nG = uint8_t(std::min(255, std::max(0, int(g) + int(p.g)))); + uint8_t nB = uint8_t(std::min(255, std::max(0, int(b) + int(p.b)))); + return Pixel(nR, nG, nB, a); + } + + Pixel Pixel::operator - (const Pixel& p) const + { + uint8_t nR = uint8_t(std::min(255, std::max(0, int(r) - int(p.r)))); + uint8_t nG = uint8_t(std::min(255, std::max(0, int(g) - int(p.g)))); + uint8_t nB = uint8_t(std::min(255, std::max(0, int(b) - int(p.b)))); + return Pixel(nR, nG, nB, a); + } + + Pixel& Pixel::operator += (const Pixel& p) + { + this->r = uint8_t(std::min(255, std::max(0, int(r) + int(p.r)))); + this->g = uint8_t(std::min(255, std::max(0, int(g) + int(p.g)))); + this->b = uint8_t(std::min(255, std::max(0, int(b) + int(p.b)))); + return *this; + } + + Pixel& Pixel::operator -= (const Pixel& p) // Thanks Au Lit + { + this->r = uint8_t(std::min(255, std::max(0, int(r) - int(p.r)))); + this->g = uint8_t(std::min(255, std::max(0, int(g) - int(p.g)))); + this->b = uint8_t(std::min(255, std::max(0, int(b) - int(p.b)))); + return *this; + } + + 
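// A brief illustrative note (not from the original source): the channel-wise
// operators above saturate to the [0, 255] range, and the scalar forms leave the
// alpha channel untouched, e.g.
//
//   olc::Pixel a(200, 100, 50);
//   olc::Pixel b(100, 200, 50);
//   olc::Pixel sum = a + b;      // (255, 255, 100) - red and green clamp at 255
//   olc::Pixel dim = a * 0.5f;   // (100,  50,  25) - alpha of 'a' is preserved
//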
Pixel Pixel::operator * (const Pixel& p) const + { + uint8_t nR = uint8_t(std::min(255.0f, std::max(0.0f, float(r) * float(p.r) / 255.0f))); + uint8_t nG = uint8_t(std::min(255.0f, std::max(0.0f, float(g) * float(p.g) / 255.0f))); + uint8_t nB = uint8_t(std::min(255.0f, std::max(0.0f, float(b) * float(p.b) / 255.0f))); + uint8_t nA = uint8_t(std::min(255.0f, std::max(0.0f, float(a) * float(p.a) / 255.0f))); + return Pixel(nR, nG, nB, nA); + } + + Pixel& Pixel::operator *=(const Pixel& p) + { + this->r = uint8_t(std::min(255.0f, std::max(0.0f, float(r) * float(p.r) / 255.0f))); + this->g = uint8_t(std::min(255.0f, std::max(0.0f, float(g) * float(p.g) / 255.0f))); + this->b = uint8_t(std::min(255.0f, std::max(0.0f, float(b) * float(p.b) / 255.0f))); + this->a = uint8_t(std::min(255.0f, std::max(0.0f, float(a) * float(p.a) / 255.0f))); + return *this; + } + + Pixel Pixel::inv() const + { + uint8_t nR = uint8_t(std::min(255, std::max(0, 255 - int(r)))); + uint8_t nG = uint8_t(std::min(255, std::max(0, 255 - int(g)))); + uint8_t nB = uint8_t(std::min(255, std::max(0, 255 - int(b)))); + return Pixel(nR, nG, nB, a); + } + + Pixel PixelF(float red, float green, float blue, float alpha) + { return Pixel(uint8_t(red * 255.0f), uint8_t(green * 255.0f), uint8_t(blue * 255.0f), uint8_t(alpha * 255.0f)); } + + Pixel PixelLerp(const olc::Pixel& p1, const olc::Pixel& p2, float t) + { return (p2 * t) + p1 * (1.0f - t); } + + // O------------------------------------------------------------------------------O + // | olc::Sprite IMPLEMENTATION | + // O------------------------------------------------------------------------------O + Sprite::Sprite() + { width = 0; height = 0; } + + Sprite::Sprite(const std::string& sImageFile, olc::ResourcePack* pack) + { LoadFromFile(sImageFile, pack); } + + Sprite::Sprite(int32_t w, int32_t h) + { + width = w; height = h; + pColData.resize(width * height); + pColData.resize(width * height, nDefaultPixel); + } + + Sprite::~Sprite() + { pColData.clear(); } + + void Sprite::SetSampleMode(olc::Sprite::Mode mode) + { modeSample = mode; } + + Pixel Sprite::GetPixel(const olc::vi2d& a) const + { return GetPixel(a.x, a.y); } + + bool Sprite::SetPixel(const olc::vi2d& a, Pixel p) + { return SetPixel(a.x, a.y, p); } + + Pixel Sprite::GetPixel(int32_t x, int32_t y) const + { + if (modeSample == olc::Sprite::Mode::NORMAL) + { + if (x >= 0 && x < width && y >= 0 && y < height) + return pColData[y * width + x]; + else + return Pixel(0, 0, 0, 0); + } + else + { + if (modeSample == olc::Sprite::Mode::PERIODIC) + return pColData[abs(y % height) * width + abs(x % width)]; + else + return pColData[std::max(0, std::min(y, height-1)) * width + std::max(0, std::min(x, width-1))]; + } + } + + bool Sprite::SetPixel(int32_t x, int32_t y, Pixel p) + { + if (x >= 0 && x < width && y >= 0 && y < height) + { + pColData[y * width + x] = p; + return true; + } + else + return false; + } + + Pixel Sprite::Sample(float x, float y) const + { + int32_t sx = std::min((int32_t)((x * (float)width)), width - 1); + int32_t sy = std::min((int32_t)((y * (float)height)), height - 1); + return GetPixel(sx, sy); + } + + Pixel Sprite::Sample(const olc::vf2d& uv) const + { + return Sample(uv.x, uv.y); + } + + Pixel Sprite::SampleBL(float u, float v) const + { + u = u * width - 0.5f; + v = v * height - 0.5f; + int x = (int)floor(u); // cast to int rounds toward zero, not downward + int y = (int)floor(v); // Thanks @joshinils + float u_ratio = u - x; + float v_ratio = v - y; + float u_opposite = 1 - u_ratio; + float 
v_opposite = 1 - v_ratio; + + olc::Pixel p1 = GetPixel(std::max(x, 0), std::max(y, 0)); + olc::Pixel p2 = GetPixel(std::min(x + 1, (int)width - 1), std::max(y, 0)); + olc::Pixel p3 = GetPixel(std::max(x, 0), std::min(y + 1, (int)height - 1)); + olc::Pixel p4 = GetPixel(std::min(x + 1, (int)width - 1), std::min(y + 1, (int)height - 1)); + + return olc::Pixel( + (uint8_t)((p1.r * u_opposite + p2.r * u_ratio) * v_opposite + (p3.r * u_opposite + p4.r * u_ratio) * v_ratio), + (uint8_t)((p1.g * u_opposite + p2.g * u_ratio) * v_opposite + (p3.g * u_opposite + p4.g * u_ratio) * v_ratio), + (uint8_t)((p1.b * u_opposite + p2.b * u_ratio) * v_opposite + (p3.b * u_opposite + p4.b * u_ratio) * v_ratio)); + } + + Pixel Sprite::SampleBL(const olc::vf2d& uv) const + { + return SampleBL(uv.x, uv.y); + } + + Pixel* Sprite::GetData() + { return pColData.data(); } + + + olc::rcode Sprite::LoadFromFile(const std::string& sImageFile, olc::ResourcePack* pack) + { + UNUSED(pack); + return loader->LoadImageResource(this, sImageFile, pack); + } + + olc::Sprite* Sprite::Duplicate() + { + olc::Sprite* spr = new olc::Sprite(width, height); + std::memcpy(spr->GetData(), GetData(), width * height * sizeof(olc::Pixel)); + spr->modeSample = modeSample; + return spr; + } + + olc::Sprite* Sprite::Duplicate(const olc::vi2d& vPos, const olc::vi2d& vSize) + { + olc::Sprite* spr = new olc::Sprite(vSize.x, vSize.y); + for (int y = 0; y < vSize.y; y++) + for (int x = 0; x < vSize.x; x++) + spr->SetPixel(x, y, GetPixel(vPos.x + x, vPos.y + y)); + return spr; + } + + olc::vi2d olc::Sprite::Size() const + { + return { width, height }; + } + + // O------------------------------------------------------------------------------O + // | olc::Decal IMPLEMENTATION | + // O------------------------------------------------------------------------------O + Decal::Decal(olc::Sprite* spr, bool filter, bool clamp) + { + id = -1; + if (spr == nullptr) return; + sprite = spr; + id = renderer->CreateTexture(sprite->width, sprite->height, filter, clamp); + Update(); + } + + Decal::Decal(const uint32_t nExistingTextureResource, olc::Sprite* spr) + { + if (spr == nullptr) return; + id = nExistingTextureResource; + } + + void Decal::Update() + { + if (sprite == nullptr) return; + vUVScale = { 1.0f / float(sprite->width), 1.0f / float(sprite->height) }; + renderer->ApplyTexture(id); + renderer->UpdateTexture(id, sprite); + } + + void Decal::UpdateSprite() + { + if (sprite == nullptr) return; + renderer->ApplyTexture(id); + renderer->ReadTexture(id, sprite); + } + + Decal::~Decal() + { + if (id != -1) + { + renderer->DeleteTexture(id); + id = -1; + } + } + + void Renderable::Create(uint32_t width, uint32_t height, bool filter, bool clamp) + { + pSprite = std::make_unique(width, height); + pDecal = std::make_unique(pSprite.get(), filter, clamp); + } + + olc::rcode Renderable::Load(const std::string& sFile, ResourcePack* pack, bool filter, bool clamp) + { + pSprite = std::make_unique(); + if (pSprite->LoadFromFile(sFile, pack) == olc::rcode::OK) + { + pDecal = std::make_unique(pSprite.get(), filter, clamp); + return olc::rcode::OK; + } + else + { + pSprite.release(); + pSprite = nullptr; + return olc::rcode::NO_FILE; + } + } + + olc::Decal* Renderable::Decal() const + { return pDecal.get(); } + + olc::Sprite* Renderable::Sprite() const + { return pSprite.get(); } + + // O------------------------------------------------------------------------------O + // | olc::ResourcePack IMPLEMENTATION | + // 
O------------------------------------------------------------------------------O + + + //============================================================= + // Resource Packs - Allows you to store files in one large + // scrambled file - Thanks MaGetzUb for debugging a null char in std::stringstream bug + ResourceBuffer::ResourceBuffer(std::ifstream& ifs, uint32_t offset, uint32_t size) + { + vMemory.resize(size); + ifs.seekg(offset); ifs.read(vMemory.data(), vMemory.size()); + setg(vMemory.data(), vMemory.data(), vMemory.data() + size); + } + + ResourcePack::ResourcePack() { } + ResourcePack::~ResourcePack() { baseFile.close(); } + + bool ResourcePack::AddFile(const std::string& sFile) + { + const std::string file = makeposix(sFile); + + if (_gfs::exists(file)) + { + sResourceFile e; + e.nSize = (uint32_t)_gfs::file_size(file); + e.nOffset = 0; // Unknown at this stage + mapFiles[file] = e; + return true; + } + return false; + } + + bool ResourcePack::LoadPack(const std::string& sFile, const std::string& sKey) + { + // Open the resource file + baseFile.open(sFile, std::ifstream::binary); + if (!baseFile.is_open()) return false; + + // 1) Read Scrambled index + uint32_t nIndexSize = 0; + baseFile.read((char*)&nIndexSize, sizeof(uint32_t)); + + std::vector buffer(nIndexSize); + for (uint32_t j = 0; j < nIndexSize; j++) + buffer[j] = baseFile.get(); + + std::vector decoded = scramble(buffer, sKey); + size_t pos = 0; + auto read = [&decoded, &pos](char* dst, size_t size) { + memcpy((void*)dst, (const void*)(decoded.data() + pos), size); + pos += size; + }; + + auto get = [&read]() -> int { char c; read(&c, 1); return c; }; + + // 2) Read Map + uint32_t nMapEntries = 0; + read((char*)&nMapEntries, sizeof(uint32_t)); + for (uint32_t i = 0; i < nMapEntries; i++) + { + uint32_t nFilePathSize = 0; + read((char*)&nFilePathSize, sizeof(uint32_t)); + + std::string sFileName(nFilePathSize, ' '); + for (uint32_t j = 0; j < nFilePathSize; j++) + sFileName[j] = get(); + + sResourceFile e; + read((char*)&e.nSize, sizeof(uint32_t)); + read((char*)&e.nOffset, sizeof(uint32_t)); + mapFiles[sFileName] = e; + } + + // Don't close base file! 
we will provide a stream + // pointer when the file is requested + return true; + } + + bool ResourcePack::SavePack(const std::string& sFile, const std::string& sKey) + { + // Create/Overwrite the resource file + std::ofstream ofs(sFile, std::ofstream::binary); + if (!ofs.is_open()) return false; + + // Iterate through map + uint32_t nIndexSize = 0; // Unknown for now + ofs.write((char*)&nIndexSize, sizeof(uint32_t)); + uint32_t nMapSize = uint32_t(mapFiles.size()); + ofs.write((char*)&nMapSize, sizeof(uint32_t)); + for (auto& e : mapFiles) + { + // Write the path of the file + size_t nPathSize = e.first.size(); + ofs.write((char*)&nPathSize, sizeof(uint32_t)); + ofs.write(e.first.c_str(), nPathSize); + + // Write the file entry properties + ofs.write((char*)&e.second.nSize, sizeof(uint32_t)); + ofs.write((char*)&e.second.nOffset, sizeof(uint32_t)); + } + + // 2) Write the individual Data + std::streampos offset = ofs.tellp(); + nIndexSize = (uint32_t)offset; + for (auto& e : mapFiles) + { + // Store beginning of file offset within resource pack file + e.second.nOffset = (uint32_t)offset; + + // Load the file to be added + std::vector vBuffer(e.second.nSize); + std::ifstream i(e.first, std::ifstream::binary); + i.read((char*)vBuffer.data(), e.second.nSize); + i.close(); + + // Write the loaded file into resource pack file + ofs.write((char*)vBuffer.data(), e.second.nSize); + offset += e.second.nSize; + } + + // 3) Scramble Index + std::vector stream; + auto write = [&stream](const char* data, size_t size) { + size_t sizeNow = stream.size(); + stream.resize(sizeNow + size); + memcpy(stream.data() + sizeNow, data, size); + }; + + // Iterate through map + write((char*)&nMapSize, sizeof(uint32_t)); + for (auto& e : mapFiles) + { + // Write the path of the file + size_t nPathSize = e.first.size(); + write((char*)&nPathSize, sizeof(uint32_t)); + write(e.first.c_str(), nPathSize); + + // Write the file entry properties + write((char*)&e.second.nSize, sizeof(uint32_t)); + write((char*)&e.second.nOffset, sizeof(uint32_t)); + } + std::vector sIndexString = scramble(stream, sKey); + uint32_t nIndexStringLen = uint32_t(sIndexString.size()); + // 4) Rewrite Map (it has been updated with offsets now) + // at start of file + ofs.seekp(0, std::ios::beg); + ofs.write((char*)&nIndexStringLen, sizeof(uint32_t)); + ofs.write(sIndexString.data(), nIndexStringLen); + ofs.close(); + return true; + } + + ResourceBuffer ResourcePack::GetFileBuffer(const std::string& sFile) + { return ResourceBuffer(baseFile, mapFiles[sFile].nOffset, mapFiles[sFile].nSize); } + + bool ResourcePack::Loaded() + { return baseFile.is_open(); } + + std::vector ResourcePack::scramble(const std::vector& data, const std::string& key) + { + if (key.empty()) return data; + std::vector o; + size_t c = 0; + for (auto s : data) o.push_back(s ^ key[(c++) % key.size()]); + return o; + }; + + std::string ResourcePack::makeposix(const std::string& path) + { + std::string o; + for (auto s : path) o += std::string(1, s == '\\' ? 
'/' : s); + return o; + }; + + // O------------------------------------------------------------------------------O + // | olc::PixelGameEngine IMPLEMENTATION | + // O------------------------------------------------------------------------------O + PixelGameEngine::PixelGameEngine() + { + sAppName = "Undefined"; + olc::PGEX::pge = this; + + // Bring in relevant Platform & Rendering systems depending + // on compiler parameters + olc_ConfigureSystem(); + } + + PixelGameEngine::~PixelGameEngine() + {} + + + olc::rcode PixelGameEngine::Construct(int32_t screen_w, int32_t screen_h, int32_t pixel_w, int32_t pixel_h, bool full_screen, bool vsync, bool cohesion) + { + bPixelCohesion = cohesion; + vScreenSize = { screen_w, screen_h }; + vInvScreenSize = { 1.0f / float(screen_w), 1.0f / float(screen_h) }; + vPixelSize = { pixel_w, pixel_h }; + vWindowSize = vScreenSize * vPixelSize; + bFullScreen = full_screen; + bEnableVSYNC = vsync; + vPixel = 2.0f / vScreenSize; + + if (vPixelSize.x <= 0 || vPixelSize.y <= 0 || vScreenSize.x <= 0 || vScreenSize.y <= 0) + return olc::FAIL; + return olc::OK; + } + + + void PixelGameEngine::SetScreenSize(int w, int h) + { + vScreenSize = { w, h }; + vInvScreenSize = { 1.0f / float(w), 1.0f / float(h) }; + for (auto& layer : vLayers) + { + layer.pDrawTarget.Create(vScreenSize.x, vScreenSize.y); + layer.bUpdate = true; + } + SetDrawTarget(nullptr); + renderer->ClearBuffer(olc::BLACK, true); + renderer->DisplayFrame(); + renderer->ClearBuffer(olc::BLACK, true); + renderer->UpdateViewport(vViewPos, vViewSize); + } + +#if !defined(PGE_USE_CUSTOM_START) + olc::rcode PixelGameEngine::Start() + { + if (platform->ApplicationStartUp() != olc::OK) return olc::FAIL; + + // Construct the window + if (platform->CreateWindowPane({ 30,30 }, vWindowSize, bFullScreen) != olc::OK) return olc::FAIL; + olc_UpdateWindowPos(30,30); + olc_UpdateWindowSize(vWindowSize.x, vWindowSize.y); + + // Start the thread + bAtomActive = true; + std::thread t = std::thread(&PixelGameEngine::EngineThread, this); + + // Some implementations may form an event loop here + platform->StartSystemEventLoop(); + + // Wait for thread to be exited + t.join(); + + if (platform->ApplicationCleanUp() != olc::OK) return olc::FAIL; + + return olc::OK; + } +#endif + + void PixelGameEngine::SetDrawTarget(Sprite* target) + { + if (target) + { + pDrawTarget = target; + } + else + { + nTargetLayer = 0; + pDrawTarget = vLayers[0].pDrawTarget.Sprite(); + } + } + + void PixelGameEngine::SetDrawTarget(uint8_t layer, bool bDirty) + { + if (layer < vLayers.size()) + { + pDrawTarget = vLayers[layer].pDrawTarget.Sprite(); + vLayers[layer].bUpdate = bDirty; + nTargetLayer = layer; + } + } + + void PixelGameEngine::EnableLayer(uint8_t layer, bool b) + { if (layer < vLayers.size()) vLayers[layer].bShow = b; } + + void PixelGameEngine::SetLayerOffset(uint8_t layer, const olc::vf2d& offset) + { SetLayerOffset(layer, offset.x, offset.y); } + + void PixelGameEngine::SetLayerOffset(uint8_t layer, float x, float y) + { if (layer < vLayers.size()) vLayers[layer].vOffset = { x, y }; } + + void PixelGameEngine::SetLayerScale(uint8_t layer, const olc::vf2d& scale) + { SetLayerScale(layer, scale.x, scale.y); } + + void PixelGameEngine::SetLayerScale(uint8_t layer, float x, float y) + { if (layer < vLayers.size()) vLayers[layer].vScale = { x, y }; } + + void PixelGameEngine::SetLayerTint(uint8_t layer, const olc::Pixel& tint) + { if (layer < vLayers.size()) vLayers[layer].tint = tint; } + + void 
PixelGameEngine::SetLayerCustomRenderFunction(uint8_t layer, std::function f) + { if (layer < vLayers.size()) vLayers[layer].funcHook = f; } + + std::vector& PixelGameEngine::GetLayers() + { return vLayers; } + + uint32_t PixelGameEngine::CreateLayer() + { + LayerDesc ld; + ld.pDrawTarget.Create(vScreenSize.x, vScreenSize.y); + vLayers.push_back(std::move(ld)); + return uint32_t(vLayers.size()) - 1; + } + + Sprite* PixelGameEngine::GetDrawTarget() const + { return pDrawTarget; } + + int32_t PixelGameEngine::GetDrawTargetWidth() const + { + if (pDrawTarget) + return pDrawTarget->width; + else + return 0; + } + + int32_t PixelGameEngine::GetDrawTargetHeight() const + { + if (pDrawTarget) + return pDrawTarget->height; + else + return 0; + } + + uint32_t PixelGameEngine::GetFPS() const + { return nLastFPS; } + + bool PixelGameEngine::IsFocused() const + { return bHasInputFocus; } + + HWButton PixelGameEngine::GetKey(Key k) const + { return pKeyboardState[k]; } + + HWButton PixelGameEngine::GetMouse(uint32_t b) const + { return pMouseState[b]; } + + int32_t PixelGameEngine::GetMouseX() const + { return vMousePos.x; } + + int32_t PixelGameEngine::GetMouseY() const + { return vMousePos.y; } + + const olc::vi2d& PixelGameEngine::GetMousePos() const + { return vMousePos; } + + int32_t PixelGameEngine::GetMouseWheel() const + { return nMouseWheelDelta; } + + int32_t PixelGameEngine::ScreenWidth() const + { return vScreenSize.x; } + + int32_t PixelGameEngine::ScreenHeight() const + { return vScreenSize.y; } + + float PixelGameEngine::GetElapsedTime() const + { return fLastElapsed; } + + const bool PixelGameEngine::IsMouseInsideWindow() const + { return GetMouseX()>=0&&GetMouseY()>=0&&GetMouseX()SetPixel(x, y, p); + } + + if (nPixelMode == Pixel::MASK) + { + if (p.a == 255) + return pDrawTarget->SetPixel(x, y, p); + } + + if (nPixelMode == Pixel::ALPHA) + { + Pixel d = pDrawTarget->GetPixel(x, y); + float a = (float)(p.a / 255.0f) * fBlendFactor; + float c = 1.0f - a; + float r = a * (float)p.r + c * (float)d.r; + float g = a * (float)p.g + c * (float)d.g; + float b = a * (float)p.b + c * (float)d.b; + return pDrawTarget->SetPixel(x, y, Pixel((uint8_t)r, (uint8_t)g, (uint8_t)b/*, (uint8_t)(p.a * fBlendFactor)*/)); + } + + if (nPixelMode == Pixel::CUSTOM) + { + return pDrawTarget->SetPixel(x, y, funcPixelMode(x, y, p, pDrawTarget->GetPixel(x, y))); + } + + return false; + } + + + void PixelGameEngine::DrawLine(const olc::vi2d& pos1, const olc::vi2d& pos2, Pixel p, uint32_t pattern) + { DrawLine(pos1.x, pos1.y, pos2.x, pos2.y, p, pattern); } + + void PixelGameEngine::DrawLine(int32_t x1, int32_t y1, int32_t x2, int32_t y2, Pixel p, uint32_t pattern) + { + int x, y, dx, dy, dx1, dy1, px, py, xe, ye, i; + dx = x2 - x1; dy = y2 - y1; + + auto rol = [&](void) { pattern = (pattern << 1) | (pattern >> 31); return pattern & 1; }; + + olc::vi2d p1(x1, y1), p2(x2, y2); + //if (!ClipLineToScreen(p1, p2)) + // return; + x1 = p1.x; y1 = p1.y; + x2 = p2.x; y2 = p2.y; + + // straight lines idea by gurkanctn + if (dx == 0) // Line is vertical + { + if (y2 < y1) std::swap(y1, y2); + for (y = y1; y <= y2; y++) if (rol()) Draw(x1, y, p); + return; + } + + if (dy == 0) // Line is horizontal + { + if (x2 < x1) std::swap(x1, x2); + for (x = x1; x <= x2; x++) if (rol()) Draw(x, y1, p); + return; + } + + // Line is Funk-aye + dx1 = abs(dx); dy1 = abs(dy); + px = 2 * dy1 - dx1; py = 2 * dx1 - dy1; + if (dy1 <= dx1) + { + if (dx >= 0) + { + x = x1; y = y1; xe = x2; + } + else + { + x = x2; y = y2; xe = x1; + } + + if (rol()) 
Draw(x, y, p); + + for (i = 0; x < xe; i++) + { + x = x + 1; + if (px < 0) + px = px + 2 * dy1; + else + { + if ((dx < 0 && dy < 0) || (dx > 0 && dy > 0)) y = y + 1; else y = y - 1; + px = px + 2 * (dy1 - dx1); + } + if (rol()) Draw(x, y, p); + } + } + else + { + if (dy >= 0) + { + x = x1; y = y1; ye = y2; + } + else + { + x = x2; y = y2; ye = y1; + } + + if (rol()) Draw(x, y, p); + + for (i = 0; y < ye; i++) + { + y = y + 1; + if (py <= 0) + py = py + 2 * dx1; + else + { + if ((dx < 0 && dy < 0) || (dx > 0 && dy > 0)) x = x + 1; else x = x - 1; + py = py + 2 * (dx1 - dy1); + } + if (rol()) Draw(x, y, p); + } + } + } + + void PixelGameEngine::DrawCircle(const olc::vi2d& pos, int32_t radius, Pixel p, uint8_t mask) + { DrawCircle(pos.x, pos.y, radius, p, mask); } + + void PixelGameEngine::DrawCircle(int32_t x, int32_t y, int32_t radius, Pixel p, uint8_t mask) + { // Thanks to IanM-Matrix1 #PR121 + if (radius < 0 || x < -radius || y < -radius || x - GetDrawTargetWidth() > radius || y - GetDrawTargetHeight() > radius) + return; + + if (radius > 0) + { + int x0 = 0; + int y0 = radius; + int d = 3 - 2 * radius; + + while (y0 >= x0) // only formulate 1/8 of circle + { + // Draw even octants + if (mask & 0x01) Draw(x + x0, y - y0, p);// Q6 - upper right right + if (mask & 0x04) Draw(x + y0, y + x0, p);// Q4 - lower lower right + if (mask & 0x10) Draw(x - x0, y + y0, p);// Q2 - lower left left + if (mask & 0x40) Draw(x - y0, y - x0, p);// Q0 - upper upper left + if (x0 != 0 && x0 != y0) + { + if (mask & 0x02) Draw(x + y0, y - x0, p);// Q7 - upper upper right + if (mask & 0x08) Draw(x + x0, y + y0, p);// Q5 - lower right right + if (mask & 0x20) Draw(x - y0, y + x0, p);// Q3 - lower lower left + if (mask & 0x80) Draw(x - x0, y - y0, p);// Q1 - upper left left + } + + if (d < 0) + d += 4 * x0++ + 6; + else + d += 4 * (x0++ - y0--) + 10; + } + } + else + Draw(x, y, p); + } + + void PixelGameEngine::FillCircle(const olc::vi2d& pos, int32_t radius, Pixel p) + { FillCircle(pos.x, pos.y, radius, p); } + + void PixelGameEngine::FillCircle(int32_t x, int32_t y, int32_t radius, Pixel p) + { // Thanks to IanM-Matrix1 #PR121 + if (radius < 0 || x < -radius || y < -radius || x - GetDrawTargetWidth() > radius || y - GetDrawTargetHeight() > radius) + return; + + if (radius > 0) + { + int x0 = 0; + int y0 = radius; + int d = 3 - 2 * radius; + + auto drawline = [&](int sx, int ex, int y) + { + for (int x = sx; x <= ex; x++) + Draw(x, y, p); + }; + + while (y0 >= x0) + { + drawline(x - y0, x + y0, y - x0); + if (x0 > 0) drawline(x - y0, x + y0, y + x0); + + if (d < 0) + d += 4 * x0++ + 6; + else + { + if (x0 != y0) + { + drawline(x - x0, x + x0, y - y0); + drawline(x - x0, x + x0, y + y0); + } + d += 4 * (x0++ - y0--) + 10; + } + } + } + else + Draw(x, y, p); + } + + void PixelGameEngine::DrawRect(const olc::vi2d& pos, const olc::vi2d& size, Pixel p) + { DrawRect(pos.x, pos.y, size.x, size.y, p); } + + void PixelGameEngine::DrawRect(int32_t x, int32_t y, int32_t w, int32_t h, Pixel p) + { + DrawLine(x, y, x + w, y, p); + DrawLine(x + w, y, x + w, y + h, p); + DrawLine(x + w, y + h, x, y + h, p); + DrawLine(x, y + h, x, y, p); + } + + void PixelGameEngine::Clear(Pixel p) + { + int pixels = GetDrawTargetWidth() * GetDrawTargetHeight(); + Pixel* m = GetDrawTarget()->GetData(); + for (int i = 0; i < pixels; i++) m[i] = p; + } + + void PixelGameEngine::ClearBuffer(Pixel p, bool bDepth) + { renderer->ClearBuffer(p, bDepth); } + + olc::Sprite* PixelGameEngine::GetFontSprite() + { return fontRenderable.Sprite(); } + + bool 
PixelGameEngine::ClipLineToScreen(olc::vi2d& in_p1, olc::vi2d& in_p2) + { + // https://en.wikipedia.org/wiki/Cohen%E2%80%93Sutherland_algorithm + static constexpr int SEG_I = 0b0000, SEG_L = 0b0001, SEG_R = 0b0010, SEG_B = 0b0100, SEG_T = 0b1000; + auto Segment = [&vScreenSize = vScreenSize](const olc::vi2d& v) + { + int i = SEG_I; + if (v.x < 0) i |= SEG_L; else if (v.x > vScreenSize.x) i |= SEG_R; + if (v.y < 0) i |= SEG_B; else if (v.y > vScreenSize.y) i |= SEG_T; + return i; + }; + + int s1 = Segment(in_p1), s2 = Segment(in_p2); + + while (true) + { + if (!(s1 | s2)) return true; + else if (s1 & s2) return false; + else + { + int s3 = s2 > s1 ? s2 : s1; + olc::vi2d n; + if (s3 & SEG_T) { n.x = in_p1.x + (in_p2.x - in_p1.x) * (vScreenSize.y - in_p1.y) / (in_p2.y - in_p1.y); n.y = vScreenSize.y; } + else if (s3 & SEG_B) { n.x = in_p1.x + (in_p2.x - in_p1.x) * (0 - in_p1.y) / (in_p2.y - in_p1.y); n.y = 0; } + else if (s3 & SEG_R) { n.x = vScreenSize.x; n.y = in_p1.y + (in_p2.y - in_p1.y) * (vScreenSize.x - in_p1.x) / (in_p2.x - in_p1.x); } + else if (s3 & SEG_L) { n.x = 0; n.y = in_p1.y + (in_p2.y - in_p1.y) * (0 - in_p1.x) / (in_p2.x - in_p1.x); } + if (s3 == s1) { in_p1 = n; s1 = Segment(in_p1); } + else { in_p2 = n; s2 = Segment(in_p2); } + } + } + return true; + } + + void PixelGameEngine::EnablePixelTransfer(const bool bEnable) + { + bSuspendTextureTransfer = !bEnable; + } + + + void PixelGameEngine::FillRect(const olc::vi2d& pos, const olc::vi2d& size, Pixel p) + { FillRect(pos.x, pos.y, size.x, size.y, p); } + + void PixelGameEngine::FillRect(int32_t x, int32_t y, int32_t w, int32_t h, Pixel p) + { + int32_t x2 = x + w; + int32_t y2 = y + h; + + if (x < 0) x = 0; + if (x >= (int32_t)GetDrawTargetWidth()) x = (int32_t)GetDrawTargetWidth(); + if (y < 0) y = 0; + if (y >= (int32_t)GetDrawTargetHeight()) y = (int32_t)GetDrawTargetHeight(); + + if (x2 < 0) x2 = 0; + if (x2 >= (int32_t)GetDrawTargetWidth()) x2 = (int32_t)GetDrawTargetWidth(); + if (y2 < 0) y2 = 0; + if (y2 >= (int32_t)GetDrawTargetHeight()) y2 = (int32_t)GetDrawTargetHeight(); + + for (int i = x; i < x2; i++) + for (int j = y; j < y2; j++) + Draw(i, j, p); + } + + void PixelGameEngine::DrawTriangle(const olc::vi2d& pos1, const olc::vi2d& pos2, const olc::vi2d& pos3, Pixel p) + { DrawTriangle(pos1.x, pos1.y, pos2.x, pos2.y, pos3.x, pos3.y, p); } + + void PixelGameEngine::DrawTriangle(int32_t x1, int32_t y1, int32_t x2, int32_t y2, int32_t x3, int32_t y3, Pixel p) + { + DrawLine(x1, y1, x2, y2, p); + DrawLine(x2, y2, x3, y3, p); + DrawLine(x3, y3, x1, y1, p); + } + + void PixelGameEngine::FillTriangle(const olc::vi2d& pos1, const olc::vi2d& pos2, const olc::vi2d& pos3, Pixel p) + { FillTriangle(pos1.x, pos1.y, pos2.x, pos2.y, pos3.x, pos3.y, p); } + + // https://www.avrfreaks.net/sites/default/files/triangles.c + void PixelGameEngine::FillTriangle(int32_t x1, int32_t y1, int32_t x2, int32_t y2, int32_t x3, int32_t y3, Pixel p) + { + auto drawline = [&](int sx, int ex, int ny) { for (int i = sx; i <= ex; i++) Draw(i, ny, p); }; + + int t1x, t2x, y, minx, maxx, t1xp, t2xp; + bool changed1 = false; + bool changed2 = false; + int signx1, signx2, dx1, dy1, dx2, dy2; + int e1, e2; + // Sort vertices + if (y1 > y2) { std::swap(y1, y2); std::swap(x1, x2); } + if (y1 > y3) { std::swap(y1, y3); std::swap(x1, x3); } + if (y2 > y3) { std::swap(y2, y3); std::swap(x2, x3); } + + t1x = t2x = x1; y = y1; // Starting points + dx1 = (int)(x2 - x1); + if (dx1 < 0) { dx1 = -dx1; signx1 = -1; } + else signx1 = 1; + dy1 = (int)(y2 - y1); + + 
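At this point FillTriangle has sorted its three vertices by y and computed the deltas for the first edge; what follows is an integer, Bresenham-style walk of the two active edges, split into a flat-bottom and a flat-top half at the middle vertex. The same split can be expressed much more compactly (if less efficiently) with floating-point edge interpolation. This is a minimal sketch only, not the engine's routine; plot(x, y) is a hypothetical per-pixel callback standing in for Draw.

    #include <algorithm>
    #include <cmath>
    #include <functional>

    void FillTriangleSketch(int x1, int y1, int x2, int y2, int x3, int y3,
                            const std::function<void(int, int)>& plot)
    {
        // Sort vertices so y1 <= y2 <= y3, exactly as the routine above does.
        if (y1 > y2) { std::swap(x1, x2); std::swap(y1, y2); }
        if (y1 > y3) { std::swap(x1, x3); std::swap(y1, y3); }
        if (y2 > y3) { std::swap(x2, x3); std::swap(y2, y3); }
        if (y1 == y3) return; // degenerate triangle, no area to fill

        // x coordinate of edge (xa,ya)-(xb,yb) at scanline y
        auto edgeX = [](int xa, int ya, int xb, int yb, int y) {
            return xa + (xb - xa) * float(y - ya) / float(yb - ya);
        };

        for (int y = y1; y <= y3; y++)
        {
            float xA = edgeX(x1, y1, x3, y3, y);        // long edge v1->v3
            float xB = (y < y2 || y2 == y3)             // whichever short edge spans y
                         ? edgeX(x1, y1, x2, y2, y)
                         : edgeX(x2, y2, x3, y3, y);
            if (xA > xB) std::swap(xA, xB);
            for (int x = int(std::ceil(xA)); x <= int(std::floor(xB)); x++)
                plot(x, y);
        }
    }

The production code above avoids the per-pixel floating-point divides by stepping both edges incrementally, which is why it is considerably longer.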
dx2 = (int)(x3 - x1); + if (dx2 < 0) { dx2 = -dx2; signx2 = -1; } + else signx2 = 1; + dy2 = (int)(y3 - y1); + + if (dy1 > dx1) { std::swap(dx1, dy1); changed1 = true; } + if (dy2 > dx2) { std::swap(dy2, dx2); changed2 = true; } + + e2 = (int)(dx2 >> 1); + // Flat top, just process the second half + if (y1 == y2) goto next; + e1 = (int)(dx1 >> 1); + + for (int i = 0; i < dx1;) { + t1xp = 0; t2xp = 0; + if (t1x < t2x) { minx = t1x; maxx = t2x; } + else { minx = t2x; maxx = t1x; } + // process first line until y value is about to change + while (i < dx1) { + i++; + e1 += dy1; + while (e1 >= dx1) { + e1 -= dx1; + if (changed1) t1xp = signx1;//t1x += signx1; + else goto next1; + } + if (changed1) break; + else t1x += signx1; + } + // Move line + next1: + // process second line until y value is about to change + while (1) { + e2 += dy2; + while (e2 >= dx2) { + e2 -= dx2; + if (changed2) t2xp = signx2;//t2x += signx2; + else goto next2; + } + if (changed2) break; + else t2x += signx2; + } + next2: + if (minx > t1x) minx = t1x; + if (minx > t2x) minx = t2x; + if (maxx < t1x) maxx = t1x; + if (maxx < t2x) maxx = t2x; + drawline(minx, maxx, y); // Draw line from min to max points found on the y + // Now increase y + if (!changed1) t1x += signx1; + t1x += t1xp; + if (!changed2) t2x += signx2; + t2x += t2xp; + y += 1; + if (y == y2) break; + } + next: + // Second half + dx1 = (int)(x3 - x2); if (dx1 < 0) { dx1 = -dx1; signx1 = -1; } + else signx1 = 1; + dy1 = (int)(y3 - y2); + t1x = x2; + + if (dy1 > dx1) { // swap values + std::swap(dy1, dx1); + changed1 = true; + } + else changed1 = false; + + e1 = (int)(dx1 >> 1); + + for (int i = 0; i <= dx1; i++) { + t1xp = 0; t2xp = 0; + if (t1x < t2x) { minx = t1x; maxx = t2x; } + else { minx = t2x; maxx = t1x; } + // process first line until y value is about to change + while (i < dx1) { + e1 += dy1; + while (e1 >= dx1) { + e1 -= dx1; + if (changed1) { t1xp = signx1; break; }//t1x += signx1; + else goto next3; + } + if (changed1) break; + else t1x += signx1; + if (i < dx1) i++; + } + next3: + // process second line until y value is about to change + while (t2x != x3) { + e2 += dy2; + while (e2 >= dx2) { + e2 -= dx2; + if (changed2) t2xp = signx2; + else goto next4; + } + if (changed2) break; + else t2x += signx2; + } + next4: + + if (minx > t1x) minx = t1x; + if (minx > t2x) minx = t2x; + if (maxx < t1x) maxx = t1x; + if (maxx < t2x) maxx = t2x; + drawline(minx, maxx, y); + if (!changed1) t1x += signx1; + t1x += t1xp; + if (!changed2) t2x += signx2; + t2x += t2xp; + y += 1; + if (y > y3) return; + } + } + + void PixelGameEngine::FillTexturedTriangle(const std::vector& vPoints, std::vector vTex, std::vector vColour, olc::Sprite* sprTex) + { + olc::vi2d p1 = vPoints[0]; + olc::vi2d p2 = vPoints[1]; + olc::vi2d p3 = vPoints[2]; + + if (p2.y < p1.y){std::swap(p1.y, p2.y); std::swap(p1.x, p2.x); std::swap(vTex[0].x, vTex[1].x); std::swap(vTex[0].y, vTex[1].y); std::swap(vColour[0], vColour[1]);} + if (p3.y < p1.y){std::swap(p1.y, p3.y); std::swap(p1.x, p3.x); std::swap(vTex[0].x, vTex[2].x); std::swap(vTex[0].y, vTex[2].y); std::swap(vColour[0], vColour[2]);} + if (p3.y < p2.y){std::swap(p2.y, p3.y); std::swap(p2.x, p3.x); std::swap(vTex[1].x, vTex[2].x); std::swap(vTex[1].y, vTex[2].y); std::swap(vColour[1], vColour[2]);} + + olc::vi2d dPos1 = p2 - p1; + olc::vf2d dTex1 = vTex[1] - vTex[0]; + int dcr1 = vColour[1].r - vColour[0].r; + int dcg1 = vColour[1].g - vColour[0].g; + int dcb1 = vColour[1].b - vColour[0].b; + int dca1 = vColour[1].a - vColour[0].a; + + 
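Every interpolated quantity in FillTexturedTriangle gets the same treatment: its total change along an edge (screen x, the UV pair, and the four colour channels) is divided by that edge's height in pixels to give a per-scanline step, which is then accumulated as the edge is walked. A compact sketch of that pattern, assuming a hypothetical Attrib bundle rather than the engine's separate step variables:

    #include <array>
    #include <cmath>

    // Attrib bundles { x, u, v, r, g, b, a }; it is a hypothetical helper, not an engine type.
    using Attrib = std::array<float, 7>;

    // step = (end - start) / |edge height in scanlines|
    Attrib EdgeStep(const Attrib& start, const Attrib& end, int yStart, int yEnd)
    {
        Attrib step{};
        const int h = std::abs(yEnd - yStart);
        if (h == 0) return step;                    // horizontal edge: nothing to step
        for (size_t i = 0; i < step.size(); i++)
            step[i] = (end[i] - start[i]) / float(h);
        return step;
    }

    // Advancing all attributes by one scanline is then a single loop.
    void Advance(Attrib& a, const Attrib& step)
    {
        for (size_t i = 0; i < a.size(); i++) a[i] += step[i];
    }

Like the routine above, this is affine interpolation with no perspective correction, which is sufficient for the engine's 2D use.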
olc::vi2d dPos2 = p3 - p1; + olc::vf2d dTex2 = vTex[2] - vTex[0]; + int dcr2 = vColour[2].r - vColour[0].r; + int dcg2 = vColour[2].g - vColour[0].g; + int dcb2 = vColour[2].b - vColour[0].b; + int dca2 = vColour[2].a - vColour[0].a; + + float dax_step = 0, dbx_step = 0, dcr1_step = 0, dcr2_step = 0, dcg1_step = 0, dcg2_step = 0, dcb1_step = 0, dcb2_step = 0, dca1_step = 0, dca2_step = 0; + olc::vf2d vTex1Step, vTex2Step; + + if (dPos1.y) + { + dax_step = dPos1.x / (float)abs(dPos1.y); + vTex1Step = dTex1 / (float)abs(dPos1.y); + dcr1_step = dcr1 / (float)abs(dPos1.y); + dcg1_step = dcg1 / (float)abs(dPos1.y); + dcb1_step = dcb1 / (float)abs(dPos1.y); + dca1_step = dca1 / (float)abs(dPos1.y); + } + + if (dPos2.y) + { + dbx_step = dPos2.x / (float)abs(dPos2.y); + vTex2Step = dTex2 / (float)abs(dPos2.y); + dcr2_step = dcr2 / (float)abs(dPos2.y); + dcg2_step = dcg2 / (float)abs(dPos2.y); + dcb2_step = dcb2 / (float)abs(dPos2.y); + dca2_step = dca2 / (float)abs(dPos2.y); + } + + olc::vi2d vStart; + olc::vi2d vEnd; + int vStartIdx; + + for (int pass = 0; pass < 2; pass++) + { + if (pass == 0) + { + vStart = p1; vEnd = p2; vStartIdx = 0; + } + else + { + dPos1 = p3 - p2; + dTex1 = vTex[2] - vTex[1]; + dcr1 = vColour[2].r - vColour[1].r; + dcg1 = vColour[2].g - vColour[1].g; + dcb1 = vColour[2].b - vColour[1].b; + dca1 = vColour[2].a - vColour[1].a; + dcr1_step = 0; dcg1_step = 0; dcb1_step = 0; dca1_step = 0; + + if (dPos2.y) dbx_step = dPos2.x / (float)abs(dPos2.y); + if (dPos1.y) + { + dax_step = dPos1.x / (float)abs(dPos1.y); + vTex1Step = dTex1 / (float)abs(dPos1.y); + dcr1_step = dcr1 / (float)abs(dPos1.y); + dcg1_step = dcg1 / (float)abs(dPos1.y); + dcb1_step = dcb1 / (float)abs(dPos1.y); + dca1_step = dca1 / (float)abs(dPos1.y); + } + + vStart = p2; vEnd = p3; vStartIdx = 1; + } + + if (dPos1.y) + { + for (int i = vStart.y; i <= vEnd.y; i++) + { + int ax = int(vStart.x + (float)(i - vStart.y) * dax_step); + int bx = int(p1.x + (float)(i - p1.y) * dbx_step); + + olc::vf2d tex_s(vTex[vStartIdx].x + (float)(i - vStart.y) * vTex1Step.x, vTex[vStartIdx].y + (float)(i - vStart.y) * vTex1Step.y); + olc::vf2d tex_e(vTex[0].x + (float)(i - p1.y) * vTex2Step.x, vTex[0].y + (float)(i - p1.y) * vTex2Step.y); + + olc::Pixel col_s(vColour[vStartIdx].r + uint8_t((float)(i - vStart.y) * dcr1_step), vColour[vStartIdx].g + uint8_t((float)(i - vStart.y) * dcg1_step), + vColour[vStartIdx].b + uint8_t((float)(i - vStart.y) * dcb1_step), vColour[vStartIdx].a + uint8_t((float)(i - vStart.y) * dca1_step)); + + olc::Pixel col_e(vColour[0].r + uint8_t((float)(i - p1.y) * dcr2_step), vColour[0].g + uint8_t((float)(i - p1.y) * dcg2_step), + vColour[0].b + uint8_t((float)(i - p1.y) * dcb2_step), vColour[0].a + uint8_t((float)(i - p1.y) * dca2_step)); + + if (ax > bx) { std::swap(ax, bx); std::swap(tex_s, tex_e); std::swap(col_s, col_e); } + + float tstep = 1.0f / ((float)(bx - ax)); + float t = 0.0f; + + for (int j = ax; j < bx; j++) + { + olc::Pixel pixel = PixelLerp(col_s, col_e, t); + if (sprTex != nullptr) pixel *= sprTex->Sample(tex_s.lerp(tex_e, t)); + Draw(j, i, pixel); + t += tstep; + } + } + } + } + } + + void PixelGameEngine::FillTexturedPolygon(const std::vector& vPoints, const std::vector& vTex, const std::vector& vColour, olc::Sprite* sprTex, olc::DecalStructure structure) + { + if (structure == olc::DecalStructure::LINE) + { + return; // Meaningless, so do nothing + } + + if (vPoints.size() < 3 || vTex.size() < 3 || vColour.size() < 3) + return; + + if (structure == olc::DecalStructure::LIST) + { + for 
(int tri = 0; tri < vPoints.size() / 3; tri++) + { + std::vector vP = { vPoints[tri * 3 + 0], vPoints[tri * 3 + 1], vPoints[tri * 3 + 2] }; + std::vector vT = { vTex[tri * 3 + 0], vTex[tri * 3 + 1], vTex[tri * 3 + 2] }; + std::vector vC = { vColour[tri * 3 + 0], vColour[tri * 3 + 1], vColour[tri * 3 + 2] }; + FillTexturedTriangle(vP, vT, vC, sprTex); + } + return; + } + + if (structure == olc::DecalStructure::STRIP) + { + for (int tri = 2; tri < vPoints.size(); tri++) + { + std::vector vP = { vPoints[tri - 2], vPoints[tri-1], vPoints[tri] }; + std::vector vT = { vTex[tri - 2], vTex[tri - 1], vTex[tri] }; + std::vector vC = { vColour[tri - 2], vColour[tri - 1], vColour[tri] }; + FillTexturedTriangle(vP, vT, vC, sprTex); + } + return; + } + + if (structure == olc::DecalStructure::FAN) + { + for (int tri = 2; tri < vPoints.size(); tri++) + { + std::vector vP = { vPoints[0], vPoints[tri - 1], vPoints[tri] }; + std::vector vT = { vTex[0], vTex[tri - 1], vTex[tri] }; + std::vector vC = { vColour[0], vColour[tri - 1], vColour[tri] }; + FillTexturedTriangle(vP, vT, vC, sprTex); + } + return; + } + } + + + void PixelGameEngine::DrawSprite(const olc::vi2d& pos, Sprite* sprite, uint32_t scale, uint8_t flip) + { DrawSprite(pos.x, pos.y, sprite, scale, flip); } + + void PixelGameEngine::DrawSprite(int32_t x, int32_t y, Sprite* sprite, uint32_t scale, uint8_t flip) + { + if (sprite == nullptr) + return; + + int32_t fxs = 0, fxm = 1, fx = 0; + int32_t fys = 0, fym = 1, fy = 0; + if (flip & olc::Sprite::Flip::HORIZ) { fxs = sprite->width - 1; fxm = -1; } + if (flip & olc::Sprite::Flip::VERT) { fys = sprite->height - 1; fym = -1; } + + if (scale > 1) + { + fx = fxs; + for (int32_t i = 0; i < sprite->width; i++, fx += fxm) + { + fy = fys; + for (int32_t j = 0; j < sprite->height; j++, fy += fym) + for (uint32_t is = 0; is < scale; is++) + for (uint32_t js = 0; js < scale; js++) + Draw(x + (i * scale) + is, y + (j * scale) + js, sprite->GetPixel(fx, fy)); + } + } + else + { + fx = fxs; + for (int32_t i = 0; i < sprite->width; i++, fx += fxm) + { + fy = fys; + for (int32_t j = 0; j < sprite->height; j++, fy += fym) + Draw(x + i, y + j, sprite->GetPixel(fx, fy)); + } + } + } + + void PixelGameEngine::DrawPartialSprite(const olc::vi2d& pos, Sprite* sprite, const olc::vi2d& sourcepos, const olc::vi2d& size, uint32_t scale, uint8_t flip) + { DrawPartialSprite(pos.x, pos.y, sprite, sourcepos.x, sourcepos.y, size.x, size.y, scale, flip); } + + void PixelGameEngine::DrawPartialSprite(int32_t x, int32_t y, Sprite* sprite, int32_t ox, int32_t oy, int32_t w, int32_t h, uint32_t scale, uint8_t flip) + { + if (sprite == nullptr) + return; + + int32_t fxs = 0, fxm = 1, fx = 0; + int32_t fys = 0, fym = 1, fy = 0; + if (flip & olc::Sprite::Flip::HORIZ) { fxs = w - 1; fxm = -1; } + if (flip & olc::Sprite::Flip::VERT) { fys = h - 1; fym = -1; } + + if (scale > 1) + { + fx = fxs; + for (int32_t i = 0; i < w; i++, fx += fxm) + { + fy = fys; + for (int32_t j = 0; j < h; j++, fy += fym) + for (uint32_t is = 0; is < scale; is++) + for (uint32_t js = 0; js < scale; js++) + Draw(x + (i * scale) + is, y + (j * scale) + js, sprite->GetPixel(fx + ox, fy + oy)); + } + } + else + { + fx = fxs; + for (int32_t i = 0; i < w; i++, fx += fxm) + { + fy = fys; + for (int32_t j = 0; j < h; j++, fy += fym) + Draw(x + i, y + j, sprite->GetPixel(fx + ox, fy + oy)); + } + } + } + + void PixelGameEngine::SetDecalMode(const olc::DecalMode& mode) + { nDecalMode = mode; } + + void PixelGameEngine::SetDecalStructure(const olc::DecalStructure& structure) 
+ { nDecalStructure = structure; } + + void PixelGameEngine::DrawPartialDecal(const olc::vf2d& pos, olc::Decal* decal, const olc::vf2d& source_pos, const olc::vf2d& source_size, const olc::vf2d& scale, const olc::Pixel& tint) + { + olc::vf2d vScreenSpacePos = + { + (pos.x * vInvScreenSize.x) * 2.0f - 1.0f, + -((pos.y * vInvScreenSize.y) * 2.0f - 1.0f) + }; + + + olc::vf2d vScreenSpaceDim = + { + ((pos.x + source_size.x * scale.x) * vInvScreenSize.x) * 2.0f - 1.0f, + -(((pos.y + source_size.y * scale.y) * vInvScreenSize.y) * 2.0f - 1.0f) + }; + + olc::vf2d vWindow = olc::vf2d(vViewSize); + olc::vf2d vQuantisedPos = ((vScreenSpacePos * vWindow) + olc::vf2d(0.5f, 0.5f)).floor() / vWindow; + olc::vf2d vQuantisedDim = ((vScreenSpaceDim * vWindow) + olc::vf2d(0.5f, -0.5f)).ceil() / vWindow; + + DecalInstance di; + di.points = 4; + di.decal = decal; + di.tint = { tint, tint, tint, tint }; + di.pos = { { vQuantisedPos.x, vQuantisedPos.y }, { vQuantisedPos.x, vQuantisedDim.y }, { vQuantisedDim.x, vQuantisedDim.y }, { vQuantisedDim.x, vQuantisedPos.y } }; + olc::vf2d uvtl = (source_pos + olc::vf2d(0.0001f, 0.0001f)) * decal->vUVScale; + olc::vf2d uvbr = (source_pos + source_size - olc::vf2d(0.0001f, 0.0001f)) * decal->vUVScale; + di.uv = { { uvtl.x, uvtl.y }, { uvtl.x, uvbr.y }, { uvbr.x, uvbr.y }, { uvbr.x, uvtl.y } }; + di.w = { 1,1,1,1 }; + di.mode = nDecalMode; + di.structure = nDecalStructure; + vLayers[nTargetLayer].vecDecalInstance.push_back(di); + } + + void PixelGameEngine::DrawPartialDecal(const olc::vf2d& pos, const olc::vf2d& size, olc::Decal* decal, const olc::vf2d& source_pos, const olc::vf2d& source_size, const olc::Pixel& tint) + { + olc::vf2d vScreenSpacePos = + { + (pos.x * vInvScreenSize.x) * 2.0f - 1.0f, + ((pos.y * vInvScreenSize.y) * 2.0f - 1.0f) * -1.0f + }; + + olc::vf2d vScreenSpaceDim = + { + vScreenSpacePos.x + (2.0f * size.x * vInvScreenSize.x), + vScreenSpacePos.y - (2.0f * size.y * vInvScreenSize.y) + }; + + DecalInstance di; + di.points = 4; + di.decal = decal; + di.tint = { tint, tint, tint, tint }; + di.pos = { { vScreenSpacePos.x, vScreenSpacePos.y }, { vScreenSpacePos.x, vScreenSpaceDim.y }, { vScreenSpaceDim.x, vScreenSpaceDim.y }, { vScreenSpaceDim.x, vScreenSpacePos.y } }; + olc::vf2d uvtl = (source_pos) * decal->vUVScale; + olc::vf2d uvbr = uvtl + ((source_size) * decal->vUVScale); + di.uv = { { uvtl.x, uvtl.y }, { uvtl.x, uvbr.y }, { uvbr.x, uvbr.y }, { uvbr.x, uvtl.y } }; + di.w = { 1,1,1,1 }; + di.mode = nDecalMode; + di.structure = nDecalStructure; + vLayers[nTargetLayer].vecDecalInstance.push_back(di); + } + + + void PixelGameEngine::DrawDecal(const olc::vf2d& pos, olc::Decal* decal, const olc::vf2d& scale, const olc::Pixel& tint) + { + olc::vf2d vScreenSpacePos = + { + (pos.x * vInvScreenSize.x) * 2.0f - 1.0f, + ((pos.y * vInvScreenSize.y) * 2.0f - 1.0f) * -1.0f + }; + + olc::vf2d vScreenSpaceDim = + { + vScreenSpacePos.x + (2.0f * (float(decal->sprite->width) * vInvScreenSize.x)) * scale.x, + vScreenSpacePos.y - (2.0f * (float(decal->sprite->height) * vInvScreenSize.y)) * scale.y + }; + + DecalInstance di; + di.decal = decal; + di.points = 4; + di.tint = { tint, tint, tint, tint }; + di.pos = { { vScreenSpacePos.x, vScreenSpacePos.y }, { vScreenSpacePos.x, vScreenSpaceDim.y }, { vScreenSpaceDim.x, vScreenSpaceDim.y }, { vScreenSpaceDim.x, vScreenSpacePos.y } }; + di.uv = { { 0.0f, 0.0f}, {0.0f, 1.0f}, {1.0f, 1.0f}, {1.0f, 0.0f} }; + di.w = { 1, 1, 1, 1 }; + di.mode = nDecalMode; + di.structure = nDecalStructure; + 
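All of the decal routines in this block perform the same coordinate change before queuing vertices: a pixel position is scaled by vInvScreenSize, remapped from [0, 1] to [-1, +1], and its y component is negated because screen space grows downward while normalised device coordinates grow upward. As a standalone sketch (Vec2 and ToNDC are hypothetical helpers, not engine API):

    // Pixel -> normalised-device-coordinate mapping used when decal quads are queued.
    struct Vec2 { float x, y; };

    Vec2 ToNDC(const Vec2& pixel, const Vec2& invScreenSize)
    {
        return {
            pixel.x * invScreenSize.x * 2.0f - 1.0f,      // [0, width]  -> [-1, +1]
            -(pixel.y * invScreenSize.y * 2.0f - 1.0f)    // [0, height] -> [+1, -1], y flipped
        };
    }

For example, with a 320x240 screen (invScreenSize = {1/320.0f, 1/240.0f}) the pixel (0, 0) maps to (-1, +1), the top-left corner of the clip-space square.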
vLayers[nTargetLayer].vecDecalInstance.push_back(di); + } + + void PixelGameEngine::DrawExplicitDecal(olc::Decal* decal, const olc::vf2d* pos, const olc::vf2d* uv, const olc::Pixel* col, uint32_t elements) + { + DecalInstance di; + di.decal = decal; + di.pos.resize(elements); + di.uv.resize(elements); + di.w.resize(elements); + di.tint.resize(elements); + di.points = elements; + for (uint32_t i = 0; i < elements; i++) + { + di.pos[i] = { (pos[i].x * vInvScreenSize.x) * 2.0f - 1.0f, ((pos[i].y * vInvScreenSize.y) * 2.0f - 1.0f) * -1.0f }; + di.uv[i] = uv[i]; + di.tint[i] = col[i]; + di.w[i] = 1.0f; + } + di.mode = nDecalMode; + di.structure = nDecalStructure; + vLayers[nTargetLayer].vecDecalInstance.push_back(di); + } + + void PixelGameEngine::DrawPolygonDecal(olc::Decal* decal, const std::vector& pos, const std::vector& uv, const olc::Pixel tint) + { + DecalInstance di; + di.decal = decal; + di.points = uint32_t(pos.size()); + di.pos.resize(di.points); + di.uv.resize(di.points); + di.w.resize(di.points); + di.tint.resize(di.points); + for (uint32_t i = 0; i < di.points; i++) + { + di.pos[i] = { (pos[i].x * vInvScreenSize.x) * 2.0f - 1.0f, ((pos[i].y * vInvScreenSize.y) * 2.0f - 1.0f) * -1.0f }; + di.uv[i] = uv[i]; + di.tint[i] = tint; + di.w[i] = 1.0f; + } + di.mode = nDecalMode; + di.structure = nDecalStructure; + vLayers[nTargetLayer].vecDecalInstance.push_back(di); + } + + void PixelGameEngine::DrawPolygonDecal(olc::Decal* decal, const std::vector& pos, const std::vector& uv, const std::vector &tint) + { + DecalInstance di; + di.decal = decal; + di.points = uint32_t(pos.size()); + di.pos.resize(di.points); + di.uv.resize(di.points); + di.w.resize(di.points); + di.tint.resize(di.points); + for (uint32_t i = 0; i < di.points; i++) + { + di.pos[i] = { (pos[i].x * vInvScreenSize.x) * 2.0f - 1.0f, ((pos[i].y * vInvScreenSize.y) * 2.0f - 1.0f) * -1.0f }; + di.uv[i] = uv[i]; + di.tint[i] = tint[i]; + di.w[i] = 1.0f; + } + di.mode = nDecalMode; + di.structure = nDecalStructure; + vLayers[nTargetLayer].vecDecalInstance.push_back(di); + } + + void PixelGameEngine::DrawPolygonDecal(olc::Decal* decal, const std::vector& pos, const std::vector& uv, const std::vector& colours, const olc::Pixel tint) + { + std::vector newColours(colours.size(), olc::WHITE); + std::transform(colours.begin(), colours.end(), newColours.begin(), + [&tint](const olc::Pixel pin) { return pin * tint; }); + DrawPolygonDecal(decal, pos, uv, newColours); + } + + + void PixelGameEngine::DrawPolygonDecal(olc::Decal* decal, const std::vector& pos, const std::vector& depth, const std::vector& uv, const olc::Pixel tint) + { + DecalInstance di; + di.decal = decal; + di.points = uint32_t(pos.size()); + di.pos.resize(di.points); + di.uv.resize(di.points); + di.w.resize(di.points); + di.tint.resize(di.points); + for (uint32_t i = 0; i < di.points; i++) + { + di.pos[i] = { (pos[i].x * vInvScreenSize.x) * 2.0f - 1.0f, ((pos[i].y * vInvScreenSize.y) * 2.0f - 1.0f) * -1.0f }; + di.uv[i] = uv[i]; + di.tint[i] = tint; + di.w[i] = 1.0f; + } + di.mode = nDecalMode; + di.structure = nDecalStructure; + vLayers[nTargetLayer].vecDecalInstance.push_back(di); + } + +#ifdef OLC_ENABLE_EXPERIMENTAL + // Lightweight 3D + void PixelGameEngine::LW3D_DrawTriangles(olc::Decal* decal, const std::vector>& pos, const std::vector& tex, const std::vector& col) + { + DecalInstance di; + di.decal = decal; + di.points = uint32_t(pos.size()); + di.pos.resize(di.points); + di.uv.resize(di.points); + di.w.resize(di.points); + di.tint.resize(di.points); + for (uint32_t 
i = 0; i < di.points; i++) + { + di.pos[i] = { pos[i][0], pos[i][1] }; + di.w[i] = pos[i][2]; + di.uv[i] = tex[i]; + di.tint[i] = col[i]; + } + di.mode = DecalMode::MODEL3D; + vLayers[nTargetLayer].vecDecalInstance.push_back(di); + } +#endif + + void PixelGameEngine::DrawLineDecal(const olc::vf2d& pos1, const olc::vf2d& pos2, Pixel p) + { + auto m = nDecalMode; + nDecalMode = olc::DecalMode::WIREFRAME; + DrawPolygonDecal(nullptr, { pos1, pos2 }, { {0, 0}, {0,0} }, p); + nDecalMode = m; + + /*DecalInstance di; + di.decal = nullptr; + di.points = uint32_t(2); + di.pos.resize(di.points); + di.uv.resize(di.points); + di.w.resize(di.points); + di.tint.resize(di.points); + di.pos[0] = { (pos1.x * vInvScreenSize.x) * 2.0f - 1.0f, ((pos1.y * vInvScreenSize.y) * 2.0f - 1.0f) * -1.0f }; + di.uv[0] = { 0.0f, 0.0f }; + di.tint[0] = p; + di.w[0] = 1.0f; + di.pos[1] = { (pos2.x * vInvScreenSize.x) * 2.0f - 1.0f, ((pos2.y * vInvScreenSize.y) * 2.0f - 1.0f) * -1.0f }; + di.uv[1] = { 0.0f, 0.0f }; + di.tint[1] = p; + di.w[1] = 1.0f; + di.mode = olc::DecalMode::WIREFRAME; + di.structure = nDecalStructure; + vLayers[nTargetLayer].vecDecalInstance.push_back(di);*/ + } + + void PixelGameEngine::DrawRectDecal(const olc::vf2d& pos, const olc::vf2d& size, const olc::Pixel col) + { + auto m = nDecalMode; + SetDecalMode(olc::DecalMode::WIREFRAME); + olc::vf2d vNewSize = size;// (size - olc::vf2d(0.375f, 0.375f)).ceil(); + std::array points = { { {pos}, {pos.x, pos.y + vNewSize.y}, {pos + vNewSize}, {pos.x + vNewSize.x, pos.y} } }; + std::array uvs = { {{0,0},{0,0},{0,0},{0,0}} }; + std::array cols = { {col, col, col, col} }; + DrawExplicitDecal(nullptr, points.data(), uvs.data(), cols.data(), 4); + SetDecalMode(m); + + } + + void PixelGameEngine::FillRectDecal(const olc::vf2d& pos, const olc::vf2d& size, const olc::Pixel col) + { + olc::vf2d vNewSize = size;// (size - olc::vf2d(0.375f, 0.375f)).ceil(); + std::array points = { { {pos}, {pos.x, pos.y + vNewSize.y}, {pos + vNewSize}, {pos.x + vNewSize.x, pos.y} } }; + std::array uvs = { {{0,0},{0,0},{0,0},{0,0}} }; + std::array cols = { {col, col, col, col} }; + DrawExplicitDecal(nullptr, points.data(), uvs.data(), cols.data(), 4); + } + + void PixelGameEngine::GradientFillRectDecal(const olc::vf2d& pos, const olc::vf2d& size, const olc::Pixel colTL, const olc::Pixel colBL, const olc::Pixel colBR, const olc::Pixel colTR) + { + std::array points = { { {pos}, {pos.x, pos.y + size.y}, {pos + size}, {pos.x + size.x, pos.y} } }; + std::array uvs = { {{0,0},{0,0},{0,0},{0,0}} }; + std::array cols = { {colTL, colBL, colBR, colTR} }; + DrawExplicitDecal(nullptr, points.data(), uvs.data(), cols.data(), 4); + } + + void PixelGameEngine::DrawRotatedDecal(const olc::vf2d& pos, olc::Decal* decal, const float fAngle, const olc::vf2d& center, const olc::vf2d& scale, const olc::Pixel& tint) + { + DecalInstance di; + di.decal = decal; + di.pos.resize(4); + di.uv = { { 0.0f, 0.0f}, {0.0f, 1.0f}, {1.0f, 1.0f}, {1.0f, 0.0f} }; + di.w = { 1, 1, 1, 1 }; + di.tint = { tint, tint, tint, tint }; + di.points = 4; + di.pos[0] = (olc::vf2d(0.0f, 0.0f) - center) * scale; + di.pos[1] = (olc::vf2d(0.0f, float(decal->sprite->height)) - center) * scale; + di.pos[2] = (olc::vf2d(float(decal->sprite->width), float(decal->sprite->height)) - center) * scale; + di.pos[3] = (olc::vf2d(float(decal->sprite->width), 0.0f) - center) * scale; + float c = cos(fAngle), s = sin(fAngle); + for (int i = 0; i < 4; i++) + { + di.pos[i] = pos + olc::vf2d(di.pos[i].x * c - di.pos[i].y * s, di.pos[i].x * s + di.pos[i].y 
* c); + di.pos[i] = di.pos[i] * vInvScreenSize * 2.0f - olc::vf2d(1.0f, 1.0f); + di.pos[i].y *= -1.0f; + di.w[i] = 1; + } + di.mode = nDecalMode; + di.structure = nDecalStructure; + vLayers[nTargetLayer].vecDecalInstance.push_back(di); + } + + + void PixelGameEngine::DrawPartialRotatedDecal(const olc::vf2d& pos, olc::Decal* decal, const float fAngle, const olc::vf2d& center, const olc::vf2d& source_pos, const olc::vf2d& source_size, const olc::vf2d& scale, const olc::Pixel& tint) + { + DecalInstance di; + di.decal = decal; + di.points = 4; + di.tint = { tint, tint, tint, tint }; + di.w = { 1, 1, 1, 1 }; + di.pos.resize(4); + di.pos[0] = (olc::vf2d(0.0f, 0.0f) - center) * scale; + di.pos[1] = (olc::vf2d(0.0f, source_size.y) - center) * scale; + di.pos[2] = (olc::vf2d(source_size.x, source_size.y) - center) * scale; + di.pos[3] = (olc::vf2d(source_size.x, 0.0f) - center) * scale; + float c = cos(fAngle), s = sin(fAngle); + for (int i = 0; i < 4; i++) + { + di.pos[i] = pos + olc::vf2d(di.pos[i].x * c - di.pos[i].y * s, di.pos[i].x * s + di.pos[i].y * c); + di.pos[i] = di.pos[i] * vInvScreenSize * 2.0f - olc::vf2d(1.0f, 1.0f); + di.pos[i].y *= -1.0f; + } + + olc::vf2d uvtl = source_pos * decal->vUVScale; + olc::vf2d uvbr = uvtl + (source_size * decal->vUVScale); + di.uv = { { uvtl.x, uvtl.y }, { uvtl.x, uvbr.y }, { uvbr.x, uvbr.y }, { uvbr.x, uvtl.y } }; + di.mode = nDecalMode; + di.structure = nDecalStructure; + vLayers[nTargetLayer].vecDecalInstance.push_back(di); + } + + void PixelGameEngine::DrawPartialWarpedDecal(olc::Decal* decal, const olc::vf2d* pos, const olc::vf2d& source_pos, const olc::vf2d& source_size, const olc::Pixel& tint) + { + DecalInstance di; + di.points = 4; + di.decal = decal; + di.tint = { tint, tint, tint, tint }; + di.w = { 1, 1, 1, 1 }; + di.pos.resize(4); + di.uv = { { 0.0f, 0.0f}, {0.0f, 1.0f}, {1.0f, 1.0f}, {1.0f, 0.0f} }; + olc::vf2d center; + float rd = ((pos[2].x - pos[0].x) * (pos[3].y - pos[1].y) - (pos[3].x - pos[1].x) * (pos[2].y - pos[0].y)); + if (rd != 0) + { + olc::vf2d uvtl = source_pos * decal->vUVScale; + olc::vf2d uvbr = uvtl + (source_size * decal->vUVScale); + di.uv = { { uvtl.x, uvtl.y }, { uvtl.x, uvbr.y }, { uvbr.x, uvbr.y }, { uvbr.x, uvtl.y } }; + + rd = 1.0f / rd; + float rn = ((pos[3].x - pos[1].x) * (pos[0].y - pos[1].y) - (pos[3].y - pos[1].y) * (pos[0].x - pos[1].x)) * rd; + float sn = ((pos[2].x - pos[0].x) * (pos[0].y - pos[1].y) - (pos[2].y - pos[0].y) * (pos[0].x - pos[1].x)) * rd; + if (!(rn < 0.f || rn > 1.f || sn < 0.f || sn > 1.f)) center = pos[0] + rn * (pos[2] - pos[0]); + float d[4]; for (int i = 0; i < 4; i++) d[i] = (pos[i] - center).mag(); + for (int i = 0; i < 4; i++) + { + float q = d[i] == 0.0f ? 
1.0f : (d[i] + d[(i + 2) & 3]) / d[(i + 2) & 3]; + di.uv[i] *= q; di.w[i] *= q; + di.pos[i] = { (pos[i].x * vInvScreenSize.x) * 2.0f - 1.0f, ((pos[i].y * vInvScreenSize.y) * 2.0f - 1.0f) * -1.0f }; + } + di.mode = nDecalMode; + di.structure = nDecalStructure; + vLayers[nTargetLayer].vecDecalInstance.push_back(di); + } + } + + void PixelGameEngine::DrawWarpedDecal(olc::Decal* decal, const olc::vf2d* pos, const olc::Pixel& tint) + { + // Thanks Nathan Reed, a brilliant article explaining whats going on here + // http://www.reedbeta.com/blog/quadrilateral-interpolation-part-1/ + DecalInstance di; + di.points = 4; + di.decal = decal; + di.tint = { tint, tint, tint, tint }; + di.w = { 1, 1, 1, 1 }; + di.pos.resize(4); + di.uv = { { 0.0f, 0.0f}, {0.0f, 1.0f}, {1.0f, 1.0f}, {1.0f, 0.0f} }; + olc::vf2d center; + float rd = ((pos[2].x - pos[0].x) * (pos[3].y - pos[1].y) - (pos[3].x - pos[1].x) * (pos[2].y - pos[0].y)); + if (rd != 0) + { + rd = 1.0f / rd; + float rn = ((pos[3].x - pos[1].x) * (pos[0].y - pos[1].y) - (pos[3].y - pos[1].y) * (pos[0].x - pos[1].x)) * rd; + float sn = ((pos[2].x - pos[0].x) * (pos[0].y - pos[1].y) - (pos[2].y - pos[0].y) * (pos[0].x - pos[1].x)) * rd; + if (!(rn < 0.f || rn > 1.f || sn < 0.f || sn > 1.f)) center = pos[0] + rn * (pos[2] - pos[0]); + float d[4]; for (int i = 0; i < 4; i++) d[i] = (pos[i] - center).mag(); + for (int i = 0; i < 4; i++) + { + float q = d[i] == 0.0f ? 1.0f : (d[i] + d[(i + 2) & 3]) / d[(i + 2) & 3]; + di.uv[i] *= q; di.w[i] *= q; + di.pos[i] = { (pos[i].x * vInvScreenSize.x) * 2.0f - 1.0f, ((pos[i].y * vInvScreenSize.y) * 2.0f - 1.0f) * -1.0f }; + } + di.mode = nDecalMode; + di.structure = nDecalStructure; + vLayers[nTargetLayer].vecDecalInstance.push_back(di); + } + } + + void PixelGameEngine::DrawWarpedDecal(olc::Decal* decal, const std::array& pos, const olc::Pixel& tint) + { DrawWarpedDecal(decal, pos.data(), tint); } + + void PixelGameEngine::DrawWarpedDecal(olc::Decal* decal, const olc::vf2d(&pos)[4], const olc::Pixel& tint) + { DrawWarpedDecal(decal, &pos[0], tint); } + + void PixelGameEngine::DrawPartialWarpedDecal(olc::Decal* decal, const std::array& pos, const olc::vf2d& source_pos, const olc::vf2d& source_size, const olc::Pixel& tint) + { DrawPartialWarpedDecal(decal, pos.data(), source_pos, source_size, tint); } + + void PixelGameEngine::DrawPartialWarpedDecal(olc::Decal* decal, const olc::vf2d(&pos)[4], const olc::vf2d& source_pos, const olc::vf2d& source_size, const olc::Pixel& tint) + { DrawPartialWarpedDecal(decal, &pos[0], source_pos, source_size, tint); } + + void PixelGameEngine::DrawStringDecal(const olc::vf2d& pos, const std::string& sText, const Pixel col, const olc::vf2d& scale) + { + olc::vf2d spos = { 0.0f, 0.0f }; + for (auto c : sText) + { + if (c == '\n') + { + spos.x = 0; spos.y += 8.0f * scale.y; + } + else if (c == '\t') + { + spos.x += 8.0f * float(nTabSizeInSpaces) * scale.x; + } + else + { + int32_t ox = (c - 32) % 16; + int32_t oy = (c - 32) / 16; + DrawPartialDecal(pos + spos, fontRenderable.Decal(), {float(ox) * 8.0f, float(oy) * 8.0f}, {8.0f, 8.0f}, scale, col); + spos.x += 8.0f * scale.x; + } + } + } + + void PixelGameEngine::DrawStringPropDecal(const olc::vf2d& pos, const std::string& sText, const Pixel col, const olc::vf2d& scale) + { + olc::vf2d spos = { 0.0f, 0.0f }; + for (auto c : sText) + { + if (c == '\n') + { + spos.x = 0; spos.y += 8.0f * scale.y; + } + else if (c == '\t') + { + spos.x += 8.0f * float(nTabSizeInSpaces) * scale.x; + } + else + { + int32_t ox = (c - 32) % 16; + int32_t oy = (c - 
32) / 16; + DrawPartialDecal(pos + spos, fontRenderable.Decal(), { float(ox) * 8.0f + float(vFontSpacing[c - 32].x), float(oy) * 8.0f }, { float(vFontSpacing[c - 32].y), 8.0f }, scale, col); + spos.x += float(vFontSpacing[c - 32].y) * scale.x; + } + } + } + // Thanks Oso-Grande/Sopadeoso For these awesom and stupidly clever Text Rotation routines... duh XD + void PixelGameEngine::DrawRotatedStringDecal(const olc::vf2d& pos, const std::string& sText, const float fAngle, const olc::vf2d& center, const Pixel col, const olc::vf2d& scale) + { + olc::vf2d spos = center; + for (auto c : sText) + { + if (c == '\n') + { + spos.x = center.x; spos.y -= 8.0f; + } + else if (c == '\t') + { + spos.x += 8.0f * float(nTabSizeInSpaces) * scale.x; + } + else + { + int32_t ox = (c - 32) % 16; + int32_t oy = (c - 32) / 16; + DrawPartialRotatedDecal(pos, fontRenderable.Decal(), fAngle, spos, { float(ox) * 8.0f, float(oy) * 8.0f }, { 8.0f, 8.0f }, scale, col); + spos.x -= 8.0f; + } + } + } + + void PixelGameEngine::DrawRotatedStringPropDecal(const olc::vf2d& pos, const std::string& sText, const float fAngle, const olc::vf2d& center, const Pixel col, const olc::vf2d& scale) + { + olc::vf2d spos = center; + for (auto c : sText) + { + if (c == '\n') + { + spos.x = center.x; spos.y -= 8.0f; + } + else if (c == '\t') + { + spos.x += 8.0f * float(nTabSizeInSpaces) * scale.x; + } + else + { + int32_t ox = (c - 32) % 16; + int32_t oy = (c - 32) / 16; + DrawPartialRotatedDecal(pos, fontRenderable.Decal(), fAngle, spos, { float(ox) * 8.0f + float(vFontSpacing[c - 32].x), float(oy) * 8.0f }, { float(vFontSpacing[c - 32].y), 8.0f }, scale, col); + spos.x -= float(vFontSpacing[c - 32].y); + } + } + } + + olc::vi2d PixelGameEngine::GetTextSize(const std::string& s) + { + olc::vi2d size = { 0,1 }; + olc::vi2d pos = { 0,1 }; + for (auto c : s) + { + if (c == '\n') { pos.y++; pos.x = 0; } + else if (c == '\t') { pos.x += nTabSizeInSpaces; } + else pos.x++; + size.x = std::max(size.x, pos.x); + size.y = std::max(size.y, pos.y); + } + return size * 8; + } + + void PixelGameEngine::DrawString(const olc::vi2d& pos, const std::string& sText, Pixel col, uint32_t scale) + { DrawString(pos.x, pos.y, sText, col, scale); } + + void PixelGameEngine::DrawString(int32_t x, int32_t y, const std::string& sText, Pixel col, uint32_t scale) + { + int32_t sx = 0; + int32_t sy = 0; + Pixel::Mode m = nPixelMode; + // Thanks @tucna, spotted bug with col.ALPHA :P + if (m != Pixel::CUSTOM) // Thanks @Megarev, required for "shaders" + { + if (col.a != 255) SetPixelMode(Pixel::ALPHA); + else SetPixelMode(Pixel::MASK); + } + for (auto c : sText) + { + if (c == '\n') + { + sx = 0; sy += 8 * scale; + } + else if (c == '\t') + { + sx += 8 * nTabSizeInSpaces * scale; + } + else + { + int32_t ox = (c - 32) % 16; + int32_t oy = (c - 32) / 16; + + if (scale > 1) + { + for (uint32_t i = 0; i < 8; i++) + for (uint32_t j = 0; j < 8; j++) + if (fontRenderable.Sprite()->GetPixel(i + ox * 8, j + oy * 8).r > 0) + for (uint32_t is = 0; is < scale; is++) + for (uint32_t js = 0; js < scale; js++) + Draw(x + sx + (i * scale) + is, y + sy + (j * scale) + js, col); + } + else + { + for (uint32_t i = 0; i < 8; i++) + for (uint32_t j = 0; j < 8; j++) + if (fontRenderable.Sprite()->GetPixel(i + ox * 8, j + oy * 8).r > 0) + Draw(x + sx + i, y + sy + j, col); + } + sx += 8 * scale; + } + } + SetPixelMode(m); + } + + olc::vi2d PixelGameEngine::GetTextSizeProp(const std::string& s) + { + olc::vi2d size = { 0,1 }; + olc::vi2d pos = { 0,1 }; + for (auto c : s) + { + if (c == '\n') { 
pos.y += 1; pos.x = 0; } + else if (c == '\t') { pos.x += nTabSizeInSpaces * 8; } + else pos.x += vFontSpacing[c - 32].y; + size.x = std::max(size.x, pos.x); + size.y = std::max(size.y, pos.y); + } + + size.y *= 8; + return size; + } + + void PixelGameEngine::DrawStringProp(const olc::vi2d& pos, const std::string& sText, Pixel col, uint32_t scale) + { DrawStringProp(pos.x, pos.y, sText, col, scale); } + + void PixelGameEngine::DrawStringProp(int32_t x, int32_t y, const std::string& sText, Pixel col, uint32_t scale) + { + int32_t sx = 0; + int32_t sy = 0; + Pixel::Mode m = nPixelMode; + + if (m != Pixel::CUSTOM) + { + if (col.a != 255) SetPixelMode(Pixel::ALPHA); + else SetPixelMode(Pixel::MASK); + } + for (auto c : sText) + { + if (c == '\n') + { + sx = 0; sy += 8 * scale; + } + else if (c == '\t') + { + sx += 8 * nTabSizeInSpaces * scale; + } + else + { + int32_t ox = (c - 32) % 16; + int32_t oy = (c - 32) / 16; + + if (scale > 1) + { + for (int32_t i = 0; i < vFontSpacing[c - 32].y; i++) + for (int32_t j = 0; j < 8; j++) + if (fontRenderable.Sprite()->GetPixel(i + ox * 8 + vFontSpacing[c - 32].x, j + oy * 8).r > 0) + for (int32_t is = 0; is < int(scale); is++) + for (int32_t js = 0; js < int(scale); js++) + Draw(x + sx + (i * scale) + is, y + sy + (j * scale) + js, col); + } + else + { + for (int32_t i = 0; i < vFontSpacing[c - 32].y; i++) + for (int32_t j = 0; j < 8; j++) + if (fontRenderable.Sprite()->GetPixel(i + ox * 8 + vFontSpacing[c - 32].x, j + oy * 8).r > 0) + Draw(x + sx + i, y + sy + j, col); + } + sx += vFontSpacing[c - 32].y * scale; + } + } + SetPixelMode(m); + } + + void PixelGameEngine::SetPixelMode(Pixel::Mode m) + { nPixelMode = m; } + + Pixel::Mode PixelGameEngine::GetPixelMode() + { return nPixelMode; } + + void PixelGameEngine::SetPixelMode(std::function pixelMode) + { + funcPixelMode = pixelMode; + nPixelMode = Pixel::Mode::CUSTOM; + } + + void PixelGameEngine::SetPixelBlend(float fBlend) + { + fBlendFactor = fBlend; + if (fBlendFactor < 0.0f) fBlendFactor = 0.0f; + if (fBlendFactor > 1.0f) fBlendFactor = 1.0f; + } + + std::stringstream& PixelGameEngine::ConsoleOut() + { return ssConsoleOutput; } + + bool PixelGameEngine::IsConsoleShowing() const + { return bConsoleShow; } + + void PixelGameEngine::ConsoleShow(const olc::Key& keyExit, bool bSuspendTime) + { + if (bConsoleShow) + return; + + bConsoleShow = true; + bConsoleSuspendTime = bSuspendTime; + TextEntryEnable(true); + keyConsoleExit = keyExit; + pKeyboardState[keyConsoleExit].bHeld = false; + pKeyboardState[keyConsoleExit].bPressed = false; + pKeyboardState[keyConsoleExit].bReleased = true; + } + + void PixelGameEngine::ConsoleClear() + { sConsoleLines.clear(); } + + void PixelGameEngine::ConsoleCaptureStdOut(const bool bCapture) + { + if(bCapture) + sbufOldCout = std::cout.rdbuf(ssConsoleOutput.rdbuf()); + else + std::cout.rdbuf(sbufOldCout); + } + + void PixelGameEngine::UpdateConsole() + { + if (GetKey(keyConsoleExit).bPressed) + { + TextEntryEnable(false); + bConsoleSuspendTime = false; + bConsoleShow = false; + return; + } + + // Keep Console sizes based in real screen dimensions + vConsoleCharacterScale = olc::vf2d(1.0f, 2.0f) / (olc::vf2d(vViewSize) * vInvScreenSize); + vConsoleSize = (vViewSize / olc::vi2d(8, 16)) - olc::vi2d(2, 4); + + // If console has changed size, simply reset it + if (vConsoleSize.y != sConsoleLines.size()) + { + vConsoleCursor = { 0,0 }; + sConsoleLines.clear(); + sConsoleLines.resize(vConsoleSize.y); + } + + auto TypeCharacter = [&](const char c) + { + if (c >= 32 && c < 127) + 
{ + sConsoleLines[vConsoleCursor.y].append(1, c); + vConsoleCursor.x++; + } + + if( c == '\n' || vConsoleCursor.x >= vConsoleSize.x) + { + vConsoleCursor.y++; vConsoleCursor.x = 0; + } + + if (vConsoleCursor.y >= vConsoleSize.y) + { + vConsoleCursor.y = vConsoleSize.y - 1; + for (size_t i = 1; i < vConsoleSize.y; i++) + sConsoleLines[i - 1] = sConsoleLines[i]; + sConsoleLines[vConsoleCursor.y].clear(); + } + }; + + // Empty out "std::cout", parsing as we go + while (ssConsoleOutput.rdbuf()->sgetc() != -1) + { + char c = ssConsoleOutput.rdbuf()->sbumpc(); + TypeCharacter(c); + } + + // Draw Shadow + GradientFillRectDecal({ 0,0 }, olc::vf2d(vScreenSize), olc::PixelF(0, 0, 0.5f, 0.5f), olc::PixelF(0, 0, 0.25f, 0.5f), olc::PixelF(0, 0, 0.25f, 0.5f), olc::PixelF(0, 0, 0.25f, 0.5f)); + + // Draw the console buffer + SetDecalMode(olc::DecalMode::NORMAL); + for (int32_t nLine = 0; nLine < vConsoleSize.y; nLine++) + DrawStringDecal(olc::vf2d( 1, 1 + float(nLine) ) * vConsoleCharacterScale * 8.0f, sConsoleLines[nLine], olc::WHITE, vConsoleCharacterScale); + + // Draw Input State + FillRectDecal(olc::vf2d(1 + float((TextEntryGetCursor() + 1)), 1 + float((vConsoleSize.y - 1))) * vConsoleCharacterScale * 8.0f, olc::vf2d(8, 8) * vConsoleCharacterScale, olc::DARK_CYAN); + DrawStringDecal(olc::vf2d(1, 1 + float((vConsoleSize.y - 1))) * vConsoleCharacterScale * 8.0f, std::string(">") + TextEntryGetString(), olc::YELLOW, vConsoleCharacterScale); + } + + + const std::vector& PixelGameEngine::GetDroppedFiles() const + { return vDroppedFiles; } + + const olc::vi2d& PixelGameEngine::GetDroppedFilesPoint() const + { return vDroppedFilesPoint; } + + + void PixelGameEngine::TextEntryEnable(const bool bEnable, const std::string& sText) + { + if (bEnable) + { + nTextEntryCursor = int32_t(sText.size()); + sTextEntryString = sText; + bTextEntryEnable = true; + } + else + { + bTextEntryEnable = false; + } + } + + std::string PixelGameEngine::TextEntryGetString() const + { return sTextEntryString; } + + int32_t PixelGameEngine::TextEntryGetCursor() const + { return nTextEntryCursor; } + + bool PixelGameEngine::IsTextEntryEnabled() const + { return bTextEntryEnable; } + + + void PixelGameEngine::UpdateTextEntry() + { + // Check for typed characters + for (const auto& key : vKeyboardMap) + if (GetKey(std::get<0>(key)).bPressed) + { + sTextEntryString.insert(nTextEntryCursor, GetKey(olc::Key::SHIFT).bHeld ? 
std::get<2>(key) : std::get<1>(key)); + nTextEntryCursor++; + } + + // Check for command characters + if (GetKey(olc::Key::LEFT).bPressed) + nTextEntryCursor = std::max(0, nTextEntryCursor - 1); + if (GetKey(olc::Key::RIGHT).bPressed) + nTextEntryCursor = std::min(int32_t(sTextEntryString.size()), nTextEntryCursor + 1); + if (GetKey(olc::Key::BACK).bPressed && nTextEntryCursor > 0) + { + sTextEntryString.erase(nTextEntryCursor-1, 1); + nTextEntryCursor = std::max(0, nTextEntryCursor - 1); + } + if (GetKey(olc::Key::DEL).bPressed && nTextEntryCursor < sTextEntryString.size()) + sTextEntryString.erase(nTextEntryCursor, 1); + + if (GetKey(olc::Key::UP).bPressed) + { + if (!sCommandHistory.empty()) + { + if (sCommandHistoryIt != sCommandHistory.begin()) + sCommandHistoryIt--; + + nTextEntryCursor = int32_t(sCommandHistoryIt->size()); + sTextEntryString = *sCommandHistoryIt; + } + } + + if (GetKey(olc::Key::DOWN).bPressed) + { + if (!sCommandHistory.empty()) + { + if (sCommandHistoryIt != sCommandHistory.end()) + { + sCommandHistoryIt++; + if (sCommandHistoryIt != sCommandHistory.end()) + { + nTextEntryCursor = int32_t(sCommandHistoryIt->size()); + sTextEntryString = *sCommandHistoryIt; + } + else + { + nTextEntryCursor = 0; + sTextEntryString = ""; + } + } + } + } + + if (GetKey(olc::Key::ENTER).bPressed) + { + if (bConsoleShow) + { + std::cout << ">" + sTextEntryString + "\n"; + if (OnConsoleCommand(sTextEntryString)) + { + sCommandHistory.push_back(sTextEntryString); + sCommandHistoryIt = sCommandHistory.end(); + } + sTextEntryString.clear(); + nTextEntryCursor = 0; + } + else + { + OnTextEntryComplete(sTextEntryString); + TextEntryEnable(false); + } + } + } + + // User must override these functions as required. I have not made + // them abstract because I do need a default behaviour to occur if + // they are not overwritten + + bool PixelGameEngine::OnUserCreate() + { return false; } + + bool PixelGameEngine::OnUserUpdate(float fElapsedTime) + { UNUSED(fElapsedTime); return false; } + + bool PixelGameEngine::OnUserDestroy() + { return true; } + + void PixelGameEngine::OnTextEntryComplete(const std::string& sText) { UNUSED(sText); } + bool PixelGameEngine::OnConsoleCommand(const std::string& sCommand) { UNUSED(sCommand); return false; } + + // Externalised API + void PixelGameEngine::olc_UpdateViewport() + { + int32_t ww = vScreenSize.x * vPixelSize.x; + int32_t wh = vScreenSize.y * vPixelSize.y; + float wasp = (float)ww / (float)wh; + + if (bPixelCohesion) + { + vScreenPixelSize = (vWindowSize / vScreenSize); + vViewSize = (vWindowSize / vScreenSize) * vScreenSize; + } + else + { + vViewSize.x = (int32_t)vWindowSize.x; + vViewSize.y = (int32_t)((float)vViewSize.x / wasp); + + if (vViewSize.y > vWindowSize.y) + { + vViewSize.y = vWindowSize.y; + vViewSize.x = (int32_t)((float)vViewSize.y * wasp); + } + } + + vViewPos = (vWindowSize - vViewSize) / 2; + } + + void PixelGameEngine::olc_UpdateWindowSize(int32_t x, int32_t y) + { + vWindowSize = { x, y }; + olc_UpdateViewport(); + } + + void PixelGameEngine::olc_UpdateWindowPos(int32_t x, int32_t y) + { + vWindowPos = { x, y }; + } + + void PixelGameEngine::olc_UpdateMouseWheel(int32_t delta) + { nMouseWheelDeltaCache += delta; } + + void PixelGameEngine::olc_UpdateMouse(int32_t x, int32_t y) + { + // Mouse coords come in screen space + // But leave in pixel space + bHasMouseFocus = true; + vMouseWindowPos = { x, y }; + // Full Screen mode may have a weird viewport we must clamp to + x -= vViewPos.x; + y -= vViewPos.y; + vMousePosCache.x = 
(int32_t)(((float)x / (float)(vWindowSize.x - (vViewPos.x * 2)) * (float)vScreenSize.x)); + vMousePosCache.y = (int32_t)(((float)y / (float)(vWindowSize.y - (vViewPos.y * 2)) * (float)vScreenSize.y)); + } + + void PixelGameEngine::olc_UpdateMouseState(int32_t button, bool state) + { pMouseNewState[button] = state; } + + void PixelGameEngine::olc_UpdateKeyState(int32_t key, bool state) + { pKeyNewState[key] = state; } + + void PixelGameEngine::olc_UpdateMouseFocus(bool state) + { bHasMouseFocus = state; } + + void PixelGameEngine::olc_UpdateKeyFocus(bool state) + { bHasInputFocus = state; } + + void PixelGameEngine::olc_DropFiles(int32_t x, int32_t y, const std::vector& vFiles) + { + x -= vViewPos.x; + y -= vViewPos.y; + vDroppedFilesPointCache.x = (int32_t)(((float)x / (float)(vWindowSize.x - (vViewPos.x * 2)) * (float)vScreenSize.x)); + vDroppedFilesPointCache.y = (int32_t)(((float)y / (float)(vWindowSize.y - (vViewPos.y * 2)) * (float)vScreenSize.y)); + if (vDroppedFilesPointCache.x >= (int32_t)vScreenSize.x) vDroppedFilesPointCache.x = vScreenSize.x - 1; + if (vDroppedFilesPointCache.y >= (int32_t)vScreenSize.y) vDroppedFilesPointCache.y = vScreenSize.y - 1; + if (vDroppedFilesPointCache.x < 0) vDroppedFilesPointCache.x = 0; + if (vDroppedFilesPointCache.y < 0) vDroppedFilesPointCache.y = 0; + vDroppedFilesCache = vFiles; + } + + void PixelGameEngine::olc_Reanimate() + { bAtomActive = true; } + + bool PixelGameEngine::olc_IsRunning() + { return bAtomActive; } + + void PixelGameEngine::olc_Terminate() + { bAtomActive = false; } + + void PixelGameEngine::EngineThread() + { + // Allow platform to do stuff here if needed, since its now in the + // context of this thread + if (platform->ThreadStartUp() == olc::FAIL) return; + + // Do engine context specific initialisation + olc_PrepareEngine(); + + // Create user resources as part of this thread + for (auto& ext : vExtensions) ext->OnBeforeUserCreate(); + if (!OnUserCreate()) bAtomActive = false; + for (auto& ext : vExtensions) ext->OnAfterUserCreate(); + + while (bAtomActive) + { + // Run as fast as possible + while (bAtomActive) { olc_CoreUpdate(); } + + // Allow the user to free resources if they have overrided the destroy function + if (!OnUserDestroy()) + { + // User denied destroy for some reason, so continue running + bAtomActive = true; + } + } + + platform->ThreadCleanUp(); + } + + void PixelGameEngine::olc_PrepareEngine() + { + // Start OpenGL, the context is owned by the game thread + if (platform->CreateGraphics(bFullScreen, bEnableVSYNC, vViewPos, vViewSize) == olc::FAIL) return; + + // Construct default font sheet + olc_ConstructFontSheet(); + + // Create Primary Layer "0" + CreateLayer(); + vLayers[0].bUpdate = true; + vLayers[0].bShow = true; + SetDrawTarget(nullptr); + + m_tp1 = std::chrono::system_clock::now(); + m_tp2 = std::chrono::system_clock::now(); + } + + + void PixelGameEngine::olc_CoreUpdate() + { + // Handle Timing + m_tp2 = std::chrono::system_clock::now(); + std::chrono::duration elapsedTime = m_tp2 - m_tp1; + m_tp1 = m_tp2; + + // Our time per frame coefficient + float fElapsedTime = elapsedTime.count(); + fLastElapsed = fElapsedTime; + + if (bConsoleSuspendTime) + fElapsedTime = 0.0f; + + // Some platforms will need to check for events + platform->HandleSystemEvent(); + + // Compare hardware input states from previous frame + auto ScanHardware = [&](HWButton* pKeys, bool* pStateOld, bool* pStateNew, uint32_t nKeyCount) + { + for (uint32_t i = 0; i < nKeyCount; i++) + { + pKeys[i].bPressed = false; + 
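The letterboxing arithmetic in olc_UpdateViewport a little earlier (fit the fixed-aspect game view into an arbitrary window, then centre it) is easy to verify in isolation. Below is a minimal standalone sketch of that maths for the non-pixel-cohesion path only; the V2 struct and the sample sizes are hypothetical stand-ins for olc::vi2d and real window dimensions.

#include <cstdio>

struct V2 { int x, y; };   // hypothetical stand-in for olc::vi2d

// Mirrors olc_UpdateViewport: scale to the window width, clamp to the
// window height if that overflows, then centre the resulting view.
void LetterboxViewport(V2 screen, V2 pixel, V2 window, V2& viewPos, V2& viewSize)
{
	float wasp = float(screen.x * pixel.x) / float(screen.y * pixel.y);
	viewSize.x = window.x;
	viewSize.y = int(float(viewSize.x) / wasp);
	if (viewSize.y > window.y)
	{
		viewSize.y = window.y;
		viewSize.x = int(float(viewSize.y) * wasp);
	}
	viewPos = { (window.x - viewSize.x) / 2, (window.y - viewSize.y) / 2 };
}

int main()
{
	V2 pos{}, size{};
	LetterboxViewport({ 320, 180 }, { 2, 2 }, { 1000, 700 }, pos, size);
	std::printf("view %dx%d at (%d,%d)\n", size.x, size.y, pos.x, pos.y);   // view 1000x562 at (0,69)
}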
pKeys[i].bReleased = false; + if (pStateNew[i] != pStateOld[i]) + { + if (pStateNew[i]) + { + pKeys[i].bPressed = !pKeys[i].bHeld; + pKeys[i].bHeld = true; + } + else + { + pKeys[i].bReleased = true; + pKeys[i].bHeld = false; + } + } + pStateOld[i] = pStateNew[i]; + } + }; + + ScanHardware(pKeyboardState, pKeyOldState, pKeyNewState, 256); + ScanHardware(pMouseState, pMouseOldState, pMouseNewState, nMouseButtons); + + // Cache mouse coordinates so they remain consistent during frame + vMousePos = vMousePosCache; + nMouseWheelDelta = nMouseWheelDeltaCache; + nMouseWheelDeltaCache = 0; + + vDroppedFiles = vDroppedFilesCache; + vDroppedFilesPoint = vDroppedFilesPointCache; + vDroppedFilesCache.clear(); + + if (bTextEntryEnable) + { + UpdateTextEntry(); + } + + // Handle Frame Update + bool bExtensionBlockFrame = false; + for (auto& ext : vExtensions) bExtensionBlockFrame |= ext->OnBeforeUserUpdate(fElapsedTime); + if (!bExtensionBlockFrame) + { + if (!OnUserUpdate(fElapsedTime)) bAtomActive = false; + + } + for (auto& ext : vExtensions) ext->OnAfterUserUpdate(fElapsedTime); + + if (bConsoleShow) + { + SetDrawTarget((uint8_t)0); + UpdateConsole(); + } + + + + // Display Frame + renderer->UpdateViewport(vViewPos, vViewSize); + renderer->ClearBuffer(olc::BLACK, true); + + // Layer 0 must always exist + vLayers[0].bUpdate = true; + vLayers[0].bShow = true; + SetDecalMode(DecalMode::NORMAL); + renderer->PrepareDrawing(); + + for (auto layer = vLayers.rbegin(); layer != vLayers.rend(); ++layer) + { + if (layer->bShow) + { + if (layer->funcHook == nullptr) + { + renderer->ApplyTexture(layer->pDrawTarget.Decal()->id); + if (!bSuspendTextureTransfer && layer->bUpdate) + { + layer->pDrawTarget.Decal()->Update(); + layer->bUpdate = false; + } + + renderer->DrawLayerQuad(layer->vOffset, layer->vScale, layer->tint); + + // Display Decals in order for this layer + for (auto& decal : layer->vecDecalInstance) + renderer->DrawDecal(decal); + layer->vecDecalInstance.clear(); + } + else + { + // Mwa ha ha.... Have Fun!!! 
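The ScanHardware lambda above is the input model in miniature: compare last frame's raw snapshot with this frame's, derive bPressed / bReleased / bHeld, then roll the new state into the old. A minimal standalone sketch of that edge-detection idea, with a hypothetical Button struct standing in for olc::HWButton:

#include <cstdio>

struct Button { bool bPressed = false, bReleased = false, bHeld = false; };

// Same pattern as ScanHardware: edges are derived per frame, only raw state persists.
void Scan(Button* keys, bool* oldState, const bool* newState, int count)
{
	for (int i = 0; i < count; i++)
	{
		keys[i].bPressed = false;
		keys[i].bReleased = false;
		if (newState[i] != oldState[i])
		{
			if (newState[i]) { keys[i].bPressed = !keys[i].bHeld; keys[i].bHeld = true; }
			else             { keys[i].bReleased = true;          keys[i].bHeld = false; }
		}
		oldState[i] = newState[i];
	}
}

int main()
{
	Button key[1]; bool oldS[1] = { false }, newS[1] = { true };
	Scan(key, oldS, newS, 1);   // frame 1: key goes down -> pressed=1 held=1
	std::printf("pressed=%d held=%d released=%d\n", key[0].bPressed, key[0].bHeld, key[0].bReleased);
	Scan(key, oldS, newS, 1);   // frame 2: still down    -> pressed=0 held=1
	std::printf("pressed=%d held=%d released=%d\n", key[0].bPressed, key[0].bHeld, key[0].bReleased);
}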
+ layer->funcHook(); + } + } + } + + + + // Present Graphics to screen + renderer->DisplayFrame(); + + // Update Title Bar + fFrameTimer += fElapsedTime; + nFrameCount++; + if (fFrameTimer >= 1.0f) + { + nLastFPS = nFrameCount; + fFrameTimer -= 1.0f; + std::string sTitle = "OneLoneCoder.com - Pixel Game Engine - " + sAppName + " - FPS: " + std::to_string(nFrameCount); + platform->SetWindowTitle(sTitle); + nFrameCount = 0; + } + } + + void PixelGameEngine::olc_ConstructFontSheet() + { + std::string data; + data += "?Q`0001oOch0o01o@F40o000000000"; + data += "O000000nOT0063Qo4d8>?7a14Gno94AA4gno94AaOT0>o3`oO400o7QN00000400"; + data += "Of80001oOg<7O7moBGT7O7lABET024@aBEd714AiOdl717a_=TH013Q>00000000"; + data += "720D000V?V5oB3Q_HdUoE7a9@DdDE4A9@DmoE4A;Hg]oM4Aj8S4D84@`00000000"; + data += "OaPT1000Oa`^13P1@AI[?g`1@A=[OdAoHgljA4Ao?WlBA7l1710007l100000000"; + data += "ObM6000oOfMV?3QoBDD`O7a0BDDH@5A0BDD<@5A0BGeVO5ao@CQR?5Po00000000"; + data += "Oc``000?Ogij70PO2D]??0Ph2DUM@7i`2DTg@7lh2GUj?0TO0C1870T?00000000"; + data += "70<4001o?P<7?1QoHg43O;`h@GT0@:@LB@d0>:@hN@L0@?aoN@<0O7ao0000?000"; + data += "OcH0001SOglLA7mg24TnK7ln24US>0PL24U140PnOgl0>7QgOcH0K71S0000A000"; + data += "00H00000@Dm1S007@DUSg00?OdTnH7YhOfTL<7Yh@Cl0700?@Ah0300700000000"; + data += "<008001QL00ZA41a@6HnI<1i@FHLM81M@@0LG81?O`0nC?Y7?`0ZA7Y300080000"; + data += "O`082000Oh0827mo6>Hn?Wmo?6HnMb11MP08@C11H`08@FP0@@0004@000000000"; + data += "00P00001Oab00003OcKP0006@6=PMgl<@440MglH@000000`@000001P00000000"; + data += "Ob@8@@00Ob@8@Ga13R@8Mga172@8?PAo3R@827QoOb@820@0O`0007`0000007P0"; + data += "O`000P08Od400g`<3V=P0G`673IP0`@3>1`00P@6O`P00g`SetPixel(px, py, olc::Pixel(k, k, k, k)); + if (++py == 48) { px++; py = 0; } + } + } + + fontRenderable.Decal()->Update(); + + constexpr std::array vSpacing = { { + 0x03,0x25,0x16,0x08,0x07,0x08,0x08,0x04,0x15,0x15,0x08,0x07,0x15,0x07,0x24,0x08, + 0x08,0x17,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x24,0x15,0x06,0x07,0x16,0x17, + 0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x17,0x08,0x08,0x17,0x08,0x08,0x08, + 0x08,0x08,0x08,0x08,0x17,0x08,0x08,0x08,0x08,0x17,0x08,0x15,0x08,0x15,0x08,0x08, + 0x24,0x18,0x17,0x17,0x17,0x17,0x17,0x17,0x17,0x33,0x17,0x17,0x33,0x18,0x17,0x17, + 0x17,0x17,0x17,0x17,0x07,0x17,0x17,0x18,0x18,0x17,0x17,0x07,0x33,0x07,0x08,0x00, } }; + + for (auto c : vSpacing) vFontSpacing.push_back({ c >> 4, c & 15 }); + + // UK Standard Layout +#ifdef OLC_KEYBOARD_UK + vKeyboardMap = + { + {olc::Key::A, "a", "A"}, {olc::Key::B, "b", "B"}, {olc::Key::C, "c", "C"}, {olc::Key::D, "d", "D"}, {olc::Key::E, "e", "E"}, + {olc::Key::F, "f", "F"}, {olc::Key::G, "g", "G"}, {olc::Key::H, "h", "H"}, {olc::Key::I, "i", "I"}, {olc::Key::J, "j", "J"}, + {olc::Key::K, "k", "K"}, {olc::Key::L, "l", "L"}, {olc::Key::M, "m", "M"}, {olc::Key::N, "n", "N"}, {olc::Key::O, "o", "O"}, + {olc::Key::P, "p", "P"}, {olc::Key::Q, "q", "Q"}, {olc::Key::R, "r", "R"}, {olc::Key::S, "s", "S"}, {olc::Key::T, "t", "T"}, + {olc::Key::U, "u", "U"}, {olc::Key::V, "v", "V"}, {olc::Key::W, "w", "W"}, {olc::Key::X, "x", "X"}, {olc::Key::Y, "y", "Y"}, + {olc::Key::Z, "z", "Z"}, + + {olc::Key::K0, "0", ")"}, {olc::Key::K1, "1", "!"}, {olc::Key::K2, "2", "\""}, {olc::Key::K3, "3", "#"}, {olc::Key::K4, "4", "$"}, + {olc::Key::K5, "5", "%"}, {olc::Key::K6, "6", "^"}, {olc::Key::K7, "7", "&"}, {olc::Key::K8, "8", "*"}, {olc::Key::K9, "9", "("}, + + {olc::Key::NP0, "0", "0"}, {olc::Key::NP1, "1", "1"}, {olc::Key::NP2, "2", "2"}, {olc::Key::NP3, "3", "3"}, {olc::Key::NP4, "4", "4"}, + {olc::Key::NP5, "5", "5"}, {olc::Key::NP6, "6", 
"6"}, {olc::Key::NP7, "7", "7"}, {olc::Key::NP8, "8", "8"}, {olc::Key::NP9, "9", "9"}, + {olc::Key::NP_MUL, "*", "*"}, {olc::Key::NP_DIV, "/", "/"}, {olc::Key::NP_ADD, "+", "+"}, {olc::Key::NP_SUB, "-", "-"}, {olc::Key::NP_DECIMAL, ".", "."}, + + {olc::Key::PERIOD, ".", ">"}, {olc::Key::EQUALS, "=", "+"}, {olc::Key::COMMA, ",", "<"}, {olc::Key::MINUS, "-", "_"}, {olc::Key::SPACE, " ", " "}, + + {olc::Key::OEM_1, ";", ":"}, {olc::Key::OEM_2, "/", "?"}, {olc::Key::OEM_3, "\'", "@"}, {olc::Key::OEM_4, "[", "{"}, + {olc::Key::OEM_5, "\\", "|"}, {olc::Key::OEM_6, "]", "}"}, {olc::Key::OEM_7, "#", "~"}, + + // {olc::Key::TAB, "\t", "\t"} + }; +#endif + } + + void PixelGameEngine::pgex_Register(olc::PGEX* pgex) + { + if (std::find(vExtensions.begin(), vExtensions.end(), pgex) == vExtensions.end()) + vExtensions.push_back(pgex); + } + + + PGEX::PGEX(bool bHook) { if(bHook) pge->pgex_Register(this); } + void PGEX::OnBeforeUserCreate() {} + void PGEX::OnAfterUserCreate() {} + bool PGEX::OnBeforeUserUpdate(float& fElapsedTime) { return false; } + void PGEX::OnAfterUserUpdate(float fElapsedTime) {} + + // Need a couple of statics as these are singleton instances + // read from multiple locations + std::atomic PixelGameEngine::bAtomActive{ false }; + olc::PixelGameEngine* olc::PGEX::pge = nullptr; + olc::PixelGameEngine* olc::Platform::ptrPGE = nullptr; + olc::PixelGameEngine* olc::Renderer::ptrPGE = nullptr; + std::unique_ptr olc::Sprite::loader = nullptr; +}; +#pragma endregion + + +#pragma region platform_headless +namespace olc +{ +#if defined(OLC_GFX_HEADLESS) + class Renderer_Headless : public olc::Renderer + { + public: + virtual void PrepareDevice() {}; + virtual olc::rcode CreateDevice(std::vector params, bool bFullScreen, bool bVSYNC) { return olc::rcode::OK; } + virtual olc::rcode DestroyDevice() { return olc::rcode::OK; } + virtual void DisplayFrame() {} + virtual void PrepareDrawing() {} + virtual void SetDecalMode(const olc::DecalMode& mode) {} + virtual void DrawLayerQuad(const olc::vf2d& offset, const olc::vf2d& scale, const olc::Pixel tint) {} + virtual void DrawDecal(const olc::DecalInstance& decal) {} + virtual uint32_t CreateTexture(const uint32_t width, const uint32_t height, const bool filtered = false, const bool clamp = true) {return 1;}; + virtual void UpdateTexture(uint32_t id, olc::Sprite* spr) {} + virtual void ReadTexture(uint32_t id, olc::Sprite* spr) {} + virtual uint32_t DeleteTexture(const uint32_t id) {return 1;} + virtual void ApplyTexture(uint32_t id) {} + virtual void UpdateViewport(const olc::vi2d& pos, const olc::vi2d& size) {} + virtual void ClearBuffer(olc::Pixel p, bool bDepth) {} + }; +#endif +#if defined(OLC_PLATFORM_HEADLESS) + class Platform_Headless : public olc::Platform + { + public: + virtual olc::rcode ApplicationStartUp() { return olc::rcode::OK; } + virtual olc::rcode ApplicationCleanUp() { return olc::rcode::OK; } + virtual olc::rcode ThreadStartUp() { return olc::rcode::OK; } + virtual olc::rcode ThreadCleanUp() { return olc::rcode::OK; } + virtual olc::rcode CreateGraphics(bool bFullScreen, bool bEnableVSYNC, const olc::vi2d& vViewPos, const olc::vi2d& vViewSize) { return olc::rcode::OK; } + virtual olc::rcode CreateWindowPane(const olc::vi2d& vWindowPos, olc::vi2d& vWindowSize, bool bFullScreen) { return olc::rcode::OK; } + virtual olc::rcode SetWindowTitle(const std::string& s) { return olc::rcode::OK; } + virtual olc::rcode StartSystemEventLoop() { return olc::rcode::OK; } + virtual olc::rcode HandleSystemEvent() { return olc::rcode::OK; } + }; 
+#endif +} +#pragma endregion + +// O------------------------------------------------------------------------------O +// | olcPixelGameEngine Renderers - the draw-y bits | +// O------------------------------------------------------------------------------O + +#pragma region image_stb +// O------------------------------------------------------------------------------O +// | START IMAGE LOADER: stb_image.h, all systems, very fast | +// O------------------------------------------------------------------------------O +// Thanks to Sean Barrett - https://github.com/nothings/stb/blob/master/stb_image.h +// MIT License - Copyright(c) 2017 Sean Barrett + +// Note you need to download the above file into your project folder, and +// #define OLC_IMAGE_STB +// #define OLC_PGE_APPLICATION +// #include "olcPixelGameEngine.h" + +#if defined(OLC_IMAGE_STB) +#define STB_IMAGE_IMPLEMENTATION +#include "stb_image.h" +namespace olc +{ + class ImageLoader_STB : public olc::ImageLoader + { + public: + ImageLoader_STB() : ImageLoader() + {} + + olc::rcode LoadImageResource(olc::Sprite* spr, const std::string& sImageFile, olc::ResourcePack* pack) override + { + UNUSED(pack); + // clear out existing sprite + spr->pColData.clear(); + // Open file + stbi_uc* bytes = nullptr; + int w = 0, h = 0, cmp = 0; + if (pack != nullptr) + { + ResourceBuffer rb = pack->GetFileBuffer(sImageFile); + bytes = stbi_load_from_memory((unsigned char*)rb.vMemory.data(), rb.vMemory.size(), &w, &h, &cmp, 4); + } + else + { + // Check file exists + if (!_gfs::exists(sImageFile)) return olc::rcode::NO_FILE; + bytes = stbi_load(sImageFile.c_str(), &w, &h, &cmp, 4); + } + + if (!bytes) return olc::rcode::FAIL; + spr->width = w; spr->height = h; + spr->pColData.resize(spr->width * spr->height); + std::memcpy(spr->pColData.data(), bytes, spr->width * spr->height * 4); + delete[] bytes; + return olc::rcode::OK; + } + + olc::rcode SaveImageResource(olc::Sprite* spr, const std::string& sImageFile) override + { + return olc::rcode::OK; + } + }; +} +#endif +// O------------------------------------------------------------------------------O +// | START IMAGE LOADER: stb_image.h | +// O------------------------------------------------------------------------------O +#pragma endregion + + + +#if !defined(OLC_PGE_HEADLESS) + +#pragma region renderer_ogl10 +// O------------------------------------------------------------------------------O +// | START RENDERER: OpenGL 1.0 (the original, the best...) 
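As the note above spells out, the stb loader is opt-in: stb_image.h has to live in the project folder, and OLC_IMAGE_STB must be defined before the engine implementation is emitted. A minimal sketch of the top of a hypothetical main.cpp:

// main.cpp (sketch): select stb_image as the image loader
#define OLC_IMAGE_STB          // use ImageLoader_STB instead of the platform default
#define OLC_PGE_APPLICATION    // emit the engine implementation in this translation unit
#include "olcPixelGameEngine.h"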
| +// O------------------------------------------------------------------------------O +#if defined(OLC_GFX_OPENGL10) + +#if defined(OLC_PLATFORM_WINAPI) + #include + #include + #if !defined(__MINGW32__) + #pragma comment(lib, "Dwmapi.lib") + #endif + typedef BOOL(WINAPI wglSwapInterval_t) (int interval); + static wglSwapInterval_t* wglSwapInterval = nullptr; + typedef HDC glDeviceContext_t; + typedef HGLRC glRenderContext_t; +#endif + +#if defined(__linux__) || defined(__FreeBSD__) + #include +#endif + +#if defined(OLC_PLATFORM_X11) + namespace X11 + { + #include + } + typedef int(glSwapInterval_t)(X11::Display* dpy, X11::GLXDrawable drawable, int interval); + static glSwapInterval_t* glSwapIntervalEXT; + typedef X11::GLXContext glDeviceContext_t; + typedef X11::GLXContext glRenderContext_t; +#endif + +#if defined(__APPLE__) + #define GL_SILENCE_DEPRECATION + #include + #include + #include +#endif + +namespace olc +{ + class Renderer_OGL10 : public olc::Renderer + { + private: +#if defined(OLC_PLATFORM_GLUT) + bool mFullScreen = false; +#else + glDeviceContext_t glDeviceContext = 0; + glRenderContext_t glRenderContext = 0; +#endif + + bool bSync = false; + olc::DecalMode nDecalMode = olc::DecalMode(-1); // Thanks Gusgo & Bispoo + olc::DecalStructure nDecalStructure = olc::DecalStructure(-1); +#if defined(OLC_PLATFORM_X11) + X11::Display* olc_Display = nullptr; + X11::Window* olc_Window = nullptr; + X11::XVisualInfo* olc_VisualInfo = nullptr; +#endif + + public: + void PrepareDevice() override + { +#if defined(OLC_PLATFORM_GLUT) + //glutInit has to be called with main() arguments, make fake ones + int argc = 0; + char* argv[1] = { (char*)"" }; + glutInit(&argc, argv); + glutInitWindowPosition(0, 0); + glutInitWindowSize(512, 512); + glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGBA); + // Creates the window and the OpenGL context for it + glutCreateWindow("OneLoneCoder.com - Pixel Game Engine"); + glEnable(GL_TEXTURE_2D); // Turn on texturing + glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST); +#endif + } + + olc::rcode CreateDevice(std::vector params, bool bFullScreen, bool bVSYNC) override + { +#if defined(OLC_PLATFORM_WINAPI) + // Create Device Context + glDeviceContext = GetDC((HWND)(params[0])); + PIXELFORMATDESCRIPTOR pfd = + { + sizeof(PIXELFORMATDESCRIPTOR), 1, + PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER, + PFD_TYPE_RGBA, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + PFD_MAIN_PLANE, 0, 0, 0, 0 + }; + + int pf = 0; + if (!(pf = ChoosePixelFormat(glDeviceContext, &pfd))) return olc::FAIL; + SetPixelFormat(glDeviceContext, pf, &pfd); + + if (!(glRenderContext = wglCreateContext(glDeviceContext))) return olc::FAIL; + wglMakeCurrent(glDeviceContext, glRenderContext); + + // Remove Frame cap + wglSwapInterval = (wglSwapInterval_t*)wglGetProcAddress("wglSwapIntervalEXT"); + if (wglSwapInterval && !bVSYNC) wglSwapInterval(0); + bSync = bVSYNC; +#endif + +#if defined(OLC_PLATFORM_X11) + using namespace X11; + // Linux has tighter coupling between OpenGL and X11, so we store + // various "platform" handles in the renderer + olc_Display = (X11::Display*)(params[0]); + olc_Window = (X11::Window*)(params[1]); + olc_VisualInfo = (X11::XVisualInfo*)(params[2]); + + glDeviceContext = glXCreateContext(olc_Display, olc_VisualInfo, nullptr, GL_TRUE); + glXMakeCurrent(olc_Display, *olc_Window, glDeviceContext); + + XWindowAttributes gwa; + XGetWindowAttributes(olc_Display, *olc_Window, &gwa); + glViewport(0, 0, gwa.width, gwa.height); + + glSwapIntervalEXT = nullptr; + 
glSwapIntervalEXT = (glSwapInterval_t*)glXGetProcAddress((unsigned char*)"glXSwapIntervalEXT"); + + if (glSwapIntervalEXT == nullptr && !bVSYNC) + { + printf("NOTE: Could not disable VSYNC, glXSwapIntervalEXT() was not found!\n"); + printf(" Don't worry though, things will still work, it's just the\n"); + printf(" frame rate will be capped to your monitors refresh rate - javidx9\n"); + } + + if (glSwapIntervalEXT != nullptr && !bVSYNC) + glSwapIntervalEXT(olc_Display, *olc_Window, 0); +#endif + +#if defined(OLC_PLATFORM_GLUT) + mFullScreen = bFullScreen; + if (!bVSYNC) + { +#if defined(__APPLE__) + GLint sync = 0; + CGLContextObj ctx = CGLGetCurrentContext(); + if (ctx) CGLSetParameter(ctx, kCGLCPSwapInterval, &sync); +#endif + } +#else + glEnable(GL_TEXTURE_2D); // Turn on texturing + glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST); +#endif + return olc::rcode::OK; + } + + olc::rcode DestroyDevice() override + { +#if defined(OLC_PLATFORM_WINAPI) + wglDeleteContext(glRenderContext); +#endif + +#if defined(OLC_PLATFORM_X11) + glXMakeCurrent(olc_Display, None, NULL); + glXDestroyContext(olc_Display, glDeviceContext); +#endif + +#if defined(OLC_PLATFORM_GLUT) + glutDestroyWindow(glutGetWindow()); +#endif + return olc::rcode::OK; + } + + void DisplayFrame() override + { +#if defined(OLC_PLATFORM_WINAPI) + SwapBuffers(glDeviceContext); + if (bSync) DwmFlush(); // Woooohooooooo!!!! SMOOOOOOOTH! +#endif + +#if defined(OLC_PLATFORM_X11) + X11::glXSwapBuffers(olc_Display, *olc_Window); +#endif + +#if defined(OLC_PLATFORM_GLUT) + glutSwapBuffers(); +#endif + } + + void PrepareDrawing() override + { + + //ClearBuffer(olc::GREEN, true); + glEnable(GL_BLEND); + nDecalMode = DecalMode::NORMAL; + nDecalStructure = DecalStructure::FAN; + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + } + + void SetDecalMode(const olc::DecalMode& mode) + { + if (mode != nDecalMode) + { + switch (mode) + { + case olc::DecalMode::NORMAL: + case olc::DecalMode::MODEL3D: + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + break; + case olc::DecalMode::ADDITIVE: + glBlendFunc(GL_SRC_ALPHA, GL_ONE); + break; + case olc::DecalMode::MULTIPLICATIVE: + glBlendFunc(GL_DST_COLOR, GL_ONE_MINUS_SRC_ALPHA); + break; + case olc::DecalMode::STENCIL: + glBlendFunc(GL_ZERO, GL_SRC_ALPHA); + break; + case olc::DecalMode::ILLUMINATE: + glBlendFunc(GL_ONE_MINUS_SRC_ALPHA, GL_SRC_ALPHA); + break; + case olc::DecalMode::WIREFRAME: + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + break; + } + + nDecalMode = mode; + } + } + + void DrawLayerQuad(const olc::vf2d& offset, const olc::vf2d& scale, const olc::Pixel tint) override + { + glBegin(GL_QUADS); + glColor4ub(tint.r, tint.g, tint.b, tint.a); + glTexCoord2f(0.0f * scale.x + offset.x, 1.0f * scale.y + offset.y); + glVertex3f(-1.0f /*+ vSubPixelOffset.x*/, -1.0f /*+ vSubPixelOffset.y*/, 0.0f); + glTexCoord2f(0.0f * scale.x + offset.x, 0.0f * scale.y + offset.y); + glVertex3f(-1.0f /*+ vSubPixelOffset.x*/, 1.0f /*+ vSubPixelOffset.y*/, 0.0f); + glTexCoord2f(1.0f * scale.x + offset.x, 0.0f * scale.y + offset.y); + glVertex3f(1.0f /*+ vSubPixelOffset.x*/, 1.0f /*+ vSubPixelOffset.y*/, 0.0f); + glTexCoord2f(1.0f * scale.x + offset.x, 1.0f * scale.y + offset.y); + glVertex3f(1.0f /*+ vSubPixelOffset.x*/, -1.0f /*+ vSubPixelOffset.y*/, 0.0f); + glEnd(); + } + + void DrawDecal(const olc::DecalInstance& decal) override + { + SetDecalMode(decal.mode); + + if (decal.decal == nullptr) + glBindTexture(GL_TEXTURE_2D, 0); + else + glBindTexture(GL_TEXTURE_2D, decal.decal->id); + + if (nDecalMode 
== DecalMode::MODEL3D) + { +#ifdef OLC_ENABLE_EXPERIMENTAL + glMatrixMode(GL_PROJECTION); glPushMatrix(); + glMatrixMode(GL_MODELVIEW); glPushMatrix(); + + glEnable(GL_DEPTH_TEST); + glMatrixMode(GL_PROJECTION); + glLoadIdentity(); + glFrustum(-1.0f, 1.0f, -1.0f, 1.0f, 1, 1000); + + #pragma comment (lib, "winmm.lib") + + glMatrixMode(GL_MODELVIEW); + glLoadIdentity(); + glTranslatef(0, -40, -200); + glRotatef(float(clock()) * 0.1f, 1, 0, 0); + glRotatef(float(clock()) * 0.1f * 2, 0, 1, 0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + glBegin(GL_TRIANGLES); + + + // Render as 3D Spatial Entity + for (uint32_t n = 0; n < decal.points; n++) + { + glColor4ub(decal.tint[n].r, decal.tint[n].g, decal.tint[n].b, decal.tint[n].a); + glTexCoord2f(decal.uv[n].x, decal.uv[n].y); + glVertex3f(decal.pos[n].x, decal.pos[n].y, decal.w[n]); + } + + glEnd(); + + glMatrixMode(GL_PROJECTION); glPopMatrix(); + glMatrixMode(GL_MODELVIEW); glPopMatrix(); + glDisable(GL_DEPTH_TEST); +#endif + } + else + { + if (nDecalMode == DecalMode::WIREFRAME) + glBegin(GL_LINE_LOOP); + else + { + if(decal.structure == olc::DecalStructure::FAN) + glBegin(GL_TRIANGLE_FAN); + else if(decal.structure == olc::DecalStructure::STRIP) + glBegin(GL_TRIANGLE_STRIP); + else if(decal.structure == olc::DecalStructure::LIST) + glBegin(GL_TRIANGLES); + } + + // Render as 2D Spatial entity + for (uint32_t n = 0; n < decal.points; n++) + { + glColor4ub(decal.tint[n].r, decal.tint[n].g, decal.tint[n].b, decal.tint[n].a); + glTexCoord4f(decal.uv[n].x, decal.uv[n].y, 0.0f, decal.w[n]); + glVertex2f(decal.pos[n].x, decal.pos[n].y); + } + + glEnd(); + } + + + //glDisable(GL_DEPTH_TEST); + } + + uint32_t CreateTexture(const uint32_t width, const uint32_t height, const bool filtered, const bool clamp) override + { + UNUSED(width); + UNUSED(height); + uint32_t id = 0; + glGenTextures(1, &id); + glBindTexture(GL_TEXTURE_2D, id); + if (filtered) + { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + } + else + { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); + } + + if (clamp) + { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP); + } + else + { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); + } + + glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE); + return id; + } + + uint32_t DeleteTexture(const uint32_t id) override + { + glDeleteTextures(1, &id); + return id; + } + + void UpdateTexture(uint32_t id, olc::Sprite* spr) override + { + UNUSED(id); + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, spr->width, spr->height, 0, GL_RGBA, GL_UNSIGNED_BYTE, spr->GetData()); + } + + void ReadTexture(uint32_t id, olc::Sprite* spr) override + { + glReadPixels(0, 0, spr->width, spr->height, GL_RGBA, GL_UNSIGNED_BYTE, spr->GetData()); + } + + void ApplyTexture(uint32_t id) override + { + glBindTexture(GL_TEXTURE_2D, id); + } + + void ClearBuffer(olc::Pixel p, bool bDepth) override + { + glClearColor(float(p.r) / 255.0f, float(p.g) / 255.0f, float(p.b) / 255.0f, float(p.a) / 255.0f); + glClear(GL_COLOR_BUFFER_BIT); + if (bDepth) glClear(GL_DEPTH_BUFFER_BIT); + } + + void UpdateViewport(const olc::vi2d& pos, const olc::vi2d& size) override + { + glViewport(pos.x, pos.y, size.x, size.y); + } + }; +} 
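SetDecalMode in the renderer above only swaps glBlendFunc factors, so switching modes between draw calls is cheap and takes effect on the next decal submitted. A hedged user-side sketch of how that surfaces through the public API (assumes a method inside a class derived from olc::PixelGameEngine and an already-loaded olc::Renderable; DrawGlow and sprGlow are made-up names):

// ADDITIVE maps to glBlendFunc(GL_SRC_ALPHA, GL_ONE) in the renderer above,
// so overlapping decals brighten instead of overwriting each other.
void DrawGlow(olc::Renderable& sprGlow, const olc::vf2d& pos)
{
	SetDecalMode(olc::DecalMode::ADDITIVE);
	DrawDecal(pos, sprGlow.Decal());
	SetDecalMode(olc::DecalMode::NORMAL);   // restore standard alpha blending
}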
+#endif +// O------------------------------------------------------------------------------O +// | END RENDERER: OpenGL 1.0 (the original, the best...) | +// O------------------------------------------------------------------------------O +#pragma endregion + +#pragma region renderer_ogl33 +// O------------------------------------------------------------------------------O +// | START RENDERER: OpenGL 3.3 (3.0 es) (sh-sh-sh-shaders....) | +// O------------------------------------------------------------------------------O +#if defined(OLC_GFX_OPENGL33) + +#if defined(OLC_PLATFORM_WINAPI) + #include + //#include + #if !defined(__MINGW32__) + #pragma comment(lib, "Dwmapi.lib") + #endif + //typedef void __stdcall locSwapInterval_t(GLsizei n); + typedef HDC glDeviceContext_t; + typedef HGLRC glRenderContext_t; + //#define CALLSTYLE __stdcall + #define OGL_LOAD(t, n) (t*)wglGetProcAddress(#n) +#endif +// +//#if defined(__linux__) || defined(__FreeBSD__) +// #include +//#endif + +#if defined(OLC_PLATFORM_X11) + /*namespace X11 + { + #include + } + typedef int(locSwapInterval_t)(X11::Display* dpy, X11::GLXDrawable drawable, int interval);*/ + typedef X11::GLXContext glDeviceContext_t; + typedef X11::GLXContext glRenderContext_t; + //#define CALLSTYLE + #define OGL_LOAD(t, n) (t*)glXGetProcAddress((unsigned char*)#n); +#endif + +//#if defined(__APPLE__) +// #define GL_SILENCE_DEPRECATION +// #include +// #include +// #include +//#endif + +#if defined(OLC_PLATFORM_EMSCRIPTEN) + #include + #include + #define GL_GLEXT_PROTOTYPES + #include + #include + #define CALLSTYLE + typedef EGLBoolean(locSwapInterval_t)(EGLDisplay display, EGLint interval); + #define GL_CLAMP GL_CLAMP_TO_EDGE + #define OGL_LOAD(t, n) n; +#endif + +namespace olc +{ +// typedef char GLchar; +// typedef ptrdiff_t GLsizeiptr; +// typedef GLuint CALLSTYLE locCreateShader_t(GLenum type); +// typedef GLuint CALLSTYLE locCreateProgram_t(void); +// typedef void CALLSTYLE locDeleteShader_t(GLuint shader); +//#if defined(OLC_PLATFORM_EMSCRIPTEN) +// typedef void CALLSTYLE locShaderSource_t(GLuint shader, GLsizei count, const GLchar* const* string, const GLint* length); +//#else +// typedef void CALLSTYLE locShaderSource_t(GLuint shader, GLsizei count, const GLchar** string, const GLint* length); +//#endif +// typedef void CALLSTYLE locCompileShader_t(GLuint shader); +// typedef void CALLSTYLE locLinkProgram_t(GLuint program); +// typedef void CALLSTYLE locDeleteProgram_t(GLuint program); +// typedef void CALLSTYLE locAttachShader_t(GLuint program, GLuint shader); +// typedef void CALLSTYLE locBindBuffer_t(GLenum target, GLuint buffer); +// typedef void CALLSTYLE locBufferData_t(GLenum target, GLsizeiptr size, const void* data, GLenum usage); +// typedef void CALLSTYLE locGenBuffers_t(GLsizei n, GLuint* buffers); +// typedef void CALLSTYLE locVertexAttribPointer_t(GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void* pointer); +// typedef void CALLSTYLE locEnableVertexAttribArray_t(GLuint index); +// typedef void CALLSTYLE locUseProgram_t(GLuint program); +// typedef void CALLSTYLE locBindVertexArray_t(GLuint array); +// typedef void CALLSTYLE locGenVertexArrays_t(GLsizei n, GLuint* arrays); +// typedef void CALLSTYLE locGetShaderInfoLog_t(GLuint shader, GLsizei bufSize, GLsizei* length, GLchar* infoLog); +// typedef GLint CALLSTYLE locGetUniformLocation_t(GLuint program, const GLchar* name); +// typedef void CALLSTYLE locUniform1f_t(GLint location, GLfloat v0); +// typedef void CALLSTYLE 
locUniform1i_t(GLint location, GLint v0); +// typedef void CALLSTYLE locUniform2fv_t(GLint location, GLsizei count, const GLfloat* value); +// typedef void CALLSTYLE locActiveTexture_t(GLenum texture); +// typedef void CALLSTYLE locGenFrameBuffers_t(GLsizei n, GLuint* ids); +// typedef void CALLSTYLE locBindFrameBuffer_t(GLenum target, GLuint fb); +// typedef GLenum CALLSTYLE locCheckFrameBufferStatus_t(GLenum target); +// typedef void CALLSTYLE locDeleteFrameBuffers_t(GLsizei n, const GLuint* fbs); +// typedef void CALLSTYLE locFrameBufferTexture2D_t(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +// typedef void CALLSTYLE locDrawBuffers_t(GLsizei n, const GLenum* bufs); +// typedef void CALLSTYLE locBlendFuncSeparate_t(GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); + + + + class Renderer_OGL33 : public olc::Renderer + { + private: +#if defined(OLC_PLATFORM_EMSCRIPTEN) + EGLDisplay olc_Display; + EGLConfig olc_Config; + EGLContext olc_Context; + EGLSurface olc_Surface; +#endif + +#if defined(OLC_PLATFORM_GLUT) + bool mFullScreen = false; +#else + #if !defined(OLC_PLATFORM_EMSCRIPTEN) + glDeviceContext_t glDeviceContext = 0; + glRenderContext_t glRenderContext = 0; + #endif +#endif + bool bSync = false; + olc::DecalMode nDecalMode = olc::DecalMode(-1); // Thanks Gusgo & Bispoo +#if defined(OLC_PLATFORM_X11) + X11::Display* olc_Display = nullptr; + X11::Window* olc_Window = nullptr; + X11::XVisualInfo* olc_VisualInfo = nullptr; +#endif + + private: + locCreateShader_t* locCreateShader = nullptr; + locShaderSource_t* locShaderSource = nullptr; + locCompileShader_t* locCompileShader = nullptr; + locDeleteShader_t* locDeleteShader = nullptr; + locCreateProgram_t* locCreateProgram = nullptr; + locDeleteProgram_t* locDeleteProgram = nullptr; + locLinkProgram_t* locLinkProgram = nullptr; + locAttachShader_t* locAttachShader = nullptr; + locBindBuffer_t* locBindBuffer = nullptr; + locBufferData_t* locBufferData = nullptr; + locGenBuffers_t* locGenBuffers = nullptr; + locVertexAttribPointer_t* locVertexAttribPointer = nullptr; + locEnableVertexAttribArray_t* locEnableVertexAttribArray = nullptr; + locUseProgram_t* locUseProgram = nullptr; + locBindVertexArray_t* locBindVertexArray = nullptr; + locGenVertexArrays_t* locGenVertexArrays = nullptr; + locSwapInterval_t* locSwapInterval = nullptr; + locGetShaderInfoLog_t* locGetShaderInfoLog = nullptr; + + uint32_t m_nFS = 0; + uint32_t m_nVS = 0; + uint32_t m_nQuadShader = 0; + uint32_t m_vbQuad = 0; + uint32_t m_vaQuad = 0; + + struct locVertex + { + float pos[3]; + olc::vf2d tex; + olc::Pixel col; + }; + + locVertex pVertexMem[OLC_MAX_VERTS]; + + olc::Renderable rendBlankQuad; + + public: + void PrepareDevice() override + { +#if defined(OLC_PLATFORM_GLUT) + //glutInit has to be called with main() arguments, make fake ones + int argc = 0; + char* argv[1] = { (char*)"" }; + glutInit(&argc, argv); + glutInitWindowPosition(0, 0); + glutInitWindowSize(512, 512); + glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGBA); + // Creates the window and the OpenGL context for it + glutCreateWindow("OneLoneCoder.com - Pixel Game Engine"); + glEnable(GL_TEXTURE_2D); // Turn on texturing + glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST); +#endif + } + + olc::rcode CreateDevice(std::vector params, bool bFullScreen, bool bVSYNC) override + { + // Create OpenGL Context +#if defined(OLC_PLATFORM_WINAPI) + // Create Device Context + glDeviceContext = GetDC((HWND)(params[0])); + PIXELFORMATDESCRIPTOR pfd = + { + 
sizeof(PIXELFORMATDESCRIPTOR), 1, + PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER, + PFD_TYPE_RGBA, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + PFD_MAIN_PLANE, 0, 0, 0, 0 + }; + + int pf = 0; + if (!(pf = ChoosePixelFormat(glDeviceContext, &pfd))) return olc::FAIL; + SetPixelFormat(glDeviceContext, pf, &pfd); + + if (!(glRenderContext = wglCreateContext(glDeviceContext))) return olc::FAIL; + wglMakeCurrent(glDeviceContext, glRenderContext); + + // Set Vertical Sync + locSwapInterval = OGL_LOAD(locSwapInterval_t, wglSwapIntervalEXT); + if (locSwapInterval && !bVSYNC) locSwapInterval(0); + bSync = bVSYNC; +#endif + +#if defined(OLC_PLATFORM_X11) + using namespace X11; + // Linux has tighter coupling between OpenGL and X11, so we store + // various "platform" handles in the renderer + olc_Display = (X11::Display*)(params[0]); + olc_Window = (X11::Window*)(params[1]); + olc_VisualInfo = (X11::XVisualInfo*)(params[2]); + + glDeviceContext = glXCreateContext(olc_Display, olc_VisualInfo, nullptr, GL_TRUE); + glXMakeCurrent(olc_Display, *olc_Window, glDeviceContext); + + XWindowAttributes gwa; + XGetWindowAttributes(olc_Display, *olc_Window, &gwa); + glViewport(0, 0, gwa.width, gwa.height); + + locSwapInterval = OGL_LOAD(locSwapInterval_t, glXSwapIntervalEXT); + + if (locSwapInterval == nullptr && !bVSYNC) + { + printf("NOTE: Could not disable VSYNC, glXSwapIntervalEXT() was not found!\n"); + printf(" Don't worry though, things will still work, it's just the\n"); + printf(" frame rate will be capped to your monitors refresh rate - javidx9\n"); + } + + if (locSwapInterval != nullptr && !bVSYNC) + locSwapInterval(olc_Display, *olc_Window, 0); +#endif + +#if defined(OLC_PLATFORM_EMSCRIPTEN) + EGLint const attribute_list[] = { EGL_RED_SIZE, 8, EGL_GREEN_SIZE, 8, EGL_BLUE_SIZE, 8, EGL_ALPHA_SIZE, 8, EGL_NONE }; + EGLint const context_config[] = { EGL_CONTEXT_CLIENT_VERSION , 2, EGL_NONE }; + EGLint num_config; + + olc_Display = eglGetDisplay(EGL_DEFAULT_DISPLAY); + eglInitialize(olc_Display, nullptr, nullptr); + eglChooseConfig(olc_Display, attribute_list, &olc_Config, 1, &num_config); + + /* create an EGL rendering context */ + olc_Context = eglCreateContext(olc_Display, olc_Config, EGL_NO_CONTEXT, context_config); + olc_Surface = eglCreateWindowSurface(olc_Display, olc_Config, NULL, nullptr); + eglMakeCurrent(olc_Display, olc_Surface, olc_Surface, olc_Context); + //eglSwapInterval is currently a NOP, plement anyways in case it becomes supported + locSwapInterval = &eglSwapInterval; + locSwapInterval(olc_Display, bVSYNC ? 
1 : 0); +#endif + +#if defined(OLC_PLATFORM_GLUT) + mFullScreen = bFullScreen; + if (!bVSYNC) + { +#if defined(__APPLE__) + GLint sync = 0; + CGLContextObj ctx = CGLGetCurrentContext(); + if (ctx) CGLSetParameter(ctx, kCGLCPSwapInterval, &sync); +#endif + } +#else + #if !defined(OLC_PLATFORM_EMSCRIPTEN) + glEnable(GL_TEXTURE_2D); // Turn on texturing + glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST); + #endif +#endif + // Load External OpenGL Functions + locCreateShader = OGL_LOAD(locCreateShader_t, glCreateShader); + locCompileShader = OGL_LOAD(locCompileShader_t, glCompileShader); + locShaderSource = OGL_LOAD(locShaderSource_t, glShaderSource); + locDeleteShader = OGL_LOAD(locDeleteShader_t, glDeleteShader); + locCreateProgram = OGL_LOAD(locCreateProgram_t, glCreateProgram); + locDeleteProgram = OGL_LOAD(locDeleteProgram_t, glDeleteProgram); + locLinkProgram = OGL_LOAD(locLinkProgram_t, glLinkProgram); + locAttachShader = OGL_LOAD(locAttachShader_t, glAttachShader); + locBindBuffer = OGL_LOAD(locBindBuffer_t, glBindBuffer); + locBufferData = OGL_LOAD(locBufferData_t, glBufferData); + locGenBuffers = OGL_LOAD(locGenBuffers_t, glGenBuffers); + locVertexAttribPointer = OGL_LOAD(locVertexAttribPointer_t, glVertexAttribPointer); + locEnableVertexAttribArray = OGL_LOAD(locEnableVertexAttribArray_t, glEnableVertexAttribArray); + locUseProgram = OGL_LOAD(locUseProgram_t, glUseProgram); + locGetShaderInfoLog = OGL_LOAD(locGetShaderInfoLog_t, glGetShaderInfoLog); +#if !defined(OLC_PLATFORM_EMSCRIPTEN) + locBindVertexArray = OGL_LOAD(locBindVertexArray_t, glBindVertexArray); + locGenVertexArrays = OGL_LOAD(locGenVertexArrays_t, glGenVertexArrays); +#else + locBindVertexArray = glBindVertexArrayOES; + locGenVertexArrays = glGenVertexArraysOES; +#endif + + // Load & Compile Quad Shader - assumes no errors + m_nFS = locCreateShader(0x8B30); + const GLchar* strFS = +#if defined(__arm__) || defined(OLC_PLATFORM_EMSCRIPTEN) + "#version 300 es\n" + "precision mediump float;" +#else + "#version 330 core\n" +#endif + "out vec4 pixel;\n""in vec2 oTex;\n" + "in vec4 oCol;\n""uniform sampler2D sprTex;\n""void main(){pixel = texture(sprTex, oTex) * oCol;}"; + locShaderSource(m_nFS, 1, &strFS, NULL); + locCompileShader(m_nFS); + + m_nVS = locCreateShader(0x8B31); + const GLchar* strVS = +#if defined(__arm__) || defined(OLC_PLATFORM_EMSCRIPTEN) + "#version 300 es\n" + "precision mediump float;" +#else + "#version 330 core\n" +#endif + "layout(location = 0) in vec3 aPos;\n""layout(location = 1) in vec2 aTex;\n" + "layout(location = 2) in vec4 aCol;\n""out vec2 oTex;\n""out vec4 oCol;\n" + "void main(){ float p = 1.0 / aPos.z; gl_Position = p * vec4(aPos.x, aPos.y, 0.0, 1.0); oTex = p * aTex; oCol = aCol;}"; + locShaderSource(m_nVS, 1, &strVS, NULL); + locCompileShader(m_nVS); + + m_nQuadShader = locCreateProgram(); + locAttachShader(m_nQuadShader, m_nFS); + locAttachShader(m_nQuadShader, m_nVS); + locLinkProgram(m_nQuadShader); + + // Create Quad + locGenBuffers(1, &m_vbQuad); + locGenVertexArrays(1, &m_vaQuad); + locBindVertexArray(m_vaQuad); + locBindBuffer(0x8892, m_vbQuad); + + locVertex verts[OLC_MAX_VERTS]; + locBufferData(0x8892, sizeof(locVertex) * OLC_MAX_VERTS, verts, 0x88E0); + locVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(locVertex), 0); locEnableVertexAttribArray(0); + locVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(locVertex), (void*)(3 * sizeof(float))); locEnableVertexAttribArray(1); + locVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(locVertex), (void*)(5 * 
sizeof(float))); locEnableVertexAttribArray(2); + locBindBuffer(0x8892, 0); + locBindVertexArray(0); + + // Create blank texture for spriteless decals + rendBlankQuad.Create(1, 1); + rendBlankQuad.Sprite()->GetData()[0] = olc::WHITE; + rendBlankQuad.Decal()->Update(); + return olc::rcode::OK; + } + + olc::rcode DestroyDevice() override + { +#if defined(OLC_PLATFORM_WINAPI) + wglDeleteContext(glRenderContext); +#endif + +#if defined(OLC_PLATFORM_X11) + glXMakeCurrent(olc_Display, None, NULL); + glXDestroyContext(olc_Display, glDeviceContext); +#endif + +#if defined(OLC_PLATFORM_GLUT) + glutDestroyWindow(glutGetWindow()); +#endif + +#if defined(OLC_PLATFORM_EMSCRIPTEN) + eglMakeCurrent(olc_Display, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); + eglDestroyContext(olc_Display, olc_Context); + eglDestroySurface(olc_Display, olc_Surface); + eglTerminate(olc_Display); + olc_Display = EGL_NO_DISPLAY; + olc_Surface = EGL_NO_SURFACE; + olc_Context = EGL_NO_CONTEXT; +#endif + return olc::rcode::OK; + } + + void DisplayFrame() override + { +#if defined(OLC_PLATFORM_WINAPI) + SwapBuffers(glDeviceContext); + if (bSync) DwmFlush(); // Woooohooooooo!!!! SMOOOOOOOTH! +#endif + +#if defined(OLC_PLATFORM_X11) + X11::glXSwapBuffers(olc_Display, *olc_Window); +#endif + +#if defined(OLC_PLATFORM_GLUT) + glutSwapBuffers(); +#endif + +#if defined(OLC_PLATFORM_EMSCRIPTEN) + eglSwapBuffers(olc_Display, olc_Surface); +#endif + } + + void PrepareDrawing() override + { + glEnable(GL_BLEND); + nDecalMode = DecalMode::NORMAL; + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + locUseProgram(m_nQuadShader); + locBindVertexArray(m_vaQuad); + +#if defined(OLC_PLATFORM_EMSCRIPTEN) + locVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(locVertex), 0); locEnableVertexAttribArray(0); + locVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(locVertex), (void*)(3 * sizeof(float))); locEnableVertexAttribArray(1); + locVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(locVertex), (void*)(5 * sizeof(float))); locEnableVertexAttribArray(2); +#endif + } + + void SetDecalMode(const olc::DecalMode& mode) override + { + if (mode != nDecalMode) + { + switch (mode) + { + case olc::DecalMode::NORMAL: glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); break; + case olc::DecalMode::ADDITIVE: glBlendFunc(GL_SRC_ALPHA, GL_ONE); break; + case olc::DecalMode::MULTIPLICATIVE: glBlendFunc(GL_DST_COLOR, GL_ONE_MINUS_SRC_ALPHA); break; + case olc::DecalMode::STENCIL: glBlendFunc(GL_ZERO, GL_SRC_ALPHA); break; + case olc::DecalMode::ILLUMINATE: glBlendFunc(GL_ONE_MINUS_SRC_ALPHA, GL_SRC_ALPHA); break; + case olc::DecalMode::WIREFRAME: glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); break; + } + + nDecalMode = mode; + } + } + + void DrawLayerQuad(const olc::vf2d& offset, const olc::vf2d& scale, const olc::Pixel tint) override + { + locBindBuffer(0x8892, m_vbQuad); + locVertex verts[4] = { + {{-1.0f, -1.0f, 1.0}, {0.0f * scale.x + offset.x, 1.0f * scale.y + offset.y}, tint}, + {{+1.0f, -1.0f, 1.0}, {1.0f * scale.x + offset.x, 1.0f * scale.y + offset.y}, tint}, + {{-1.0f, +1.0f, 1.0}, {0.0f * scale.x + offset.x, 0.0f * scale.y + offset.y}, tint}, + {{+1.0f, +1.0f, 1.0}, {1.0f * scale.x + offset.x, 0.0f * scale.y + offset.y}, tint}, + }; + + locBufferData(0x8892, sizeof(locVertex) * 4, verts, 0x88E0); + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + } + + void DrawDecal(const olc::DecalInstance& decal) override + { + SetDecalMode(decal.mode); + if (decal.decal == nullptr) + glBindTexture(GL_TEXTURE_2D, rendBlankQuad.Decal()->id); + else 
+ glBindTexture(GL_TEXTURE_2D, decal.decal->id); + + locBindBuffer(0x8892, m_vbQuad); + + for (uint32_t i = 0; i < decal.points; i++) + pVertexMem[i] = { { decal.pos[i].x, decal.pos[i].y, decal.w[i] }, { decal.uv[i].x, decal.uv[i].y }, decal.tint[i] }; + + locBufferData(0x8892, sizeof(locVertex) * decal.points, pVertexMem, 0x88E0); + + if (nDecalMode == DecalMode::WIREFRAME) + glDrawArrays(GL_LINE_LOOP, 0, decal.points); + else + { + if (decal.structure == olc::DecalStructure::FAN) + glDrawArrays(GL_TRIANGLE_FAN, 0, decal.points); + else if (decal.structure == olc::DecalStructure::STRIP) + glDrawArrays(GL_TRIANGLE_STRIP, 0, decal.points); + else if (decal.structure == olc::DecalStructure::LIST) + glDrawArrays(GL_TRIANGLES, 0, decal.points); + } + } + + uint32_t CreateTexture(const uint32_t width, const uint32_t height, const bool filtered, const bool clamp) override + { + UNUSED(width); + UNUSED(height); + uint32_t id = 0; + glGenTextures(1, &id); + glBindTexture(GL_TEXTURE_2D, id); + + if (filtered) + { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + } + else + { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); + } + + if (clamp) + { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP); + } + else + { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); + } +#if !defined(OLC_PLATFORM_EMSCRIPTEN) + glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE); +#endif + return id; + } + + uint32_t DeleteTexture(const uint32_t id) override + { + glDeleteTextures(1, &id); + return id; + } + + void UpdateTexture(uint32_t id, olc::Sprite* spr) override + { + UNUSED(id); + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, spr->width, spr->height, 0, GL_RGBA, GL_UNSIGNED_BYTE, spr->GetData()); + } + + void ReadTexture(uint32_t id, olc::Sprite* spr) override + { + glReadPixels(0, 0, spr->width, spr->height, GL_RGBA, GL_UNSIGNED_BYTE, spr->GetData()); + } + + void ApplyTexture(uint32_t id) override + { + glBindTexture(GL_TEXTURE_2D, id); + } + + void ClearBuffer(olc::Pixel p, bool bDepth) override + { + glClearColor(float(p.r) / 255.0f, float(p.g) / 255.0f, float(p.b) / 255.0f, float(p.a) / 255.0f); + glClear(GL_COLOR_BUFFER_BIT); + if (bDepth) glClear(GL_DEPTH_BUFFER_BIT); + } + + void UpdateViewport(const olc::vi2d& pos, const olc::vi2d& size) override + { + glViewport(pos.x, pos.y, size.x, size.y); + } + }; +} +#endif +// O------------------------------------------------------------------------------O +// | END RENDERER: OpenGL 3.3 (3.0 es) (sh-sh-sh-shaders....) | +// O------------------------------------------------------------------------------O +#pragma endregion + +// O------------------------------------------------------------------------------O +// | olcPixelGameEngine Image loaders | +// O------------------------------------------------------------------------------O + +#pragma region image_gdi +// O------------------------------------------------------------------------------O +// | START IMAGE LOADER: GDI+, Windows Only, always exists, a little slow | +// O------------------------------------------------------------------------------O +#if defined(OLC_IMAGE_GDI) + +#define min(a, b) ((a < b) ? a : b) +#define max(a, b) ((a > b) ? 
a : b) +#include +#include +#if defined(__MINGW32__) // Thanks Gusgo & Dandistine, but c'mon mingw!! wtf?! + #include +#else + #include +#endif +#include +#undef min +#undef max + +#if !defined(__MINGW32__) + #pragma comment(lib, "gdiplus.lib") + #pragma comment(lib, "Shlwapi.lib") +#endif + +namespace olc +{ + // Thanks @MaGetzUb for this, which allows sprites to be defined + // at construction, by initialising the GDI subsystem + static class GDIPlusStartup + { + public: + GDIPlusStartup() + { + Gdiplus::GdiplusStartupInput startupInput; + GdiplusStartup(&token, &startupInput, NULL); + } + + ULONG_PTR token; + + ~GDIPlusStartup() + { + // Well, MarcusTU thought this was important :D + Gdiplus::GdiplusShutdown(token); + } + } gdistartup; + + class ImageLoader_GDIPlus : public olc::ImageLoader + { + private: + std::wstring ConvertS2W(std::string s) + { +#ifdef __MINGW32__ + wchar_t* buffer = new wchar_t[s.length() + 1]; + mbstowcs(buffer, s.c_str(), s.length()); + buffer[s.length()] = L'\0'; +#else + int count = MultiByteToWideChar(CP_UTF8, 0, s.c_str(), -1, NULL, 0); + wchar_t* buffer = new wchar_t[count]; + MultiByteToWideChar(CP_UTF8, 0, s.c_str(), -1, buffer, count); +#endif + std::wstring w(buffer); + delete[] buffer; + return w; + } + + public: + ImageLoader_GDIPlus() : ImageLoader() + {} + + olc::rcode LoadImageResource(olc::Sprite* spr, const std::string& sImageFile, olc::ResourcePack* pack) override + { + // clear out existing sprite + spr->pColData.clear(); + + // Open file + UNUSED(pack); + Gdiplus::Bitmap* bmp = nullptr; + if (pack != nullptr) + { + // Load sprite from input stream + ResourceBuffer rb = pack->GetFileBuffer(sImageFile); + bmp = Gdiplus::Bitmap::FromStream(SHCreateMemStream((BYTE*)rb.vMemory.data(), UINT(rb.vMemory.size()))); + } + else + { + // Check file exists + if (!_gfs::exists(sImageFile)) return olc::rcode::NO_FILE; + + // Load sprite from file + bmp = Gdiplus::Bitmap::FromFile(ConvertS2W(sImageFile).c_str()); + } + + if (bmp->GetLastStatus() != Gdiplus::Ok) return olc::rcode::FAIL; + spr->width = bmp->GetWidth(); + spr->height = bmp->GetHeight(); + + spr->pColData.resize(spr->width * spr->height); + + for (int y = 0; y < spr->height; y++) + for (int x = 0; x < spr->width; x++) + { + Gdiplus::Color c; + bmp->GetPixel(x, y, &c); + spr->SetPixel(x, y, olc::Pixel(c.GetRed(), c.GetGreen(), c.GetBlue(), c.GetAlpha())); + } + delete bmp; + return olc::rcode::OK; + } + + olc::rcode SaveImageResource(olc::Sprite* spr, const std::string& sImageFile) override + { + return olc::rcode::OK; + } + }; +} +#endif +// O------------------------------------------------------------------------------O +// | END IMAGE LOADER: GDI+ | +// O------------------------------------------------------------------------------O +#pragma endregion + +#pragma region image_libpng +// O------------------------------------------------------------------------------O +// | START IMAGE LOADER: libpng, default on linux, requires -lpng (libpng-dev) | +// O------------------------------------------------------------------------------O +#if defined(OLC_IMAGE_LIBPNG) +#include +namespace olc +{ + void pngReadStream(png_structp pngPtr, png_bytep data, png_size_t length) + { + png_voidp a = png_get_io_ptr(pngPtr); + ((std::istream*)a)->read((char*)data, length); + } + + class ImageLoader_LibPNG : public olc::ImageLoader + { + public: + ImageLoader_LibPNG() : ImageLoader() + {} + + olc::rcode LoadImageResource(olc::Sprite* spr, const std::string& sImageFile, olc::ResourcePack* pack) override + { + 
UNUSED(pack); + + // clear out existing sprite + spr->pColData.clear(); + + //////////////////////////////////////////////////////////////////////////// + // Use libpng, Thanks to Guillaume Cottenceau + // https://gist.github.com/niw/5963798 + // Also reading png from streams + // http://www.piko3d.net/tutorials/libpng-tutorial-loading-png-files-from-streams/ + png_structp png; + png_infop info; + + auto loadPNG = [&]() + { + png_read_info(png, info); + png_byte color_type; + png_byte bit_depth; + png_bytep* row_pointers; + spr->width = png_get_image_width(png, info); + spr->height = png_get_image_height(png, info); + color_type = png_get_color_type(png, info); + bit_depth = png_get_bit_depth(png, info); + if (bit_depth == 16) png_set_strip_16(png); + if (color_type == PNG_COLOR_TYPE_PALETTE) png_set_palette_to_rgb(png); + if (color_type == PNG_COLOR_TYPE_GRAY && bit_depth < 8) png_set_expand_gray_1_2_4_to_8(png); + if (png_get_valid(png, info, PNG_INFO_tRNS)) png_set_tRNS_to_alpha(png); + if (color_type == PNG_COLOR_TYPE_RGB || color_type == PNG_COLOR_TYPE_GRAY || color_type == PNG_COLOR_TYPE_PALETTE) + png_set_filler(png, 0xFF, PNG_FILLER_AFTER); + if (color_type == PNG_COLOR_TYPE_GRAY || color_type == PNG_COLOR_TYPE_GRAY_ALPHA) + png_set_gray_to_rgb(png); + png_read_update_info(png, info); + row_pointers = (png_bytep*)malloc(sizeof(png_bytep) * spr->height); + for (int y = 0; y < spr->height; y++) { + row_pointers[y] = (png_byte*)malloc(png_get_rowbytes(png, info)); + } + png_read_image(png, row_pointers); + //////////////////////////////////////////////////////////////////////////// + // Create sprite array + spr->pColData.resize(spr->width * spr->height); + // Iterate through image rows, converting into sprite format + for (int y = 0; y < spr->height; y++) + { + png_bytep row = row_pointers[y]; + for (int x = 0; x < spr->width; x++) + { + png_bytep px = &(row[x * 4]); + spr->SetPixel(x, y, Pixel(px[0], px[1], px[2], px[3])); + } + } + + for (int y = 0; y < spr->height; y++) // Thanks maksym33 + free(row_pointers[y]); + free(row_pointers); + png_destroy_read_struct(&png, &info, nullptr); + }; + + png = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); + if (!png) goto fail_load; + + info = png_create_info_struct(png); + if (!info) goto fail_load; + + if (setjmp(png_jmpbuf(png))) goto fail_load; + + if (pack == nullptr) + { + FILE* f = fopen(sImageFile.c_str(), "rb"); + if (!f) return olc::rcode::NO_FILE; + png_init_io(png, f); + loadPNG(); + fclose(f); + } + else + { + ResourceBuffer rb = pack->GetFileBuffer(sImageFile); + std::istream is(&rb); + png_set_read_fn(png, (png_voidp)&is, pngReadStream); + loadPNG(); + } + + return olc::rcode::OK; + + fail_load: + spr->width = 0; + spr->height = 0; + spr->pColData.clear(); + return olc::rcode::FAIL; + } + + olc::rcode SaveImageResource(olc::Sprite* spr, const std::string& sImageFile) override + { + return olc::rcode::OK; + } + }; +} +#endif +// O------------------------------------------------------------------------------O +// | END IMAGE LOADER: | +// O------------------------------------------------------------------------------O +#pragma endregion + + +// O------------------------------------------------------------------------------O +// | olcPixelGameEngine Platforms | +// O------------------------------------------------------------------------------O + +#pragma region platform_windows +// O------------------------------------------------------------------------------O +// | START PLATFORM: MICROSOFT WINDOWS XP, VISTA, 
7, 8, 10 | +// O------------------------------------------------------------------------------O +#if defined(OLC_PLATFORM_WINAPI) + +#if defined(_WIN32) && !defined(__MINGW32__) + #pragma comment(lib, "user32.lib") // Visual Studio Only + #pragma comment(lib, "gdi32.lib") // For other Windows Compilers please add + #pragma comment(lib, "opengl32.lib") // these libs to your linker input +#endif + +namespace olc +{ + class Platform_Windows : public olc::Platform + { + private: + HWND olc_hWnd = nullptr; + std::wstring wsAppName; + + std::wstring ConvertS2W(std::string s) + { +#ifdef __MINGW32__ + wchar_t* buffer = new wchar_t[s.length() + 1]; + mbstowcs(buffer, s.c_str(), s.length()); + buffer[s.length()] = L'\0'; +#else + int count = MultiByteToWideChar(CP_UTF8, 0, s.c_str(), -1, NULL, 0); + wchar_t* buffer = new wchar_t[count]; + MultiByteToWideChar(CP_UTF8, 0, s.c_str(), -1, buffer, count); +#endif + std::wstring w(buffer); + delete[] buffer; + return w; + } + + + + public: + virtual olc::rcode ApplicationStartUp() override { return olc::rcode::OK; } + virtual olc::rcode ApplicationCleanUp() override { return olc::rcode::OK; } + virtual olc::rcode ThreadStartUp() override { return olc::rcode::OK; } + + virtual olc::rcode ThreadCleanUp() override + { + renderer->DestroyDevice(); + PostMessage(olc_hWnd, WM_DESTROY, 0, 0); + return olc::OK; + } + + virtual olc::rcode CreateGraphics(bool bFullScreen, bool bEnableVSYNC, const olc::vi2d& vViewPos, const olc::vi2d& vViewSize) override + { + if (renderer->CreateDevice({ olc_hWnd }, bFullScreen, bEnableVSYNC) == olc::rcode::OK) + { + renderer->UpdateViewport(vViewPos, vViewSize); + return olc::rcode::OK; + } + else + return olc::rcode::FAIL; + } + + virtual olc::rcode CreateWindowPane(const olc::vi2d& vWindowPos, olc::vi2d& vWindowSize, bool bFullScreen) override + { + WNDCLASS wc; + wc.hIcon = LoadIcon(NULL, IDI_APPLICATION); + wc.hCursor = LoadCursor(NULL, IDC_ARROW); + wc.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC; + wc.hInstance = GetModuleHandle(nullptr); + wc.lpfnWndProc = olc_WindowEvent; + wc.cbClsExtra = 0; + wc.cbWndExtra = 0; + wc.lpszMenuName = nullptr; + wc.hbrBackground = nullptr; + wc.lpszClassName = olcT("OLC_PIXEL_GAME_ENGINE"); + RegisterClass(&wc); + + // Define window furniture + DWORD dwExStyle = WS_EX_APPWINDOW | WS_EX_WINDOWEDGE; + DWORD dwStyle = WS_CAPTION | WS_SYSMENU | WS_VISIBLE | WS_THICKFRAME; + + olc::vi2d vTopLeft = vWindowPos; + + // Handle Fullscreen + if (bFullScreen) + { + dwExStyle = 0; + dwStyle = WS_VISIBLE | WS_POPUP; + HMONITOR hmon = MonitorFromWindow(olc_hWnd, MONITOR_DEFAULTTONEAREST); + MONITORINFO mi = { sizeof(mi) }; + if (!GetMonitorInfo(hmon, &mi)) return olc::rcode::FAIL; + vWindowSize = { mi.rcMonitor.right, mi.rcMonitor.bottom }; + vTopLeft.x = 0; + vTopLeft.y = 0; + } + + // Keep client size as requested + RECT rWndRect = { 0, 0, vWindowSize.x, vWindowSize.y }; + AdjustWindowRectEx(&rWndRect, dwStyle, FALSE, dwExStyle); + int width = rWndRect.right - rWndRect.left; + int height = rWndRect.bottom - rWndRect.top; + + olc_hWnd = CreateWindowEx(dwExStyle, olcT("OLC_PIXEL_GAME_ENGINE"), olcT(""), dwStyle, + vTopLeft.x, vTopLeft.y, width, height, NULL, NULL, GetModuleHandle(nullptr), this); + + DragAcceptFiles(olc_hWnd, true); + + // Create Keyboard Mapping + mapKeys[0x00] = Key::NONE; + mapKeys[0x41] = Key::A; mapKeys[0x42] = Key::B; mapKeys[0x43] = Key::C; mapKeys[0x44] = Key::D; mapKeys[0x45] = Key::E; + mapKeys[0x46] = Key::F; mapKeys[0x47] = Key::G; mapKeys[0x48] = Key::H; mapKeys[0x49] = Key::I; 
mapKeys[0x4A] = Key::J; + mapKeys[0x4B] = Key::K; mapKeys[0x4C] = Key::L; mapKeys[0x4D] = Key::M; mapKeys[0x4E] = Key::N; mapKeys[0x4F] = Key::O; + mapKeys[0x50] = Key::P; mapKeys[0x51] = Key::Q; mapKeys[0x52] = Key::R; mapKeys[0x53] = Key::S; mapKeys[0x54] = Key::T; + mapKeys[0x55] = Key::U; mapKeys[0x56] = Key::V; mapKeys[0x57] = Key::W; mapKeys[0x58] = Key::X; mapKeys[0x59] = Key::Y; + mapKeys[0x5A] = Key::Z; + + mapKeys[VK_F1] = Key::F1; mapKeys[VK_F2] = Key::F2; mapKeys[VK_F3] = Key::F3; mapKeys[VK_F4] = Key::F4; + mapKeys[VK_F5] = Key::F5; mapKeys[VK_F6] = Key::F6; mapKeys[VK_F7] = Key::F7; mapKeys[VK_F8] = Key::F8; + mapKeys[VK_F9] = Key::F9; mapKeys[VK_F10] = Key::F10; mapKeys[VK_F11] = Key::F11; mapKeys[VK_F12] = Key::F12; + + mapKeys[VK_DOWN] = Key::DOWN; mapKeys[VK_LEFT] = Key::LEFT; mapKeys[VK_RIGHT] = Key::RIGHT; mapKeys[VK_UP] = Key::UP; + //mapKeys[VK_RETURN] = Key::ENTER;// mapKeys[VK_RETURN] = Key::RETURN; + + mapKeys[VK_BACK] = Key::BACK; mapKeys[VK_ESCAPE] = Key::ESCAPE; mapKeys[VK_RETURN] = Key::ENTER; mapKeys[VK_PAUSE] = Key::PAUSE; + mapKeys[VK_SCROLL] = Key::SCROLL; mapKeys[VK_TAB] = Key::TAB; mapKeys[VK_DELETE] = Key::DEL; mapKeys[VK_HOME] = Key::HOME; + mapKeys[VK_END] = Key::END; mapKeys[VK_PRIOR] = Key::PGUP; mapKeys[VK_NEXT] = Key::PGDN; mapKeys[VK_INSERT] = Key::INS; + mapKeys[VK_SHIFT] = Key::SHIFT; mapKeys[VK_CONTROL] = Key::CTRL; + mapKeys[VK_SPACE] = Key::SPACE; + + mapKeys[0x30] = Key::K0; mapKeys[0x31] = Key::K1; mapKeys[0x32] = Key::K2; mapKeys[0x33] = Key::K3; mapKeys[0x34] = Key::K4; + mapKeys[0x35] = Key::K5; mapKeys[0x36] = Key::K6; mapKeys[0x37] = Key::K7; mapKeys[0x38] = Key::K8; mapKeys[0x39] = Key::K9; + + mapKeys[VK_NUMPAD0] = Key::NP0; mapKeys[VK_NUMPAD1] = Key::NP1; mapKeys[VK_NUMPAD2] = Key::NP2; mapKeys[VK_NUMPAD3] = Key::NP3; mapKeys[VK_NUMPAD4] = Key::NP4; + mapKeys[VK_NUMPAD5] = Key::NP5; mapKeys[VK_NUMPAD6] = Key::NP6; mapKeys[VK_NUMPAD7] = Key::NP7; mapKeys[VK_NUMPAD8] = Key::NP8; mapKeys[VK_NUMPAD9] = Key::NP9; + mapKeys[VK_MULTIPLY] = Key::NP_MUL; mapKeys[VK_ADD] = Key::NP_ADD; mapKeys[VK_DIVIDE] = Key::NP_DIV; mapKeys[VK_SUBTRACT] = Key::NP_SUB; mapKeys[VK_DECIMAL] = Key::NP_DECIMAL; + + // Thanks scripticuk + mapKeys[VK_OEM_1] = Key::OEM_1; // On US and UK keyboards this is the ';:' key + mapKeys[VK_OEM_2] = Key::OEM_2; // On US and UK keyboards this is the '/?' key + mapKeys[VK_OEM_3] = Key::OEM_3; // On US keyboard this is the '~' key + mapKeys[VK_OEM_4] = Key::OEM_4; // On US and UK keyboards this is the '[{' key + mapKeys[VK_OEM_5] = Key::OEM_5; // On US keyboard this is '\|' key. + mapKeys[VK_OEM_6] = Key::OEM_6; // On US and UK keyboards this is the ']}' key + mapKeys[VK_OEM_7] = Key::OEM_7; // On US keyboard this is the single/double quote key. On UK, this is the single quote/@ symbol key + mapKeys[VK_OEM_8] = Key::OEM_8; // miscellaneous characters. 
Varies by keyboard + mapKeys[VK_OEM_PLUS] = Key::EQUALS; // the '+' key on any keyboard + mapKeys[VK_OEM_COMMA] = Key::COMMA; // the comma key on any keyboard + mapKeys[VK_OEM_MINUS] = Key::MINUS; // the minus key on any keyboard + mapKeys[VK_OEM_PERIOD] = Key::PERIOD; // the period key on any keyboard + mapKeys[VK_CAPITAL] = Key::CAPS_LOCK; + return olc::OK; + } + + virtual olc::rcode SetWindowTitle(const std::string& s) override + { +#ifdef UNICODE + SetWindowText(olc_hWnd, ConvertS2W(s).c_str()); +#else + SetWindowText(olc_hWnd, s.c_str()); +#endif + return olc::OK; + } + + virtual olc::rcode StartSystemEventLoop() override + { + MSG msg; + while (GetMessage(&msg, NULL, 0, 0) > 0) + { + TranslateMessage(&msg); + DispatchMessage(&msg); + } + return olc::OK; + } + + virtual olc::rcode HandleSystemEvent() override { + struct tagPOINT p{0,0}; + //Update mouse positions and states outside the window. + GetCursorPos(&p); + ptrPGE->olc_UpdateMouse(p.x-ptrPGE->GetWindowPos().x,p.y-ptrPGE->GetWindowPos().y); + ptrPGE->olc_UpdateMouseState(0,GetAsyncKeyState(VK_LBUTTON)>>7); + ptrPGE->olc_UpdateMouseState(1,GetAsyncKeyState(VK_RBUTTON)>>7); + ptrPGE->olc_UpdateMouseState(2,GetAsyncKeyState(VK_MBUTTON)>>7); + ptrPGE->olc_UpdateMouseState(3,GetAsyncKeyState(VK_XBUTTON1)>>7); + ptrPGE->olc_UpdateMouseState(4,GetAsyncKeyState(VK_XBUTTON2)>>7); + return olc::rcode::OK; + } + + // Windows Event Handler - this is statically connected to the windows event system + static LRESULT CALLBACK olc_WindowEvent(HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam) + { + switch (uMsg) + { + case WM_MOUSEMOVE: + { + // Thanks @ForAbby (Discord) + uint16_t x = lParam & 0xFFFF; uint16_t y = (lParam >> 16) & 0xFFFF; + int16_t ix = *(int16_t*)&x; int16_t iy = *(int16_t*)&y; + ptrPGE->olc_UpdateMouse(ix, iy); + return 0; + } + case WM_MOVE: ptrPGE->olc_UpdateWindowPos(lParam & 0xFFFF, (lParam >> 16) & 0xFFFF); return 0; + case WM_SIZE: ptrPGE->olc_UpdateWindowSize(lParam & 0xFFFF, (lParam >> 16) & 0xFFFF); return 0; + case WM_MOUSEWHEEL: ptrPGE->olc_UpdateMouseWheel(GET_WHEEL_DELTA_WPARAM(wParam)); return 0; + case WM_MOUSELEAVE: ptrPGE->olc_UpdateMouseFocus(false); return 0; + case WM_SETFOCUS: ptrPGE->olc_UpdateKeyFocus(true); return 0; + case WM_KILLFOCUS: ptrPGE->olc_UpdateKeyFocus(false); return 0; + case WM_KEYDOWN: ptrPGE->olc_UpdateKeyState(mapKeys[wParam], true); return 0; + case WM_KEYUP: ptrPGE->olc_UpdateKeyState(mapKeys[wParam], false); return 0; + case WM_SYSKEYDOWN: ptrPGE->olc_UpdateKeyState(mapKeys[wParam], true); return 0; + case WM_SYSKEYUP: ptrPGE->olc_UpdateKeyState(mapKeys[wParam], false); return 0; + case WM_LBUTTONDOWN:ptrPGE->olc_UpdateMouseState(0, true); return 0; + case WM_LBUTTONUP: ptrPGE->olc_UpdateMouseState(0, false); return 0; + case WM_RBUTTONDOWN:ptrPGE->olc_UpdateMouseState(1, true); return 0; + case WM_RBUTTONUP: ptrPGE->olc_UpdateMouseState(1, false); return 0; + case WM_MBUTTONDOWN:ptrPGE->olc_UpdateMouseState(2, true); return 0; + case WM_MBUTTONUP: ptrPGE->olc_UpdateMouseState(2, false); return 0; + case WM_DROPFILES: + { + // This is all eww... 
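+				// Windows passes the drop as an HDROP: query the file count first
+				// (index 0xFFFFFFFF), then fetch each path, converting wide strings
+				// to UTF-8 when UNICODE is defined, before handing the list and the
+				// drop point to the engine.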
+ HDROP drop = (HDROP)wParam; + + uint32_t nFiles = DragQueryFile(drop, 0xFFFFFFFF, nullptr, 0); + std::vector vFiles; + for (uint32_t i = 0; i < nFiles; i++) + { + TCHAR dfbuffer[256]{}; + uint32_t len = DragQueryFile(drop, i, nullptr, 0); + DragQueryFile(drop, i, dfbuffer, 256); +#ifdef UNICODE + #ifdef __MINGW32__ + char* buffer = new char[len + 1]; + wcstombs(buffer, dfbuffer, len); + buffer[len] = '\0'; + #else + int count = WideCharToMultiByte(CP_UTF8, 0, dfbuffer, -1, NULL, 0, NULL, NULL); + char* buffer = new char[count]; + WideCharToMultiByte(CP_UTF8, 0, dfbuffer, -1, buffer, count, NULL, NULL); + #endif + vFiles.push_back(std::string(buffer)); + delete[] buffer; +#else + vFiles.push_back(std::string(dfbuffer)); +#endif + } + + // Even more eww... + POINT p; DragQueryPoint(drop, &p); + ptrPGE->olc_DropFiles(p.x, p.y, vFiles); + DragFinish(drop); + return 0; + } + break; + + + case WM_CLOSE: ptrPGE->olc_Terminate(); return 0; + case WM_DESTROY: PostQuitMessage(0); DestroyWindow(hWnd); return 0; + } + return DefWindowProc(hWnd, uMsg, wParam, lParam); + } + }; +} +#endif +// O------------------------------------------------------------------------------O +// | END PLATFORM: MICROSOFT WINDOWS XP, VISTA, 7, 8, 10 | +// O------------------------------------------------------------------------------O +#pragma endregion + +#pragma region platform_linux +// O------------------------------------------------------------------------------O +// | START PLATFORM: LINUX | +// O------------------------------------------------------------------------------O +#if defined(OLC_PLATFORM_X11) +namespace olc +{ + class Platform_Linux : public olc::Platform + { + private: + X11::Display* olc_Display = nullptr; + X11::Window olc_WindowRoot; + X11::Window olc_Window; + X11::XVisualInfo* olc_VisualInfo; + X11::Colormap olc_ColourMap; + X11::XSetWindowAttributes olc_SetWindowAttribs; + + public: + virtual olc::rcode ApplicationStartUp() override + { + return olc::rcode::OK; + } + + virtual olc::rcode ApplicationCleanUp() override + { + XDestroyWindow(olc_Display, olc_Window); + return olc::rcode::OK; + } + + virtual olc::rcode ThreadStartUp() override + { + return olc::rcode::OK; + } + + virtual olc::rcode ThreadCleanUp() override + { + renderer->DestroyDevice(); + return olc::OK; + } + + virtual olc::rcode CreateGraphics(bool bFullScreen, bool bEnableVSYNC, const olc::vi2d& vViewPos, const olc::vi2d& vViewSize) override + { + if (renderer->CreateDevice({ olc_Display, &olc_Window, olc_VisualInfo }, bFullScreen, bEnableVSYNC) == olc::rcode::OK) + { + renderer->UpdateViewport(vViewPos, vViewSize); + return olc::rcode::OK; + } + else + return olc::rcode::FAIL; + } + + virtual olc::rcode CreateWindowPane(const olc::vi2d& vWindowPos, olc::vi2d& vWindowSize, bool bFullScreen) override + { + using namespace X11; + XInitThreads(); + + // Grab the deafult display and window + olc_Display = XOpenDisplay(NULL); + olc_WindowRoot = DefaultRootWindow(olc_Display); + + // Based on the display capabilities, configure the appearance of the window + GLint olc_GLAttribs[] = { GLX_RGBA, GLX_DEPTH_SIZE, 24, GLX_DOUBLEBUFFER, None }; + olc_VisualInfo = glXChooseVisual(olc_Display, 0, olc_GLAttribs); + olc_ColourMap = XCreateColormap(olc_Display, olc_WindowRoot, olc_VisualInfo->visual, AllocNone); + olc_SetWindowAttribs.colormap = olc_ColourMap; + + // Register which events we are interested in receiving + olc_SetWindowAttribs.event_mask = ExposureMask | KeyPressMask | KeyReleaseMask | + ButtonPressMask | ButtonReleaseMask | 
PointerMotionMask | FocusChangeMask | StructureNotifyMask; + + // Create the window + olc_Window = XCreateWindow(olc_Display, olc_WindowRoot, vWindowPos.x, vWindowPos.y, + vWindowSize.x, vWindowSize.y, + 0, olc_VisualInfo->depth, InputOutput, olc_VisualInfo->visual, + CWColormap | CWEventMask, &olc_SetWindowAttribs); + + Atom wmDelete = XInternAtom(olc_Display, "WM_DELETE_WINDOW", true); + XSetWMProtocols(olc_Display, olc_Window, &wmDelete, 1); + + XMapWindow(olc_Display, olc_Window); + XStoreName(olc_Display, olc_Window, "OneLoneCoder.com - Pixel Game Engine"); + + if (bFullScreen) // Thanks DragonEye, again :D + { + Atom wm_state; + Atom fullscreen; + wm_state = XInternAtom(olc_Display, "_NET_WM_STATE", False); + fullscreen = XInternAtom(olc_Display, "_NET_WM_STATE_FULLSCREEN", False); + XEvent xev{ 0 }; + xev.type = ClientMessage; + xev.xclient.window = olc_Window; + xev.xclient.message_type = wm_state; + xev.xclient.format = 32; + xev.xclient.data.l[0] = (bFullScreen ? 1 : 0); // the action (0: off, 1: on, 2: toggle) + xev.xclient.data.l[1] = fullscreen; // first property to alter + xev.xclient.data.l[2] = 0; // second property to alter + xev.xclient.data.l[3] = 0; // source indication + XMapWindow(olc_Display, olc_Window); + XSendEvent(olc_Display, DefaultRootWindow(olc_Display), False, + SubstructureRedirectMask | SubstructureNotifyMask, &xev); + XFlush(olc_Display); + XWindowAttributes gwa; + XGetWindowAttributes(olc_Display, olc_Window, &gwa); + vWindowSize.x = gwa.width; + vWindowSize.y = gwa.height; + } + + // Create Keyboard Mapping + mapKeys[0x00] = Key::NONE; + mapKeys[0x61] = Key::A; mapKeys[0x62] = Key::B; mapKeys[0x63] = Key::C; mapKeys[0x64] = Key::D; mapKeys[0x65] = Key::E; + mapKeys[0x66] = Key::F; mapKeys[0x67] = Key::G; mapKeys[0x68] = Key::H; mapKeys[0x69] = Key::I; mapKeys[0x6A] = Key::J; + mapKeys[0x6B] = Key::K; mapKeys[0x6C] = Key::L; mapKeys[0x6D] = Key::M; mapKeys[0x6E] = Key::N; mapKeys[0x6F] = Key::O; + mapKeys[0x70] = Key::P; mapKeys[0x71] = Key::Q; mapKeys[0x72] = Key::R; mapKeys[0x73] = Key::S; mapKeys[0x74] = Key::T; + mapKeys[0x75] = Key::U; mapKeys[0x76] = Key::V; mapKeys[0x77] = Key::W; mapKeys[0x78] = Key::X; mapKeys[0x79] = Key::Y; + mapKeys[0x7A] = Key::Z; + + mapKeys[XK_F1] = Key::F1; mapKeys[XK_F2] = Key::F2; mapKeys[XK_F3] = Key::F3; mapKeys[XK_F4] = Key::F4; + mapKeys[XK_F5] = Key::F5; mapKeys[XK_F6] = Key::F6; mapKeys[XK_F7] = Key::F7; mapKeys[XK_F8] = Key::F8; + mapKeys[XK_F9] = Key::F9; mapKeys[XK_F10] = Key::F10; mapKeys[XK_F11] = Key::F11; mapKeys[XK_F12] = Key::F12; + + mapKeys[XK_Down] = Key::DOWN; mapKeys[XK_Left] = Key::LEFT; mapKeys[XK_Right] = Key::RIGHT; mapKeys[XK_Up] = Key::UP; + mapKeys[XK_KP_Enter] = Key::ENTER; mapKeys[XK_Return] = Key::ENTER; + + mapKeys[XK_BackSpace] = Key::BACK; mapKeys[XK_Escape] = Key::ESCAPE; mapKeys[XK_Linefeed] = Key::ENTER; mapKeys[XK_Pause] = Key::PAUSE; + mapKeys[XK_Scroll_Lock] = Key::SCROLL; mapKeys[XK_Tab] = Key::TAB; mapKeys[XK_Delete] = Key::DEL; mapKeys[XK_Home] = Key::HOME; + mapKeys[XK_End] = Key::END; mapKeys[XK_Page_Up] = Key::PGUP; mapKeys[XK_Page_Down] = Key::PGDN; mapKeys[XK_Insert] = Key::INS; + mapKeys[XK_Shift_L] = Key::SHIFT; mapKeys[XK_Shift_R] = Key::SHIFT; mapKeys[XK_Control_L] = Key::CTRL; mapKeys[XK_Control_R] = Key::CTRL; + mapKeys[XK_space] = Key::SPACE; mapKeys[XK_period] = Key::PERIOD; + + mapKeys[XK_0] = Key::K0; mapKeys[XK_1] = Key::K1; mapKeys[XK_2] = Key::K2; mapKeys[XK_3] = Key::K3; mapKeys[XK_4] = Key::K4; + mapKeys[XK_5] = Key::K5; mapKeys[XK_6] = Key::K6; mapKeys[XK_7] 
= Key::K7; mapKeys[XK_8] = Key::K8; mapKeys[XK_9] = Key::K9; + + mapKeys[XK_KP_0] = Key::NP0; mapKeys[XK_KP_1] = Key::NP1; mapKeys[XK_KP_2] = Key::NP2; mapKeys[XK_KP_3] = Key::NP3; mapKeys[XK_KP_4] = Key::NP4; + mapKeys[XK_KP_5] = Key::NP5; mapKeys[XK_KP_6] = Key::NP6; mapKeys[XK_KP_7] = Key::NP7; mapKeys[XK_KP_8] = Key::NP8; mapKeys[XK_KP_9] = Key::NP9; + mapKeys[XK_KP_Multiply] = Key::NP_MUL; mapKeys[XK_KP_Add] = Key::NP_ADD; mapKeys[XK_KP_Divide] = Key::NP_DIV; mapKeys[XK_KP_Subtract] = Key::NP_SUB; mapKeys[XK_KP_Decimal] = Key::NP_DECIMAL; + + // These keys vary depending on the keyboard. I've included comments for US and UK keyboard layouts + mapKeys[XK_semicolon] = Key::OEM_1; // On US and UK keyboards this is the ';:' key + mapKeys[XK_slash] = Key::OEM_2; // On US and UK keyboards this is the '/?' key + mapKeys[XK_asciitilde] = Key::OEM_3; // On US keyboard this is the '~' key + mapKeys[XK_bracketleft] = Key::OEM_4; // On US and UK keyboards this is the '[{' key + mapKeys[XK_backslash] = Key::OEM_5; // On US keyboard this is '\|' key. + mapKeys[XK_bracketright] = Key::OEM_6; // On US and UK keyboards this is the ']}' key + mapKeys[XK_apostrophe] = Key::OEM_7; // On US keyboard this is the single/double quote key. On UK, this is the single quote/@ symbol key + mapKeys[XK_numbersign] = Key::OEM_8; // miscellaneous characters. Varies by keyboard. I believe this to be the '#~' key on UK keyboards + mapKeys[XK_equal] = Key::EQUALS; // the '+' key on any keyboard + mapKeys[XK_comma] = Key::COMMA; // the comma key on any keyboard + mapKeys[XK_minus] = Key::MINUS; // the minus key on any keyboard + + mapKeys[XK_Caps_Lock] = Key::CAPS_LOCK; + + return olc::OK; + } + + virtual olc::rcode SetWindowTitle(const std::string& s) override + { + X11::XStoreName(olc_Display, olc_Window, s.c_str()); + return olc::OK; + } + + virtual olc::rcode StartSystemEventLoop() override + { + return olc::OK; + } + + virtual olc::rcode HandleSystemEvent() override + { + using namespace X11; + // Handle Xlib Message Loop - we do this in the + // same thread that OpenGL was created so we dont + // need to worry too much about multithreading with X11 + XEvent xev; + while (XPending(olc_Display)) + { + XNextEvent(olc_Display, &xev); + if (xev.type == Expose) + { + XWindowAttributes gwa; + XGetWindowAttributes(olc_Display, olc_Window, &gwa); + ptrPGE->olc_UpdateWindowPos(gwa.x, gwa.y); + ptrPGE->olc_UpdateWindowSize(gwa.width, gwa.height); + } + else if (xev.type == ConfigureNotify) + { + XConfigureEvent xce = xev.xconfigure; + ptrPGE->olc_UpdateWindowPos(xce.x, xce.y); + ptrPGE->olc_UpdateWindowSize(xce.width, xce.height); + } + else if (xev.type == KeyPress) + { + KeySym sym = XLookupKeysym(&xev.xkey, 0); + ptrPGE->olc_UpdateKeyState(mapKeys[sym], true); + XKeyEvent* e = (XKeyEvent*)&xev; // Because DragonEye loves numpads + XLookupString(e, NULL, 0, &sym, NULL); + ptrPGE->olc_UpdateKeyState(mapKeys[sym], true); + } + else if (xev.type == KeyRelease) + { + KeySym sym = XLookupKeysym(&xev.xkey, 0); + ptrPGE->olc_UpdateKeyState(mapKeys[sym], false); + XKeyEvent* e = (XKeyEvent*)&xev; + XLookupString(e, NULL, 0, &sym, NULL); + ptrPGE->olc_UpdateKeyState(mapKeys[sym], false); + } + else if (xev.type == ButtonPress) + { + switch (xev.xbutton.button) + { + case 1: ptrPGE->olc_UpdateMouseState(0, true); break; + case 2: ptrPGE->olc_UpdateMouseState(2, true); break; + case 3: ptrPGE->olc_UpdateMouseState(1, true); break; + case 4: ptrPGE->olc_UpdateMouseWheel(120); break; + case 5: ptrPGE->olc_UpdateMouseWheel(-120); break; 
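+						// X11 reports the scroll wheel as buttons 4 (up) and 5 (down);
+						// the +/-120 delta mirrors the WHEEL_DELTA convention used by
+						// the Windows implementation above.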
+ default: break; + } + } + else if (xev.type == ButtonRelease) + { + switch (xev.xbutton.button) + { + case 1: ptrPGE->olc_UpdateMouseState(0, false); break; + case 2: ptrPGE->olc_UpdateMouseState(2, false); break; + case 3: ptrPGE->olc_UpdateMouseState(1, false); break; + default: break; + } + } + else if (xev.type == MotionNotify) + { + ptrPGE->olc_UpdateMouse(xev.xmotion.x, xev.xmotion.y); + } + else if (xev.type == FocusIn) + { + ptrPGE->olc_UpdateKeyFocus(true); + } + else if (xev.type == FocusOut) + { + ptrPGE->olc_UpdateKeyFocus(false); + } + else if (xev.type == ClientMessage) + { + ptrPGE->olc_Terminate(); + } + } + return olc::OK; + } + }; +} +#endif +// O------------------------------------------------------------------------------O +// | END PLATFORM: LINUX | +// O------------------------------------------------------------------------------O +#pragma endregion + +#pragma region platform_glut +// O------------------------------------------------------------------------------O +// | START PLATFORM: GLUT (used to make it simple for Apple) | +// O------------------------------------------------------------------------------O +// +// VERY IMPORTANT!!! The Apple port was originally created by @Mumflr (discord) +// and the repo for the development of this project can be found here: +// https://github.com/MumflrFumperdink/olcPGEMac which contains maccy goodness +// and support on how to setup your build environment. +// +// "MASSIVE MASSIVE THANKS TO MUMFLR" - Javidx9 +#if defined(OLC_PLATFORM_GLUT) +namespace olc { + + class Platform_GLUT : public olc::Platform + { + public: + static std::atomic* bActiveRef; + + virtual olc::rcode ApplicationStartUp() override { + return olc::rcode::OK; + } + + virtual olc::rcode ApplicationCleanUp() override + { + return olc::rcode::OK; + } + + virtual olc::rcode ThreadStartUp() override + { + return olc::rcode::OK; + } + + virtual olc::rcode ThreadCleanUp() override + { + renderer->DestroyDevice(); + return olc::OK; + } + + virtual olc::rcode CreateGraphics(bool bFullScreen, bool bEnableVSYNC, const olc::vi2d& vViewPos, const olc::vi2d& vViewSize) override + { + if (renderer->CreateDevice({}, bFullScreen, bEnableVSYNC) == olc::rcode::OK) + { + renderer->UpdateViewport(vViewPos, vViewSize); + return olc::rcode::OK; + } + else + return olc::rcode::FAIL; + } + + static void ExitMainLoop() { + if (!ptrPGE->OnUserDestroy()) { + *bActiveRef = true; + return; + } + platform->ThreadCleanUp(); + platform->ApplicationCleanUp(); + exit(0); + } + +#if defined(__APPLE__) + static void scrollWheelUpdate(id selff, SEL _sel, id theEvent) { + static const SEL deltaYSel = sel_registerName("deltaY"); + +#if defined(__aarch64__) // Thanks ruarq! 
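+			// objc_msgSend_fpret does not exist on arm64, so the floating-point
+			// return value is read with the plain objc_msgSend here; other
+			// architectures use the _fpret variant below.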
+ double deltaY = ((double (*)(id, SEL))objc_msgSend)(theEvent, deltaYSel); +#else + double deltaY = ((double (*)(id, SEL))objc_msgSend_fpret)(theEvent, deltaYSel); +#endif + + for (int i = 0; i < abs(deltaY); i++) { + if (deltaY > 0) { + ptrPGE->olc_UpdateMouseWheel(-1); + } + else if (deltaY < 0) { + ptrPGE->olc_UpdateMouseWheel(1); + } + } + } +#endif + static void ThreadFunct() { +#if defined(__APPLE__) + static bool hasEnabledCocoa = false; + if (!hasEnabledCocoa) { + // Objective-C Wizardry + Class NSApplicationClass = objc_getClass("NSApplication"); + + // NSApp = [NSApplication sharedApplication] + SEL sharedApplicationSel = sel_registerName("sharedApplication"); + id NSApp = ((id(*)(Class, SEL))objc_msgSend)(NSApplicationClass, sharedApplicationSel); + // window = [NSApp mainWindow] + SEL mainWindowSel = sel_registerName("mainWindow"); + id window = ((id(*)(id, SEL))objc_msgSend)(NSApp, mainWindowSel); + + // [window setStyleMask: NSWindowStyleMaskClosable | ~NSWindowStyleMaskResizable] + SEL setStyleMaskSel = sel_registerName("setStyleMask:"); + ((void (*)(id, SEL, NSUInteger))objc_msgSend)(window, setStyleMaskSel, 7); + + hasEnabledCocoa = true; + } +#endif + if (!*bActiveRef) { + ExitMainLoop(); + return; + } + glutPostRedisplay(); + } + + static void DrawFunct() { + ptrPGE->olc_CoreUpdate(); + } + + virtual olc::rcode CreateWindowPane(const olc::vi2d& vWindowPos, olc::vi2d& vWindowSize, bool bFullScreen) override + { +#if defined(__APPLE__) + Class GLUTViewClass = objc_getClass("GLUTView"); + + SEL scrollWheelSel = sel_registerName("scrollWheel:"); + bool resultAddMethod = class_addMethod(GLUTViewClass, scrollWheelSel, (IMP)scrollWheelUpdate, "v@:@"); + assert(resultAddMethod); +#endif + + renderer->PrepareDevice(); + + if (bFullScreen) + { + vWindowSize.x = glutGet(GLUT_SCREEN_WIDTH); + vWindowSize.y = glutGet(GLUT_SCREEN_HEIGHT); + glutFullScreen(); + } + else + { + if (vWindowSize.x > glutGet(GLUT_SCREEN_WIDTH) || vWindowSize.y > glutGet(GLUT_SCREEN_HEIGHT)) + { + perror("ERROR: The specified window dimensions do not fit on your screen\n"); + return olc::FAIL; + } + glutReshapeWindow(vWindowSize.x, vWindowSize.y - 1); + } + + // Create Keyboard Mapping + mapKeys[0x00] = Key::NONE; + mapKeys['A'] = Key::A; mapKeys['B'] = Key::B; mapKeys['C'] = Key::C; mapKeys['D'] = Key::D; mapKeys['E'] = Key::E; + mapKeys['F'] = Key::F; mapKeys['G'] = Key::G; mapKeys['H'] = Key::H; mapKeys['I'] = Key::I; mapKeys['J'] = Key::J; + mapKeys['K'] = Key::K; mapKeys['L'] = Key::L; mapKeys['M'] = Key::M; mapKeys['N'] = Key::N; mapKeys['O'] = Key::O; + mapKeys['P'] = Key::P; mapKeys['Q'] = Key::Q; mapKeys['R'] = Key::R; mapKeys['S'] = Key::S; mapKeys['T'] = Key::T; + mapKeys['U'] = Key::U; mapKeys['V'] = Key::V; mapKeys['W'] = Key::W; mapKeys['X'] = Key::X; mapKeys['Y'] = Key::Y; + mapKeys['Z'] = Key::Z; + + mapKeys[GLUT_KEY_F1] = Key::F1; mapKeys[GLUT_KEY_F2] = Key::F2; mapKeys[GLUT_KEY_F3] = Key::F3; mapKeys[GLUT_KEY_F4] = Key::F4; + mapKeys[GLUT_KEY_F5] = Key::F5; mapKeys[GLUT_KEY_F6] = Key::F6; mapKeys[GLUT_KEY_F7] = Key::F7; mapKeys[GLUT_KEY_F8] = Key::F8; + mapKeys[GLUT_KEY_F9] = Key::F9; mapKeys[GLUT_KEY_F10] = Key::F10; mapKeys[GLUT_KEY_F11] = Key::F11; mapKeys[GLUT_KEY_F12] = Key::F12; + + mapKeys[GLUT_KEY_DOWN] = Key::DOWN; mapKeys[GLUT_KEY_LEFT] = Key::LEFT; mapKeys[GLUT_KEY_RIGHT] = Key::RIGHT; mapKeys[GLUT_KEY_UP] = Key::UP; + mapKeys[13] = Key::ENTER; + + mapKeys[127] = Key::BACK; mapKeys[27] = Key::ESCAPE; + mapKeys[9] = Key::TAB; mapKeys[GLUT_KEY_HOME] = Key::HOME; + 
mapKeys[GLUT_KEY_END] = Key::END; mapKeys[GLUT_KEY_PAGE_UP] = Key::PGUP; mapKeys[GLUT_KEY_PAGE_DOWN] = Key::PGDN; mapKeys[GLUT_KEY_INSERT] = Key::INS; + mapKeys[32] = Key::SPACE; mapKeys[46] = Key::PERIOD; + + mapKeys[48] = Key::K0; mapKeys[49] = Key::K1; mapKeys[50] = Key::K2; mapKeys[51] = Key::K3; mapKeys[52] = Key::K4; + mapKeys[53] = Key::K5; mapKeys[54] = Key::K6; mapKeys[55] = Key::K7; mapKeys[56] = Key::K8; mapKeys[57] = Key::K9; + + // NOTE: MISSING KEYS :O + + glutKeyboardFunc([](unsigned char key, int x, int y) -> void { + switch (glutGetModifiers()) { + case 0: //This is when there are no modifiers + if ('a' <= key && key <= 'z') key -= 32; + break; + case GLUT_ACTIVE_SHIFT: + ptrPGE->olc_UpdateKeyState(Key::SHIFT, true); + break; + case GLUT_ACTIVE_CTRL: + if ('a' <= key && key <= 'z') key -= 32; + ptrPGE->olc_UpdateKeyState(Key::CTRL, true); + break; + case GLUT_ACTIVE_ALT: + if ('a' <= key && key <= 'z') key -= 32; + break; + } + + if (mapKeys[key]) + ptrPGE->olc_UpdateKeyState(mapKeys[key], true); + }); + + glutKeyboardUpFunc([](unsigned char key, int x, int y) -> void { + switch (glutGetModifiers()) { + case 0: //This is when there are no modifiers + if ('a' <= key && key <= 'z') key -= 32; + break; + case GLUT_ACTIVE_SHIFT: + ptrPGE->olc_UpdateKeyState(Key::SHIFT, false); + break; + case GLUT_ACTIVE_CTRL: + if ('a' <= key && key <= 'z') key -= 32; + ptrPGE->olc_UpdateKeyState(Key::CTRL, false); + break; + case GLUT_ACTIVE_ALT: + if ('a' <= key && key <= 'z') key -= 32; + //No ALT in PGE + break; + } + + if (mapKeys[key]) + ptrPGE->olc_UpdateKeyState(mapKeys[key], false); + }); + + //Special keys + glutSpecialFunc([](int key, int x, int y) -> void { + if (mapKeys[key]) + ptrPGE->olc_UpdateKeyState(mapKeys[key], true); + }); + + glutSpecialUpFunc([](int key, int x, int y) -> void { + if (mapKeys[key]) + ptrPGE->olc_UpdateKeyState(mapKeys[key], false); + }); + + glutMouseFunc([](int button, int state, int x, int y) -> void { + switch (button) { + case GLUT_LEFT_BUTTON: + if (state == GLUT_UP) ptrPGE->olc_UpdateMouseState(0, false); + else if (state == GLUT_DOWN) ptrPGE->olc_UpdateMouseState(0, true); + break; + case GLUT_MIDDLE_BUTTON: + if (state == GLUT_UP) ptrPGE->olc_UpdateMouseState(2, false); + else if (state == GLUT_DOWN) ptrPGE->olc_UpdateMouseState(2, true); + break; + case GLUT_RIGHT_BUTTON: + if (state == GLUT_UP) ptrPGE->olc_UpdateMouseState(1, false); + else if (state == GLUT_DOWN) ptrPGE->olc_UpdateMouseState(1, true); + break; + } + }); + + auto mouseMoveCall = [](int x, int y) -> void { + ptrPGE->olc_UpdateMouse(x, y); + }; + + glutMotionFunc(mouseMoveCall); + glutPassiveMotionFunc(mouseMoveCall); + + glutEntryFunc([](int state) -> void { + if (state == GLUT_ENTERED) ptrPGE->olc_UpdateKeyFocus(true); + else if (state == GLUT_LEFT) ptrPGE->olc_UpdateKeyFocus(false); + }); + + glutDisplayFunc(DrawFunct); + glutIdleFunc(ThreadFunct); + + return olc::OK; + } + + virtual olc::rcode SetWindowTitle(const std::string& s) override + { + glutSetWindowTitle(s.c_str()); + return olc::OK; + } + + virtual olc::rcode StartSystemEventLoop() override { + glutMainLoop(); + return olc::OK; + } + + virtual olc::rcode HandleSystemEvent() override + { + return olc::OK; + } + }; + + std::atomic* Platform_GLUT::bActiveRef{ nullptr }; + + //Custom Start + olc::rcode PixelGameEngine::Start() + { + if (platform->ApplicationStartUp() != olc::OK) return olc::FAIL; + + // Construct the window + if (platform->CreateWindowPane({ 30,30 }, vWindowSize, bFullScreen) != olc::OK) return 
olc::FAIL; + olc_UpdateWindowSize(vWindowSize.x, vWindowSize.y); + + if (platform->ThreadStartUp() == olc::FAIL) return olc::FAIL; + olc_PrepareEngine(); + if (!OnUserCreate()) return olc::FAIL; + Platform_GLUT::bActiveRef = &bAtomActive; + glutWMCloseFunc(Platform_GLUT::ExitMainLoop); + bAtomActive = true; + platform->StartSystemEventLoop(); + + //This code will not even be run but why not + if (platform->ApplicationCleanUp() != olc::OK) return olc::FAIL; + + return olc::OK; + } +} + +#endif +// O------------------------------------------------------------------------------O +// | END PLATFORM: GLUT | +// O------------------------------------------------------------------------------O +#pragma endregion + + + + + +#pragma region platform_emscripten +// O------------------------------------------------------------------------------O +// | START PLATFORM: Emscripten - Totally Game Changing... | +// O------------------------------------------------------------------------------O + +// +// Firstly a big mega thank you to members of the OLC Community for sorting this +// out. Making a browser compatible version has been a priority for quite some +// time, but I lacked the expertise to do it. This awesome feature is possible +// because a group of former strangers got together and formed friendships over +// their shared passion for code. If anything demonstrates how powerful helping +// each other can be, it's this. - Javidx9 + +// Emscripten Platform: MaGetzUb, Moros1138, Slavka, Dandistine, Gorbit99, Bispoo +// also: Ishidex, Gusgo99, SlicEnDicE, Alexio + + +#if defined(OLC_PLATFORM_EMSCRIPTEN) + +#include +#include + +extern "C" +{ + EMSCRIPTEN_KEEPALIVE inline int olc_OnPageUnload() + { olc::platform->ApplicationCleanUp(); return 0; } +} + +namespace olc +{ + class Platform_Emscripten : public olc::Platform + { + public: + + virtual olc::rcode ApplicationStartUp() override + { return olc::rcode::OK; } + + virtual olc::rcode ApplicationCleanUp() override + { ThreadCleanUp(); return olc::rcode::OK; } + + virtual olc::rcode ThreadStartUp() override + { return olc::rcode::OK; } + + virtual olc::rcode ThreadCleanUp() override + { renderer->DestroyDevice(); return olc::OK; } + + virtual olc::rcode CreateGraphics(bool bFullScreen, bool bEnableVSYNC, const olc::vi2d& vViewPos, const olc::vi2d& vViewSize) override + { + if (renderer->CreateDevice({}, bFullScreen, bEnableVSYNC) == olc::rcode::OK) + { + renderer->UpdateViewport(vViewPos, vViewSize); + return olc::rcode::OK; + } + else + return olc::rcode::FAIL; + } + + virtual olc::rcode CreateWindowPane(const olc::vi2d& vWindowPos, olc::vi2d& vWindowSize, bool bFullScreen) override + { + emscripten_set_canvas_element_size("#canvas", vWindowSize.x, vWindowSize.y); + + mapKeys[DOM_PK_UNKNOWN] = Key::NONE; + mapKeys[DOM_PK_A] = Key::A; mapKeys[DOM_PK_B] = Key::B; mapKeys[DOM_PK_C] = Key::C; mapKeys[DOM_PK_D] = Key::D; + mapKeys[DOM_PK_E] = Key::E; mapKeys[DOM_PK_F] = Key::F; mapKeys[DOM_PK_G] = Key::G; mapKeys[DOM_PK_H] = Key::H; + mapKeys[DOM_PK_I] = Key::I; mapKeys[DOM_PK_J] = Key::J; mapKeys[DOM_PK_K] = Key::K; mapKeys[DOM_PK_L] = Key::L; + mapKeys[DOM_PK_M] = Key::M; mapKeys[DOM_PK_N] = Key::N; mapKeys[DOM_PK_O] = Key::O; mapKeys[DOM_PK_P] = Key::P; + mapKeys[DOM_PK_Q] = Key::Q; mapKeys[DOM_PK_R] = Key::R; mapKeys[DOM_PK_S] = Key::S; mapKeys[DOM_PK_T] = Key::T; + mapKeys[DOM_PK_U] = Key::U; mapKeys[DOM_PK_V] = Key::V; mapKeys[DOM_PK_W] = Key::W; mapKeys[DOM_PK_X] = Key::X; + mapKeys[DOM_PK_Y] = Key::Y; mapKeys[DOM_PK_Z] = Key::Z; + 
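+			// Digits, function keys, navigation, numpad and punctuation keys
+			// follow the same DOM_PK_* mapping scheme below.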
mapKeys[DOM_PK_0] = Key::K0; mapKeys[DOM_PK_1] = Key::K1; mapKeys[DOM_PK_2] = Key::K2; + mapKeys[DOM_PK_3] = Key::K3; mapKeys[DOM_PK_4] = Key::K4; mapKeys[DOM_PK_5] = Key::K5; + mapKeys[DOM_PK_6] = Key::K6; mapKeys[DOM_PK_7] = Key::K7; mapKeys[DOM_PK_8] = Key::K8; + mapKeys[DOM_PK_9] = Key::K9; + mapKeys[DOM_PK_F1] = Key::F1; mapKeys[DOM_PK_F2] = Key::F2; mapKeys[DOM_PK_F3] = Key::F3; mapKeys[DOM_PK_F4] = Key::F4; + mapKeys[DOM_PK_F5] = Key::F5; mapKeys[DOM_PK_F6] = Key::F6; mapKeys[DOM_PK_F7] = Key::F7; mapKeys[DOM_PK_F8] = Key::F8; + mapKeys[DOM_PK_F9] = Key::F9; mapKeys[DOM_PK_F10] = Key::F10; mapKeys[DOM_PK_F11] = Key::F11; mapKeys[DOM_PK_F12] = Key::F12; + mapKeys[DOM_PK_ARROW_UP] = Key::UP; mapKeys[DOM_PK_ARROW_DOWN] = Key::DOWN; + mapKeys[DOM_PK_ARROW_LEFT] = Key::LEFT; mapKeys[DOM_PK_ARROW_RIGHT] = Key::RIGHT; + mapKeys[DOM_PK_SPACE] = Key::SPACE; mapKeys[DOM_PK_TAB] = Key::TAB; + mapKeys[DOM_PK_SHIFT_LEFT] = Key::SHIFT; mapKeys[DOM_PK_SHIFT_RIGHT] = Key::SHIFT; + mapKeys[DOM_PK_CONTROL_LEFT] = Key::CTRL; mapKeys[DOM_PK_CONTROL_RIGHT] = Key::CTRL; + mapKeys[DOM_PK_INSERT] = Key::INS; mapKeys[DOM_PK_DELETE] = Key::DEL; mapKeys[DOM_PK_HOME] = Key::HOME; + mapKeys[DOM_PK_END] = Key::END; mapKeys[DOM_PK_PAGE_UP] = Key::PGUP; mapKeys[DOM_PK_PAGE_DOWN] = Key::PGDN; + mapKeys[DOM_PK_BACKSPACE] = Key::BACK; mapKeys[DOM_PK_ESCAPE] = Key::ESCAPE; + mapKeys[DOM_PK_ENTER] = Key::ENTER; mapKeys[DOM_PK_NUMPAD_EQUAL] = Key::EQUALS; + mapKeys[DOM_PK_NUMPAD_ENTER] = Key::ENTER; mapKeys[DOM_PK_PAUSE] = Key::PAUSE; + mapKeys[DOM_PK_SCROLL_LOCK] = Key::SCROLL; + mapKeys[DOM_PK_NUMPAD_0] = Key::NP0; mapKeys[DOM_PK_NUMPAD_1] = Key::NP1; mapKeys[DOM_PK_NUMPAD_2] = Key::NP2; + mapKeys[DOM_PK_NUMPAD_3] = Key::NP3; mapKeys[DOM_PK_NUMPAD_4] = Key::NP4; mapKeys[DOM_PK_NUMPAD_5] = Key::NP5; + mapKeys[DOM_PK_NUMPAD_6] = Key::NP6; mapKeys[DOM_PK_NUMPAD_7] = Key::NP7; mapKeys[DOM_PK_NUMPAD_8] = Key::NP8; + mapKeys[DOM_PK_NUMPAD_9] = Key::NP9; + mapKeys[DOM_PK_NUMPAD_MULTIPLY] = Key::NP_MUL; mapKeys[DOM_PK_NUMPAD_DIVIDE] = Key::NP_DIV; + mapKeys[DOM_PK_NUMPAD_ADD] = Key::NP_ADD; mapKeys[DOM_PK_NUMPAD_SUBTRACT] = Key::NP_SUB; + mapKeys[DOM_PK_NUMPAD_DECIMAL] = Key::NP_DECIMAL; + mapKeys[DOM_PK_PERIOD] = Key::PERIOD; mapKeys[DOM_PK_EQUAL] = Key::EQUALS; + mapKeys[DOM_PK_COMMA] = Key::COMMA; mapKeys[DOM_PK_MINUS] = Key::MINUS; + mapKeys[DOM_PK_CAPS_LOCK] = Key::CAPS_LOCK; + mapKeys[DOM_PK_SEMICOLON] = Key::OEM_1; mapKeys[DOM_PK_SLASH] = Key::OEM_2; mapKeys[DOM_PK_BACKQUOTE] = Key::OEM_3; + mapKeys[DOM_PK_BRACKET_LEFT] = Key::OEM_4; mapKeys[DOM_PK_BACKSLASH] = Key::OEM_5; mapKeys[DOM_PK_BRACKET_RIGHT] = Key::OEM_6; + mapKeys[DOM_PK_QUOTE] = Key::OEM_7; mapKeys[DOM_PK_BACKSLASH] = Key::OEM_8; + + // Keyboard Callbacks + emscripten_set_keydown_callback("body", 0, 1, keyboard_callback); + emscripten_set_keyup_callback("body", 0, 1, keyboard_callback); + + // Mouse Callbacks + emscripten_set_wheel_callback("#canvas", 0, 1, wheel_callback); + emscripten_set_mousedown_callback("body", 0, 1, mouse_callback); + emscripten_set_mouseup_callback("body", 0, 1, mouse_callback); + emscripten_set_mousemove_callback("body", 0, 1, mouse_callback); + + // Touch Callbacks + emscripten_set_touchstart_callback("body", 0, 1, touch_callback); + emscripten_set_touchmove_callback("body", 0, 1, touch_callback); + emscripten_set_touchend_callback("body", 0, 1, touch_callback); + + // Canvas Focus Callbacks + emscripten_set_blur_callback("#canvas", 0, 1, focus_callback); + emscripten_set_focus_callback("#canvas", 0, 1, focus_callback); + 
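+			// With the DOM input callbacks registered, the remaining browser-side
+			// setup (page-unload hook, resize and fullscreen observers) is injected
+			// as JavaScript via EM_ASM below.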
+#pragma warning disable format + EM_ASM( window.onunload = Module._olc_OnPageUnload; ); + + // IMPORTANT! - Sorry About This... + // + // In order to handle certain browser based events, such as resizing and + // going to full screen, we have to effectively inject code into the container + // running the PGE. Yes, I vomited about 11 times too when the others were + // convincing me this is the future. Well, this isnt the future, and if it + // were to be, I want no part of what must be a miserable distopian free + // for all of anarchic code injection to get rudimentary events like "Resize()". + // + // Wake up people! Of course theres a spoon. There has to be to keep feeding + // the giant web baby. + + + EM_ASM({ + + // olc_ApsectRatio + // + // Used by olc_ResizeHandler to calculate the viewport from the + // dimensions of the canvas container's element. + Module.olc_AspectRatio = $0 / $1; + + // HACK ALERT! + // + // Here we assume any html shell that uses 3 or more instance of the class "emscripten" + // is using one of the default or minimal emscripten page layouts + Module.olc_AssumeDefaultShells = (document.querySelectorAll('.emscripten').length >= 3) ? true : false; + + oncontextmenu=function(e){return false}; //Because we can click outside the window, we want to disable normal right-click context menu for the application. + + // olc_ResizeHandler + // + // Used by olc_Init, and is called when a resize observer and fullscreenchange event is triggered. + var olc_ResizeHandler = function() + { + // are we in fullscreen mode? + let isFullscreen = (document.fullscreenElement != null); + + // get the width of the containing element + let width = (isFullscreen) ? window.innerWidth : Module.canvas.parentNode.clientWidth; + let height = (isFullscreen) ? 
window.innerHeight : Module.canvas.parentNode.clientHeight; + + // calculate the expected viewport size + let viewWidth = width; + let viewHeight = width / Module.olc_AspectRatio; + + // if we're taller than the containing element, recalculate based on height + if(viewHeight > height) + { + viewWidth = height * Module.olc_AspectRatio; + viewHeight = height; + } + + // ensure resulting viewport is in integer space + viewWidth = parseInt(viewWidth); + viewHeight = parseInt(viewHeight); + + setTimeout(function() + { + // if default shells, apply default styles + if(Module.olc_AssumeDefaultShells) + Module.canvas.parentNode.setAttribute('style', 'width: 100%; height: 70vh; margin-left: auto; margin-right: auto;'); + + // apply viewport dimensions to teh canvas + Module.canvas.setAttribute('width', viewWidth); + Module.canvas.setAttribute('height', viewHeight); + Module.canvas.setAttribute('style', `width: ${viewWidth}px; height: ${viewHeight}px;`); + + // update the PGE window size + Module._olc_PGE_UpdateWindowSize(viewWidth, viewHeight); + + // force focus on our PGE canvas + Module.canvas.focus(); + }, 200); + }; + + + // olc_Init + // + // set up resize observer and fullscreenchange event handler + var olc_Init = function() + { + if(Module.olc_AspectRatio === undefined) + { + setTimeout(function() { Module.olc_Init(); }, 50); + return; + } + + let resizeObserver = new ResizeObserver(function(entries) + { + Module.olc_ResizeHandler(); + }).observe(Module.canvas.parentNode); + + let mutationObserver = new MutationObserver(function(mutationsList, observer) + { + setTimeout(function() { Module.olc_ResizeHandler(); }, 200); + }).observe(Module.canvas.parentNode, { attributes: false, childList: true, subtree: false }); + + window.addEventListener('fullscreenchange', function(e) + { + setTimeout(function() { Module.olc_ResizeHandler();}, 200); + }); + }; + + // set up hooks + Module.olc_ResizeHandler = (Module.olc_ResizeHandler != undefined) ? Module.olc_ResizeHandler : olc_ResizeHandler; + Module.olc_Init = (Module.olc_Init != undefined) ? Module.olc_Init : olc_Init; + + // run everything! + Module.olc_Init(); + + }, vWindowSize.x, vWindowSize.y); // Fullscreen and Resize Observers +#pragma warning restore format + return olc::rcode::OK; + } + + // Interface PGE's UpdateWindowSize, for use in Javascript + void UpdateWindowSize(int width, int height) + { + ptrPGE->olc_UpdateWindowSize(width, height); + } + + //TY Gorbit + static EM_BOOL focus_callback(int eventType, const EmscriptenFocusEvent* focusEvent, void* userData) + { + if (eventType == EMSCRIPTEN_EVENT_BLUR) + { + ptrPGE->olc_UpdateKeyFocus(false); + ptrPGE->olc_UpdateMouseFocus(false); + } + else if (eventType == EMSCRIPTEN_EVENT_FOCUS) + { + ptrPGE->olc_UpdateKeyFocus(true); + ptrPGE->olc_UpdateMouseFocus(true); + } + + return 0; + } + + //TY Moros + static EM_BOOL keyboard_callback(int eventType, const EmscriptenKeyboardEvent* e, void* userData) + { + if (eventType == EMSCRIPTEN_EVENT_KEYDOWN) + ptrPGE->olc_UpdateKeyState(mapKeys[emscripten_compute_dom_pk_code(e->code)], true); + + // THANK GOD!! for this compute function. And thanks Dandistine for pointing it out! 
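+			// emscripten_compute_dom_pk_code() converts the event's DOM "code"
+			// string into the DOM_PK_* constant used to index mapKeys above.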
+ if (eventType == EMSCRIPTEN_EVENT_KEYUP) + ptrPGE->olc_UpdateKeyState(mapKeys[emscripten_compute_dom_pk_code(e->code)], false); + + //Consume keyboard events so that keys like F1 and F5 don't do weird things + return EM_TRUE; + } + + //TY Moros + static EM_BOOL wheel_callback(int eventType, const EmscriptenWheelEvent* e, void* userData) + { + if (eventType == EMSCRIPTEN_EVENT_WHEEL) + ptrPGE->olc_UpdateMouseWheel(-1 * e->deltaY); + + return EM_TRUE; + } + + //TY Bispoo + static EM_BOOL touch_callback(int eventType, const EmscriptenTouchEvent* e, void* userData) + { + // Move + if (eventType == EMSCRIPTEN_EVENT_TOUCHMOVE) + { + ptrPGE->olc_UpdateMouse(e->touches->targetX-ptrPGE->GetWindowPos().x, e->touches->targetY-ptrPGE->GetWindowPos().y); + } + + // Start + if (eventType == EMSCRIPTEN_EVENT_TOUCHSTART) + { + ptrPGE->olc_UpdateMouse(e->touches->targetX-ptrPGE->GetWindowPos().x, e->touches->targetY-ptrPGE->GetWindowPos().y); + ptrPGE->olc_UpdateMouseState(0, true); + } + + // End + if (eventType == EMSCRIPTEN_EVENT_TOUCHEND) + { + ptrPGE->olc_UpdateMouseState(0, false); + } + + return EM_TRUE; + } + + //TY Moros + static EM_BOOL mouse_callback(int eventType, const EmscriptenMouseEvent* e, void* userData) + { + //Mouse Movement + if (eventType == EMSCRIPTEN_EVENT_MOUSEMOVE) + ptrPGE->olc_UpdateMouse(e->targetX-ptrPGE->GetWindowPos().x, e->targetY-ptrPGE->GetWindowPos().y); + + + //Mouse button press + if (e->button == 0) // left click + { + if (eventType == EMSCRIPTEN_EVENT_MOUSEDOWN) + ptrPGE->olc_UpdateMouseState(0, true); + else if (eventType == EMSCRIPTEN_EVENT_MOUSEUP) + ptrPGE->olc_UpdateMouseState(0, false); + } + + if (e->button == 2) // right click + { + if (eventType == EMSCRIPTEN_EVENT_MOUSEDOWN) + ptrPGE->olc_UpdateMouseState(1, true); + else if (eventType == EMSCRIPTEN_EVENT_MOUSEUP) + ptrPGE->olc_UpdateMouseState(1, false); + + } + + if (e->button == 1) // middle click + { + if (eventType == EMSCRIPTEN_EVENT_MOUSEDOWN) + ptrPGE->olc_UpdateMouseState(2, true); + else if (eventType == EMSCRIPTEN_EVENT_MOUSEUP) + ptrPGE->olc_UpdateMouseState(2, false); + + //at the moment only middle mouse needs to consume events. 
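+				// Returning EM_TRUE marks the event as consumed so the browser's
+				// default middle-click behaviour (e.g. auto-scroll) is suppressed
+				// over the canvas.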
+ return EM_TRUE; + } + + return EM_FALSE; + } + + + virtual olc::rcode SetWindowTitle(const std::string& s) override + { emscripten_set_window_title(s.c_str()); return olc::OK; } + + virtual olc::rcode StartSystemEventLoop() override + { return olc::OK; } + + virtual olc::rcode HandleSystemEvent() override + { + ptrPGE->olc_UpdateWindowPos(EM_ASM_INT({return Module.canvas.getBoundingClientRect().left}),EM_ASM_INT({return Module.canvas.getBoundingClientRect().top})); + return olc::OK; + } + + static void MainLoop() + { + olc::Platform::ptrPGE->olc_CoreUpdate(); + if (!ptrPGE->olc_IsRunning()) + { + if (ptrPGE->OnUserDestroy()) + { + emscripten_cancel_main_loop(); + platform->ApplicationCleanUp(); + } + else + { + ptrPGE->olc_Reanimate(); + } + } + } + }; + + //Emscripten needs a special Start function + //Much of this is usually done in EngineThread, but that isn't used here + olc::rcode PixelGameEngine::Start() + { + if (platform->ApplicationStartUp() != olc::OK) return olc::FAIL; + + // Construct the window + if (platform->CreateWindowPane({ 30,30 }, vWindowSize, bFullScreen) != olc::OK) return olc::FAIL; + olc_UpdateWindowSize(vWindowSize.x, vWindowSize.y); + + // Some implementations may form an event loop here + if (platform->ThreadStartUp() == olc::FAIL) return olc::FAIL; + + // Do engine context specific initialisation + olc_PrepareEngine(); + + // Consider the "thread" started + bAtomActive = true; + + // Create user resources as part of this thread + for (auto& ext : vExtensions) ext->OnBeforeUserCreate(); + if (!OnUserCreate()) bAtomActive = false; + for (auto& ext : vExtensions) ext->OnAfterUserCreate(); + + platform->StartSystemEventLoop(); + + //This causes a heap memory corruption in Emscripten for some reason + //Platform_Emscripten::bActiveRef = &bAtomActive; + emscripten_set_main_loop(&Platform_Emscripten::MainLoop, 0, 1); + + // Wait for thread to be exited + if (platform->ApplicationCleanUp() != olc::OK) return olc::FAIL; + return olc::OK; + } +} + +extern "C" +{ + EMSCRIPTEN_KEEPALIVE inline void olc_PGE_UpdateWindowSize(int width, int height) + { + emscripten_set_canvas_element_size("#canvas", width, height); + // Thanks slavka + ((olc::Platform_Emscripten*)olc::platform.get())->UpdateWindowSize(width, height); + } +} + +#endif +// O------------------------------------------------------------------------------O +// | END PLATFORM: Emscripten | +// O------------------------------------------------------------------------------O +#pragma endregion + + +#endif // Headless + +// O------------------------------------------------------------------------------O +// | olcPixelGameEngine Auto-Configuration | +// O------------------------------------------------------------------------------O +#pragma region pge_config +namespace olc +{ + void PixelGameEngine::olc_ConfigureSystem() + { + +//#if !defined(OLC_PGE_HEADLESS) + + olc::Sprite::loader = nullptr; + +#if defined(OLC_IMAGE_GDI) + olc::Sprite::loader = std::make_unique(); +#endif + +#if defined(OLC_IMAGE_LIBPNG) + olc::Sprite::loader = std::make_unique(); +#endif + +#if defined(OLC_IMAGE_STB) + olc::Sprite::loader = std::make_unique(); +#endif + +#if defined(OLC_IMAGE_CUSTOM_EX) + olc::Sprite::loader = std::make_unique(); +#endif + + +#if defined(OLC_PLATFORM_HEADLESS) + platform = std::make_unique(); +#endif + +#if defined(OLC_PLATFORM_WINAPI) + platform = std::make_unique(); +#endif + +#if defined(OLC_PLATFORM_X11) + platform = std::make_unique(); +#endif + +#if defined(OLC_PLATFORM_GLUT) + platform = std::make_unique(); 
+#endif + +#if defined(OLC_PLATFORM_EMSCRIPTEN) + platform = std::make_unique(); +#endif + +#if defined(OLC_PLATFORM_CUSTOM_EX) + platform = std::make_unique(); +#endif + +#if defined(OLC_GFX_HEADLESS) + renderer = std::make_unique(); +#endif + +#if defined(OLC_GFX_OPENGL10) + renderer = std::make_unique(); +#endif + +#if defined(OLC_GFX_OPENGL33) + renderer = std::make_unique(); +#endif + +#if defined(OLC_GFX_OPENGLES2) + renderer = std::make_unique(); +#endif + +#if defined(OLC_GFX_DIRECTX10) + renderer = std::make_unique(); +#endif + +#if defined(OLC_GFX_DIRECTX11) + renderer = std::make_unique(); +#endif + +#if defined(OLC_GFX_CUSTOM_EX) + renderer = std::make_unique(); +#endif + + // Associate components with PGE instance + platform->ptrPGE = this; + renderer->ptrPGE = this; +//#else +// olc::Sprite::loader = nullptr; +// platform = nullptr; +// renderer = nullptr; +//#endif + } +} + +#pragma endregion + +#endif // End OLC_PGE_APPLICATION + +// O------------------------------------------------------------------------------O +// | END OF OLC_PGE_APPLICATION | +// O------------------------------------------------------------------------------O \ No newline at end of file diff --git a/Example/test.txt b/Example/test.txt new file mode 100644 index 0000000..4f7cce8 --- /dev/null +++ b/Example/test.txt @@ -0,0 +1,18 @@ +BOOTH - 創作物の総合マーケット + + + + + + + + + + + + +

We are sorry. Your browser is not supported.

Because the version of your browser is not currently supported, BOOTH cannot be used.
+Please update your browser. (Recommended browsers
+
+If this page is displayed even on the latest version, please check whether your browser's compatibility mode settings are being applied to BOOTH.
+ 
\ No newline at end of file
diff --git a/Example/test2.cpp b/Example/test2.cpp
new file mode 100644
index 0000000..cfabe76
--- /dev/null
+++ b/Example/test2.cpp
@@ -0,0 +1,9 @@
+#include <windows.h>
+#include <urlmon.h>	// URLDownloadToFile (link against urlmon.lib)
+
+class Test{
+public:
+	Test(){
+		URLDownloadToFile(NULL, L"https://tenmaseoyyung.booth.pm/", L"test.txt", 0, NULL);
+	}
+};
\ No newline at end of file
diff --git a/Example/untitled.mtl b/Example/untitled.mtl
new file mode 100644
index 0000000..e690002
--- /dev/null
+++ b/Example/untitled.mtl
@@ -0,0 +1,10 @@
+# Exported from Wings 3D 2.2.9
+newmtl default
+Ns 19.999999999999996
+d 1.0
+illum 2
+Kd 0.7898538076923077 0.8133333333333334 0.6940444444444445
+Ka 0.0 0.0 0.0
+Ks 0.1689853807692308 0.17133333333333334 0.15940444444444446
+Ke 0.0 0.0 0.0
+
diff --git a/Example/vec2d.h b/Example/vec2d.h
new file mode 100644
index 0000000..d880713
--- /dev/null
+++ b/Example/vec2d.h
@@ -0,0 +1,151 @@
+
+#include <cmath>	// sqrtf, sin, cos
+
+template <typename T>
+class Vec2
+{
+public:
+	//Vec2() = default;
+	Vec2()
+	{
+		x = 0;
+		y = 0;
+	}
+	Vec2(T x, T y) : x(x), y(y) {}
+
+	~Vec2() = default;
+
+	void Add(const Vec2& v) { x += v.x; y += v.y; };
+	void Sub(const Vec2& v) { x -= v.x; y -= v.y; };
+	void Scale(const T n) { x *= n; y *= n; };
+	Vec2 Rotate(const T angle) const
+	{
+		Vec2 result;
+		result.x = x * cos(angle) - y * sin(angle);
+		result.y = x * sin(angle) + y * cos(angle);
+		return result;
+	};
+
+	float Magnitude() const { return sqrtf(x * x + y * y); };
+	float MagnitudeSquared() const { return (x * x + y * y); };
+
+	Vec2& Normalize()
+	{
+		float length = Magnitude();
+		if (length != 0.0f)
+		{
+			x /= length;
+			y /= length;
+		}
+		return *this;
+	}
+
+	Vec2 UnitVector() const
+	{
+		Vec2 result = Vec2(0, 0);
+		float length = Magnitude();
+		if (length != 0.0f)
+		{
+			result.x = x / length;
+			result.y = y / length;
+		}
+		return result;
+	};
+
+	Vec2 Normal() const { return Vec2(y, -x).Normalize(); };
+
+	float Dot(const Vec2& v) const { return (x * v.x) + (y * v.y); };
+	float Cross(const Vec2& v) const { return (x * v.y) - (y * v.x); };
+
+	//overload operator
+	Vec2& operator =(const Vec2& v)
+	{
+		x = v.x;
+		y = v.y;
+		return *this;
+	}
+
+	bool operator ==(const Vec2& v) const { return x == v.x && y == v.y; };
+	bool operator !=(const Vec2& v) const { return !(*this == v); };
+
+	Vec2 operator +(const Vec2& v) const
+	{
+		Vec2 result;
+		result.x = x + v.x;
+		result.y = y + v.y;
+
+		return result;
+	};
+
+	Vec2 operator -(const Vec2& v) const
+	{
+		Vec2 result;
+		result.x = x - v.x;
+		result.y = y - v.y;
+
+		return result;
+	};
+
+	Vec2 operator *(const T n) const
+	{
+		Vec2 result;
+		result.x = x * n;
+		result.y = y * n;
+
+		return result;
+	};
+
+	Vec2 operator /(const T n) const
+	{
+		Vec2 result;
+		result.x = x / n;
+		result.y = y / n;
+
+		return result;
+	};
+
+	Vec2 operator -()
+	{
+		Vec2 result;
+		result.x = x * -1;
+		result.y = y * -1;
+		return result;
+	};
+
+	Vec2& operator += (const Vec2& v)
+	{
+		x += v.x;
+		y += v.y;
+		return *this;
+	};
+
+	Vec2& operator -=(const Vec2& v)
+	{
+		x -= v.x;
+		y -= v.y;
+		return *this;
+	};
+	Vec2& operator *=(const T n)
+	{
+		x *= n;
+		y *= n;
+		return *this;
+	};
+	Vec2& operator /=(const T n)
+	{
+		x /= n;
+		y /= n;
+		return *this;
+	};
+
+
+public:
+	T x, y;
+
+
+};
+
+typedef Vec2<int> vi2d;
+typedef Vec2<unsigned int> vu2d;
+typedef Vec2<float> vf2d;
+typedef Vec2<double> vd2d;
\ No newline at end of file